Merge commit 'cc1470fe08' into release/v0.1
Commit 99ba233da7
886 changed files with 153388 additions and 19612 deletions
.github/dependabot.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file

version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "daily"
.github/workflows/build-and-push.yml (4 changed lines)
@@ -19,7 +19,7 @@ jobs:
permissions:
packages: write
name: "Build GARM images"
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- name: "Checkout"
uses: actions/checkout@v3

@@ -49,4 +49,4 @@ jobs:
--label "org.opencontainers.image.licenses=Apache 2.0" \
--build-arg="GARM_REF=${{ github.event.inputs.ref }}" \
-t ${{ github.event.inputs.push_to_project }}/garm:"${VERSION}" \
--push .
--push .
.github/workflows/go-tests.yml (6 changed lines)
@@ -17,7 +17,7 @@ permissions: {}
jobs:
linters:
name: Linters
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
timeout-minutes: 10

steps:

@@ -26,9 +26,9 @@ jobs:
sudo apt-get update
sudo apt-get install -y libbtrfs-dev build-essential apg jq

- uses: actions/setup-go@v3
- uses: actions/setup-go@v5
with:
go-version: 'stable'
go-version: '^1.22.3'
- uses: actions/checkout@v3
- name: make lint
run: make golangci-lint && GOLANGCI_LINT_EXTRA_ARGS="--timeout=8m --build-tags=testing,integration" make lint
.github/workflows/integration-tests.yml (56 changed lines)
@@ -6,7 +6,7 @@ on:

jobs:
integration-tests:
runs-on: ubuntu-latest
runs-on: ubuntu-noble-garm
steps:
- name: Checkout
uses: actions/checkout@v4

@@ -17,20 +17,50 @@ jobs:
go-version-file: go.mod

- name: Setup LXD
uses: canonical/setup-lxd@v0.1.1
uses: canonical/setup-lxd@main
with:
channel: latest/stable

- name: Install dependencies
run: |
sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
sudo apt-get -qq update && sudo apt-get -qq install -y apg coreutils make
sudo apt-get -qq update && sudo apt-get -qq install -y apg coreutils make jq build-essential libsqlite3-dev libsqlite3-0

- name: Set up ngrok
id: ngrok
uses: gabriel-samfira/ngrok-tunnel-action@v1.1
with:
ngrok_authtoken: ${{ secrets.NGROK_AUTH_TOKEN }}
port: 9997
tunnel_type: http
- name: Set up tunnel
shell: bash
run: |
mkdir -p /home/runner/.ssh
echo "${{ secrets.SSH_PRIVATE_KEY }}" > /home/runner/.ssh/ssh_key
sudo chown -R runner:runner /home/runner/.ssh
sudo chmod 500 /home/runner/.ssh
sudo chmod 400 /home/runner/.ssh/ssh_key

SUBDOMAIN=$(apg -a 0 -M l -m 12 -n 1)
echo "::add-mask::$SUBDOMAIN"

BASE_URL="${{ secrets.TUNNEL_BASE_URL }}"
GARM_BASE_URL="https://$SUBDOMAIN.$BASE_URL"
echo "::add-mask::$GARM_BASE_URL"

echo "GARM_BASE_URL=$GARM_BASE_URL" >> $GITHUB_ENV

cat <<EOF | sudo tee /etc/systemd/system/garm-tunnel.service
[Unit]
Description=GARM tunnel
After=network.target

[Service]
Type=simple
ExecStart=/usr/bin/ssh -q -i /home/runner/.ssh/ssh_key -N -n -o ServerAliveInterval=60 -o ExitOnForwardFailure=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -R $SUBDOMAIN:80:127.0.0.1:9997 $BASE_URL
Restart=always
User=runner
[Install]
WantedBy=default.target
EOF

sudo systemctl daemon-reload
sudo systemctl enable garm-tunnel
sudo systemctl start garm-tunnel

- name: Generate secrets
run: |

@@ -57,12 +87,12 @@ jobs:
set -o errexit
make integration 2>&1
env:
GARM_BASE_URL: ${{ steps.ngrok.outputs.tunnel-url }}
ORG_NAME: gsamfira
REPO_NAME: garm-testing
CREDENTIALS_NAME: test-garm-creds
WORKFLOW_FILE_NAME: test.yml
GH_TOKEN: ${{ secrets.GH_OAUTH_TOKEN }}
LXD_REMOTE_SERVER: ${{ secrets.LXD_REMOTE_SERVER }}

- name: Show GARM logs
if: always()

@@ -73,10 +103,11 @@ jobs:

- name: Upload GARM and e2e logs
if: always()
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: garm-logs
path: /artifacts-logs
merge-multiple: true

- name: Cleanup orphan GARM resources via GitHub API
if: always()

@@ -87,7 +118,6 @@ jobs:
sudo systemctl stop garm@runner || true
go run ./test/integration/gh_cleanup/main.go || true
env:
GARM_BASE_URL: ${{ steps.ngrok.outputs.tunnel-url }}
ORG_NAME: gsamfira
REPO_NAME: garm-testing
GH_TOKEN: ${{ secrets.GH_OAUTH_TOKEN }}
@@ -38,3 +38,7 @@ linters-settings:

goimports:
local-prefixes: github.com/cloudbase/garm

gosec:
excludes:
- G115

@@ -45,5 +45,6 @@ COPY --from=builder /opt/garm/providers.d/garm-provider-gcp /opt/garm/providers.
COPY --from=builder /opt/garm/providers.d/garm-provider-equinix /opt/garm/providers.d/garm-provider-equinix

COPY --from=builder /opt/garm/providers.d/garm-provider-k8s /opt/garm/providers.d/garm-provider-k8s
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/

ENTRYPOINT ["/bin/garm", "-config", "/etc/garm/config.toml"]

Makefile (2 changed lines)
@@ -106,7 +106,7 @@ $(LOCALBIN):
GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint

## Tool Versions
GOLANGCI_LINT_VERSION ?= v1.56.2
GOLANGCI_LINT_VERSION ?= v1.61.0

.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. If wrong version is installed, it will be overwritten.
@@ -159,8 +159,10 @@ func (a *APIController) WebhookHandler(w http.ResponseWriter, r *http.Request) {
switch event {
case runnerParams.WorkflowJobEvent:
a.handleWorkflowJobEvent(ctx, w, r)
case runnerParams.PingEvent:
// Ignore ping event. We may want to save the ping in the github entity table in the future.
default:
slog.InfoContext(ctx, "ignoring unknown event", "gh_event", util.SanitizeLogEntry(string(event)))
slog.DebugContext(ctx, "ignoring unknown event", "gh_event", util.SanitizeLogEntry(string(event)))
}
}

@@ -39,6 +39,6 @@ var (
// URLsRequired is returned if the controller does not have the required URLs
URLsRequired = APIErrorResponse{
Error: "urls_required",
Details: "Missing required URLs. Make sure you update the metadata, callback and webhook URLs",
Details: "Missing required URLs. Make sure you update the metadata and callback URLs",
}
)

@@ -66,7 +66,7 @@ func (u *urlsRequired) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctrlInfo, err := u.store.ControllerInfo()
if err != nil || ctrlInfo.WebhookURL == "" || ctrlInfo.MetadataURL == "" || ctrlInfo.CallbackURL == "" {
if err != nil || ctrlInfo.MetadataURL == "" || ctrlInfo.CallbackURL == "" {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusConflict)
if err := json.NewEncoder(w).Encode(params.URLsRequired); err != nil {

@@ -22,6 +22,7 @@ import (

apiClientController "github.com/cloudbase/garm/client/controller"
apiClientControllerInfo "github.com/cloudbase/garm/client/controller_info"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -164,6 +165,10 @@ func renderControllerInfoTable(info params.ControllerInfo) string {
}

func formatInfo(info params.ControllerInfo) error {
if outputFormat == common.OutputFormatJSON {
printAsJSON(info)
return nil
}
fmt.Println(renderControllerInfoTable(info))
return nil
}
@@ -21,6 +21,7 @@ import (
"github.com/spf13/cobra"

apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -182,6 +183,8 @@ func init() {
enterpriseAddCmd.Flags().StringVar(&enterpriseCreds, "credentials", "", "Credentials name. See credentials list.")
enterpriseAddCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", string(params.PoolBalancerTypeRoundRobin), "The balancing strategy to use when creating runners in pools matching requested labels.")

enterpriseListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")

enterpriseAddCmd.MarkFlagRequired("credentials") //nolint
enterpriseAddCmd.MarkFlagRequired("name") //nolint
enterpriseUpdateCmd.Flags().StringVar(&enterpriseWebhookSecret, "webhook-secret", "", "The webhook secret for this enterprise")

@@ -200,22 +203,39 @@ func init() {
}

func formatEnterprises(enterprises []params.Enterprise) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(enterprises)
return
}
t := table.NewWriter()
header := table.Row{"ID", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Pool mgr running"}
if long {
header = append(header, "Created At", "Updated At")
}
t.AppendHeader(header)
for _, val := range enterprises {
t.AppendRow(table.Row{val.ID, val.Name, val.Endpoint.Name, val.Credentials.Name, val.GetBalancerType(), val.PoolManagerStatus.IsRunning})
row := table.Row{val.ID, val.Name, val.Endpoint.Name, val.Credentials.Name, val.GetBalancerType(), val.PoolManagerStatus.IsRunning}
if long {
row = append(row, val.CreatedAt, val.UpdatedAt)
}
t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}

func formatOneEnterprise(enterprise params.Enterprise) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(enterprise)
return
}
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", enterprise.ID})
t.AppendRow(table.Row{"Created At", enterprise.CreatedAt})
t.AppendRow(table.Row{"Updated At", enterprise.UpdatedAt})
t.AppendRow(table.Row{"Name", enterprise.Name})
t.AppendRow(table.Row{"Endpoint", enterprise.Endpoint.Name})
t.AppendRow(table.Row{"Pool balancer type", enterprise.GetBalancerType()})
@@ -25,6 +25,7 @@ import (
"github.com/spf13/cobra"

apiClientCreds "github.com/cloudbase/garm/client/credentials"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -222,6 +223,8 @@ func init() {
githubCredentialsUpdateCmd.Flags().Int64Var(&credentialsAppID, "app-id", 0, "If the credential is an app, the app ID")
githubCredentialsUpdateCmd.Flags().StringVar(&credentialsPrivateKeyPath, "private-key-path", "", "If the credential is an app, the path to the private key file")

githubCredentialsListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")

githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-installation-id")
githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-id")
githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "private-key-path")

@@ -342,22 +345,39 @@ func parseCredentialsUpdateParams() (params.UpdateGithubCredentialsParams, error
}

func formatGithubCredentials(creds []params.GithubCredentials) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(creds)
return
}
t := table.NewWriter()
header := table.Row{"ID", "Name", "Description", "Base URL", "API URL", "Upload URL", "Type"}
if long {
header = append(header, "Created At", "Updated At")
}
t.AppendHeader(header)
for _, val := range creds {
t.AppendRow(table.Row{val.ID, val.Name, val.Description, val.BaseURL, val.APIBaseURL, val.UploadBaseURL, val.AuthType})
row := table.Row{val.ID, val.Name, val.Description, val.BaseURL, val.APIBaseURL, val.UploadBaseURL, val.AuthType}
if long {
row = append(row, val.CreatedAt, val.UpdatedAt)
}
t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}

func formatOneGithubCredential(cred params.GithubCredentials) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(cred)
return
}
t := table.NewWriter()
header := table.Row{"Field", "Value"}
t.AppendHeader(header)

t.AppendRow(table.Row{"ID", cred.ID})
t.AppendRow(table.Row{"Created At", cred.CreatedAt})
t.AppendRow(table.Row{"Updated At", cred.UpdatedAt})
t.AppendRow(table.Row{"Name", cred.Name})
t.AppendRow(table.Row{"Description", cred.Description})
t.AppendRow(table.Row{"Base URL", cred.BaseURL})
@@ -10,6 +10,7 @@ import (
"github.com/spf13/cobra"

apiClientEndpoints "github.com/cloudbase/garm/client/endpoints"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -188,6 +189,8 @@ func init() {
githubEndpointCreateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the GitHub endpoint")
githubEndpointCreateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the GitHub endpoint")

githubEndpointListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")

githubEndpointCreateCmd.MarkFlagRequired("name")
githubEndpointCreateCmd.MarkFlagRequired("base-url")
githubEndpointCreateCmd.MarkFlagRequired("api-base-url")

@@ -250,21 +253,39 @@ func parseCreateParams() (params.CreateGithubEndpointParams, error) {
}

func formatEndpoints(endpoints params.GithubEndpoints) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(endpoints)
return
}
t := table.NewWriter()
header := table.Row{"Name", "Base URL", "Description"}
if long {
header = append(header, "Created At", "Updated At")
}
t.AppendHeader(header)
for _, val := range endpoints {
t.AppendRow([]interface{}{val.Name, val.BaseURL, val.Description})
row := table.Row{val.Name, val.BaseURL, val.Description}
if long {
row = append(row, val.CreatedAt, val.UpdatedAt)
}
t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}

func formatOneEndpoint(endpoint params.GithubEndpoint) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(endpoint)
return
}
t := table.NewWriter()
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow([]interface{}{"Name", endpoint.Name})
t.AppendRow([]interface{}{"Description", endpoint.Description})
t.AppendRow([]interface{}{"Created At", endpoint.CreatedAt})
t.AppendRow([]interface{}{"Updated At", endpoint.UpdatedAt})
t.AppendRow([]interface{}{"Base URL", endpoint.BaseURL})
t.AppendRow([]interface{}{"Upload URL", endpoint.UploadBaseURL})
t.AppendRow([]interface{}{"API Base URL", endpoint.APIBaseURL})
@@ -115,7 +115,7 @@ garm-cli init --name=dev --url=https://runner.example.com --username=admin --pas
}

controllerInfoResponse, err := apiCli.Controller.UpdateController(updateUrlsReq, authToken)
renderResponseMessage(response.Payload, controllerInfoResponse.Payload, err)
renderResponseMessage(response.Payload, controllerInfoResponse, err)
return nil
},
}

@@ -204,10 +204,7 @@ func renderUserTable(user params.User) string {
return t.Render()
}

func renderResponseMessage(user params.User, controllerInfo params.ControllerInfo, err error) {
userTable := renderUserTable(user)
controllerInfoTable := renderControllerInfoTable(controllerInfo)

func renderResponseMessage(user params.User, controllerInfo *apiClientController.UpdateControllerOK, controllerURLUpdateErr error) {
headerMsg := `Congrats! Your controller is now initialized.

Following are the details of the admin user and details about the controller.

@@ -244,11 +241,13 @@ you must set them up by running:
See the help message for garm-cli controller update for more information.
`
var ctrlMsg string
if err != nil {
ctrlMsg = fmt.Sprintf(controllerErrorMsg, err)
if controllerURLUpdateErr != nil || controllerInfo == nil {
ctrlMsg = fmt.Sprintf(controllerErrorMsg, controllerURLUpdateErr)
} else {
controllerInfoTable := renderControllerInfoTable(controllerInfo.Payload)
ctrlMsg = fmt.Sprintf(controllerMsg, controllerInfoTable)
}

userTable := renderUserTable(user)
fmt.Printf("%s\n%s\n", fmt.Sprintf(headerMsg, userTable), ctrlMsg)
}
@@ -23,6 +23,7 @@ import (
"github.com/spf13/cobra"

apiClientJobs "github.com/cloudbase/garm/client/jobs"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -57,6 +58,10 @@ var jobsListCmd = &cobra.Command{
}

func formatJobs(jobs []params.Job) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(jobs)
return
}
t := table.NewWriter()
header := table.Row{"ID", "Name", "Status", "Conclusion", "Runner Name", "Repository", "Requested Labels", "Locked by"}
t.AppendHeader(header)
@@ -22,6 +22,7 @@ import (

"github.com/cloudbase/garm-provider-common/util"
apiClientOrgs "github.com/cloudbase/garm/client/organizations"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -310,6 +311,7 @@ func init() {
orgAddCmd.Flags().BoolVar(&installOrgWebhook, "install-webhook", false, "Install the webhook as part of the add operation.")
orgAddCmd.MarkFlagsMutuallyExclusive("webhook-secret", "random-webhook-secret")
orgAddCmd.MarkFlagsOneRequired("webhook-secret", "random-webhook-secret")
orgListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")

orgAddCmd.MarkFlagRequired("credentials") //nolint
orgAddCmd.MarkFlagRequired("name") //nolint

@@ -340,26 +342,45 @@ func init() {
}

func formatOrganizations(orgs []params.Organization) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(orgs)
return
}
t := table.NewWriter()
header := table.Row{"ID", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Pool mgr running"}
if long {
header = append(header, "Created At", "Updated At")
}
t.AppendHeader(header)
for _, val := range orgs {
t.AppendRow(table.Row{val.ID, val.Name, val.Endpoint.Name, val.CredentialsName, val.GetBalancerType(), val.PoolManagerStatus.IsRunning})
row := table.Row{val.ID, val.Name, val.Endpoint.Name, val.CredentialsName, val.GetBalancerType(), val.PoolManagerStatus.IsRunning}
if long {
row = append(row, val.CreatedAt, val.UpdatedAt)
}
t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}

func formatOneOrganization(org params.Organization) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(org)
return
}
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", org.ID})
t.AppendRow(table.Row{"Created At", org.CreatedAt})
t.AppendRow(table.Row{"Updated At", org.UpdatedAt})
t.AppendRow(table.Row{"Name", org.Name})
t.AppendRow(table.Row{"Endpoint", org.Endpoint.Name})
t.AppendRow(table.Row{"Pool balancer type", org.GetBalancerType()})
t.AppendRow(table.Row{"Credentials", org.CredentialsName})
t.AppendRow(table.Row{"Created at", org.CreatedAt})
t.AppendRow(table.Row{"Updated at", org.UpdatedAt})
t.AppendRow(table.Row{"Pool manager running", org.PoolManagerStatus.IsRunning})
if !org.PoolManagerStatus.IsRunning {
t.AppendRow(table.Row{"Failure reason", org.PoolManagerStatus.FailureReason})
@@ -29,6 +29,7 @@ import (
apiClientOrgs "github.com/cloudbase/garm/client/organizations"
apiClientPools "github.com/cloudbase/garm/client/pools"
apiClientRepos "github.com/cloudbase/garm/client/repositories"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -385,6 +386,7 @@ func init() {
poolListCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "List all pools within this organization.")
poolListCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "List all pools within this enterprise.")
poolListCmd.Flags().BoolVarP(&poolAll, "all", "a", false, "List all pools, regardless of org or repo.")
poolListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
poolListCmd.MarkFlagsMutuallyExclusive("repo", "org", "all", "enterprise")

poolUpdateCmd.Flags().StringVar(&poolImage, "image", "", "The provider-specific image name to use for runners in this pool.")

@@ -466,8 +468,18 @@ func asRawMessage(data []byte) (json.RawMessage, error) {
}

func formatPools(pools []params.Pool) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(pools)
return
}
t := table.NewWriter()
header := table.Row{"ID", "Image", "Flavor", "Tags", "Belongs to", "Level", "Enabled", "Runner Prefix", "Priority"}
t.SetColumnConfigs([]table.ColumnConfig{
{Number: 2, WidthMax: 40},
})
header := table.Row{"ID", "Image", "Flavor", "Tags", "Belongs to", "Enabled"}
if long {
header = append(header, "Level", "Created At", "Updated at", "Runner Prefix", "Priority")
}
t.AppendHeader(header)

for _, pool := range pools {

@@ -489,13 +501,21 @@ func formatPools(pools []params.Pool) {
belongsTo = pool.EnterpriseName
level = "enterprise"
}
t.AppendRow(table.Row{pool.ID, pool.Image, pool.Flavor, strings.Join(tags, " "), belongsTo, level, pool.Enabled, pool.GetRunnerPrefix(), pool.Priority})
row := table.Row{pool.ID, pool.Image, pool.Flavor, strings.Join(tags, " "), belongsTo, pool.Enabled}
if long {
row = append(row, level, pool.CreatedAt, pool.UpdatedAt, pool.GetRunnerPrefix(), pool.Priority)
}
t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}

func formatOnePool(pool params.Pool) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(pool)
return
}
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}

@@ -523,6 +543,8 @@ func formatOnePool(pool params.Pool) {

t.AppendHeader(header)
t.AppendRow(table.Row{"ID", pool.ID})
t.AppendRow(table.Row{"Created At", pool.CreatedAt})
t.AppendRow(table.Row{"Updated At", pool.UpdatedAt})
t.AppendRow(table.Row{"Provider Name", pool.ProviderName})
t.AppendRow(table.Row{"Priority", pool.Priority})
t.AppendRow(table.Row{"Image", pool.Image})
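For context on the `--long`/`-l` flag wired up above: the extra columns (Level, Created At, Updated At, Runner Prefix, Priority) are only appended when the flag is passed. A brief usage sketch (pool contents are illustrative only):

```bash
# Compact listing: ID, Image, Flavor, Tags, Belongs to, Enabled
garm-cli pool list --all

# Long listing appends Level, Created At, Updated At, Runner Prefix and Priority
garm-cli pool list --all --long
```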
@@ -239,6 +239,10 @@ func init() {
}

func formatProfiles(profiles []config.Manager) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(profiles)
return
}
t := table.NewWriter()
header := table.Row{"Name", "Base URL"}
t.AppendHeader(header)

@@ -21,6 +21,7 @@ import (
"github.com/spf13/cobra"

apiClientProviders "github.com/cloudbase/garm/client/providers"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -64,6 +65,10 @@ func init() {
}

func formatProviders(providers []params.Provider) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(providers)
return
}
t := table.NewWriter()
header := table.Row{"Name", "Description", "Type"}
t.AppendHeader(header)
@@ -22,6 +22,7 @@ import (

"github.com/cloudbase/garm-provider-common/util"
apiClientRepos "github.com/cloudbase/garm/client/repositories"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -315,6 +316,8 @@ func init() {
repoAddCmd.MarkFlagsMutuallyExclusive("webhook-secret", "random-webhook-secret")
repoAddCmd.MarkFlagsOneRequired("webhook-secret", "random-webhook-secret")

repoListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")

repoAddCmd.MarkFlagRequired("credentials") //nolint
repoAddCmd.MarkFlagRequired("owner") //nolint
repoAddCmd.MarkFlagRequired("name") //nolint

@@ -346,22 +349,39 @@ func init() {
}

func formatRepositories(repos []params.Repository) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(repos)
return
}
t := table.NewWriter()
header := table.Row{"ID", "Owner", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Pool mgr running"}
if long {
header = append(header, "Created At", "Updated At")
}
t.AppendHeader(header)
for _, val := range repos {
t.AppendRow(table.Row{val.ID, val.Owner, val.Name, val.Endpoint.Name, val.CredentialsName, val.GetBalancerType(), val.PoolManagerStatus.IsRunning})
row := table.Row{val.ID, val.Owner, val.Name, val.Endpoint.Name, val.CredentialsName, val.GetBalancerType(), val.PoolManagerStatus.IsRunning}
if long {
row = append(row, val.CreatedAt, val.UpdatedAt)
}
t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}

func formatOneRepository(repo params.Repository) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(repo)
return
}
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", repo.ID})
t.AppendRow(table.Row{"Created At", repo.CreatedAt})
t.AppendRow(table.Row{"Updated At", repo.UpdatedAt})
t.AppendRow(table.Row{"Owner", repo.Owner})
t.AppendRow(table.Row{"Name", repo.Name})
t.AppendRow(table.Row{"Endpoint", repo.Endpoint.Name})
@@ -15,6 +15,7 @@
package cmd

import (
"encoding/json"
"fmt"
"net/url"
"os"

@@ -25,6 +26,7 @@ import (
"github.com/spf13/cobra"

apiClient "github.com/cloudbase/garm/client"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/cmd/garm-cli/config"
"github.com/cloudbase/garm/params"
)

@@ -37,7 +39,8 @@ var (
needsInit bool
debug bool
poolBalancerType string
errNeedsInitError = fmt.Errorf("please log into a garm installation first")
outputFormat common.OutputFormat = common.OutputFormatTable
errNeedsInitError = fmt.Errorf("please log into a garm installation first")
)

// rootCmd represents the base command when called without any subcommands

@@ -51,6 +54,8 @@ var rootCmd = &cobra.Command{
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug on all API calls")
rootCmd.PersistentFlags().Var(&outputFormat, "format", "Output format (table, json)")

cobra.OnInitialize(initConfig)

err := rootCmd.Execute()

@@ -113,3 +118,12 @@ func formatOneHookInfo(hook params.HookInfo) {
})
fmt.Println(t.Render())
}

func printAsJSON(value interface{}) {
asJs, err := json.Marshal(value)
if err != nil {
fmt.Printf("Failed to marshal value to json: %s", err)
os.Exit(1)
}
fmt.Println(string(asJs))
}
@@ -25,6 +25,7 @@ import (
apiClientInstances "github.com/cloudbase/garm/client/instances"
apiClientOrgs "github.com/cloudbase/garm/client/organizations"
apiClientRepos "github.com/cloudbase/garm/client/repositories"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
)

@@ -205,7 +206,7 @@ func init() {
runnerListCmd.Flags().StringVarP(&runnerOrganization, "org", "o", "", "List all runners from all pools within this organization.")
runnerListCmd.Flags().StringVarP(&runnerEnterprise, "enterprise", "e", "", "List all runners from all pools within this enterprise.")
runnerListCmd.Flags().BoolVarP(&runnerAll, "all", "a", false, "List all runners, regardless of org or repo.")
runnerListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include information about tasks.")
runnerListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
runnerListCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise", "all")

runnerDeleteCmd.Flags().BoolVarP(&forceRemove, "force-remove-runner", "f", false, "Forcefully remove a runner. If set to true, GARM will ignore provider errors when removing the runner.")

@@ -222,18 +223,25 @@ func init() {
}

func formatInstances(param []params.Instance, detailed bool) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(param)
return
}
t := table.NewWriter()
header := table.Row{"Nr", "Name", "Status", "Runner Status", "Pool ID"}
if detailed {
header = append(header, "Job Name", "Started At", "Run ID", "Repository")
header = append(header, "Created At", "Updated At", "Job Name", "Started At", "Run ID", "Repository")
}
t.AppendHeader(header)

for idx, inst := range param {
row := table.Row{idx + 1, inst.Name, inst.Status, inst.RunnerStatus, inst.PoolID}
if detailed && inst.Job != nil {
repo := fmt.Sprintf("%s/%s", inst.Job.RepositoryOwner, inst.Job.RepositoryName)
row = append(row, inst.Job.Name, inst.Job.StartedAt, inst.Job.RunID, repo)
if detailed {
row = append(row, inst.CreatedAt, inst.UpdatedAt)
if inst.Job != nil {
repo := fmt.Sprintf("%s/%s", inst.Job.RepositoryOwner, inst.Job.RepositoryName)
row = append(row, inst.Job.Name, inst.Job.StartedAt, inst.Job.RunID, repo)
}
}
t.AppendRow(row)
t.AppendSeparator()

@@ -242,12 +250,18 @@ func formatInstances(param []params.Instance, detailed bool) {
}

func formatSingleInstance(instance params.Instance) {
if outputFormat == common.OutputFormatJSON {
printAsJSON(instance)
return
}
t := table.NewWriter()

header := table.Row{"Field", "Value"}

t.AppendHeader(header)
t.AppendRow(table.Row{"ID", instance.ID}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"Created At", instance.CreatedAt})
t.AppendRow(table.Row{"Updated At", instance.UpdatedAt})
t.AppendRow(table.Row{"Provider ID", instance.ProviderID}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"Name", instance.Name}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"OS Type", instance.OSType}, table.RowConfig{AutoMerge: false})
cmd/garm-cli/common/cobra.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package common

import "fmt"

type OutputFormat string

const (
    OutputFormatTable OutputFormat = "table"
    OutputFormatJSON  OutputFormat = "json"
)

func (o *OutputFormat) String() string {
    if o == nil {
        return ""
    }
    return string(*o)
}

func (o *OutputFormat) Set(value string) error {
    switch value {
    case "table", "json":
        *o = OutputFormat(value)
    default:
        return fmt.Errorf("allowed formats are: json, table")
    }
    return nil
}

func (o *OutputFormat) Type() string {
    return "string"
}
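The `OutputFormat` type above implements the pflag.Value interface (String/Set/Type) and backs the global `--format` flag registered on the root command earlier in this diff. A short usage sketch (the listed command is just an example):

```bash
# Tables remain the default output
garm-cli pool list --all

# Switch any listing to JSON, e.g. for scripting
garm-cli pool list --all --format json | jq .

# Anything other than "table" or "json" is rejected by OutputFormat.Set()
# with: allowed formats are: json, table
```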
@@ -358,9 +358,6 @@ func (g *Github) Validate() error {
if g.Name == "" {
return fmt.Errorf("missing credentials name")
}
if g.Description == "" {
return fmt.Errorf("missing credentials description")
}

if g.APIBaseURL != "" {
if _, err := url.ParseRequestURI(g.APIBaseURL); err != nil {

@@ -551,7 +548,8 @@ func (d *Database) Validate() error {

// SQLite is the config entry for the sqlite3 section
type SQLite struct {
DBFile string `toml:"db_file" json:"db-file"`
DBFile string `toml:"db_file" json:"db-file"`
BusyTimeoutSeconds int `toml:"busy_timeout_seconds" json:"busy-timeout-seconds"`
}

func (s *SQLite) Validate() error {

@@ -571,7 +569,12 @@ func (s *SQLite) Validate() error {
}

func (s *SQLite) ConnectionString() (string, error) {
return fmt.Sprintf("%s?_journal_mode=WAL&_foreign_keys=ON", s.DBFile), nil
connectionString := fmt.Sprintf("%s?_journal_mode=WAL&_foreign_keys=ON", s.DBFile)
if s.BusyTimeoutSeconds > 0 {
timeout := s.BusyTimeoutSeconds * 1000
connectionString = fmt.Sprintf("%s&_busy_timeout=%d", connectionString, timeout)
}
return connectionString, nil
}

// MySQL is the config entry for the mysql section
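A minimal sketch of how the new SQLite busy timeout could be set in GARM's TOML config. The key name comes from the `toml:"busy_timeout_seconds"` struct tag above; the section layout and paths are assumptions based on the usual GARM config file:

```toml
# /etc/garm/config.toml (path taken from the Dockerfile ENTRYPOINT above)
[database]
  backend = "sqlite3"

  [database.sqlite3]
    db_file = "/etc/garm/garm.db"
    # Assumed knob: wait up to 5 seconds on a locked database before
    # returning SQLITE_BUSY (converted to 5000 ms in ConnectionString()).
    busy_timeout_seconds = 5
```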
@@ -389,6 +389,12 @@ func TestGormParams(t *testing.T) {
require.Equal(t, SQLiteBackend, dbType)
require.Equal(t, filepath.Join(dir, "garm.db?_journal_mode=WAL&_foreign_keys=ON"), uri)

cfg.SQLite.BusyTimeoutSeconds = 5
dbType, uri, err = cfg.GormParams()
require.Nil(t, err)
require.Equal(t, SQLiteBackend, dbType)
require.Equal(t, filepath.Join(dir, "garm.db?_journal_mode=WAL&_foreign_keys=ON&_busy_timeout=5000"), uri)

cfg.DbBackend = MySQLBackend
cfg.MySQL = getMySQLDefaultConfig()
cfg.SQLite = SQLite{}

@@ -667,15 +673,6 @@ func TestGithubConfig(t *testing.T) {
},
errString: "missing credentials name",
},
{
name: "Description is empty",
cfg: Github{
Name: "dummy_creds",
Description: "",
OAuth2Token: "bogus",
},
errString: "missing credentials description",
},
{
name: "OAuth token is set in the PAT section",
cfg: Github{

@@ -29,6 +29,10 @@ import (
// whatever programming language you wish, while still remaining compatible
// with garm.
type External struct {
// InterfaceVersion is the version of the interface that the external
// provider implements. This is used to ensure compatibility between
// the external provider and garm.
InterfaceVersion string `toml:"interface_version" json:"interface-version"`
// ConfigFile is the path on disk to a file which will be passed to
// the external binary as an environment variable: GARM_PROVIDER_CONFIG
// You can use this file for any configuration you need to do for the
@@ -507,7 +507,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchTagErr() {
WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))

entity, err := s.Fixtures.Enterprises[0].GetEntity()

@@ -527,7 +527,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBAddingPoolErr() {
WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).

@@ -555,7 +555,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBSaveTagErr() {
WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).

@@ -586,7 +586,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchPoolErr() {
WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -55,6 +55,8 @@ func (s *sqlDatabase) sqlToCommonGithubCredentials(creds GithubCredentials) (par
UploadBaseURL: creds.Endpoint.UploadBaseURL,
CABundle: creds.Endpoint.CACertBundle,
AuthType: creds.AuthType,
CreatedAt: creds.CreatedAt,
UpdatedAt: creds.UpdatedAt,
Endpoint: ep,
CredentialsPayload: data,
}

@@ -94,6 +96,8 @@ func (s *sqlDatabase) sqlToCommonGithubEndpoint(ep GithubEndpoint) (params.Githu
BaseURL: ep.BaseURL,
UploadBaseURL: ep.UploadBaseURL,
CACertBundle: ep.CACertBundle,
CreatedAt: ep.CreatedAt,
UpdatedAt: ep.UpdatedAt,
}, nil
}

@@ -84,7 +84,8 @@ func (s *sqlDatabase) paramsJobToWorkflowJob(ctx context.Context, job params.Job
if job.RunnerName != "" {
instance, err := s.getInstanceByName(s.ctx, job.RunnerName)
if err != nil {
slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get instance by name")
// This usually is very normal as not all jobs run on our runners.
slog.DebugContext(ctx, "failed to get instance by name", "instance_name", job.RunnerName)
} else {
workflofJob.InstanceID = &instance.ID
}

@@ -244,7 +245,8 @@ func (s *sqlDatabase) CreateOrUpdateJob(ctx context.Context, job params.Job) (pa
if err == nil {
workflowJob.InstanceID = &instance.ID
} else {
slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get instance by name")
// This usually is very normal as not all jobs run on our runners.
slog.DebugContext(ctx, "failed to get instance by name", "instance_name", job.RunnerName)
}
}
@@ -509,7 +509,7 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchTagErr() {
WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))

entity, err := s.Fixtures.Orgs[0].GetEntity()

@@ -530,7 +530,7 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBAddingPoolErr() {
WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).

@@ -558,7 +558,7 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBSaveTagErr() {
WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).

@@ -589,7 +589,7 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchPoolErr() {
WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -206,7 +206,7 @@ func (s *sqlDatabase) findPoolByTags(id string, poolType params.GithubEntityType
}

var pools []Pool
where := fmt.Sprintf("tags.name in ? and %s = ? and enabled = true", fieldName)
where := fmt.Sprintf("tags.name COLLATE NOCASE in ? and %s = ? and enabled = true", fieldName)
q := s.conn.Joins("JOIN pool_tags on pool_tags.pool_id=pools.id").
Joins("JOIN tags on tags.id=pool_tags.tag_id").
Group("pools.id").
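The `COLLATE NOCASE` qualifier added above (and mirrored in the test expectations throughout this diff) makes tag matching case-insensitive at the SQLite level. A quick way to observe the behaviour from the sqlite3 shell (database path and tag value are hypothetical):

```bash
# 'self-hosted', 'Self-Hosted' and 'SELF-HOSTED' now resolve to the same tag row
sqlite3 /etc/garm/garm.db \
  "SELECT id, name FROM tags WHERE name = 'Self-Hosted' COLLATE NOCASE;"
```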
@@ -551,7 +551,7 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchTagErr() {
WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))

entity, err := s.Fixtures.Repos[0].GetEntity()

@@ -573,7 +573,7 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBAddingPoolErr() {
WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).

@@ -602,7 +602,7 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBSaveTagErr() {
WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).

@@ -633,7 +633,7 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchPoolErr() {
WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -65,6 +65,7 @@ func (s *sqlDatabase) sqlToParamsInstance(instance Instance) (params.Instance, e
MetadataURL: instance.MetadataURL,
StatusMessages: []params.StatusMessage{},
CreateAttempt: instance.CreateAttempt,
CreatedAt: instance.CreatedAt,
UpdatedAt: instance.UpdatedAt,
TokenFetched: instance.TokenFetched,
JitConfiguration: jitConfig,

@@ -127,6 +128,8 @@ func (s *sqlDatabase) sqlToCommonOrganization(org Organization, detailed bool) (
WebhookSecret: string(secret),
PoolBalancerType: org.PoolBalancerType,
Endpoint: endpoint,
CreatedAt: org.CreatedAt,
UpdatedAt: org.UpdatedAt,
}

if org.CredentialsID != nil {

@@ -175,6 +178,8 @@ func (s *sqlDatabase) sqlToCommonEnterprise(enterprise Enterprise, detailed bool
Pools: make([]params.Pool, len(enterprise.Pools)),
WebhookSecret: string(secret),
PoolBalancerType: enterprise.PoolBalancerType,
CreatedAt: enterprise.CreatedAt,
UpdatedAt: enterprise.UpdatedAt,
Endpoint: endpoint,
}

@@ -224,6 +229,8 @@ func (s *sqlDatabase) sqlToCommonPool(pool Pool) (params.Pool, error) {
ExtraSpecs: json.RawMessage(pool.ExtraSpecs),
GitHubRunnerGroup: pool.GitHubRunnerGroup,
Priority: pool.Priority,
CreatedAt: pool.CreatedAt,
UpdatedAt: pool.UpdatedAt,
}

if pool.RepoID != nil {

@@ -285,6 +292,8 @@ func (s *sqlDatabase) sqlToCommonRepository(repo Repository, detailed bool) (par
Pools: make([]params.Pool, len(repo.Pools)),
WebhookSecret: string(secret),
PoolBalancerType: repo.PoolBalancerType,
CreatedAt: repo.CreatedAt,
UpdatedAt: repo.UpdatedAt,
Endpoint: endpoint,
}

@@ -331,7 +340,7 @@ func (s *sqlDatabase) sqlToParamsUser(user User) params.User {

func (s *sqlDatabase) getOrCreateTag(tx *gorm.DB, tagName string) (Tag, error) {
var tag Tag
q := tx.Where("name = ?", tagName).First(&tag)
q := tx.Where("name = ? COLLATE NOCASE", tagName).First(&tag)
if q.Error == nil {
return tag, nil
}

@@ -402,7 +411,7 @@ func (s *sqlDatabase) updatePool(tx *gorm.DB, pool Pool, param params.UpdatePool
}

tags := []Tag{}
if param.Tags != nil && len(param.Tags) > 0 {
if len(param.Tags) > 0 {
for _, val := range param.Tags {
t, err := s.getOrCreateTag(tx, val)
if err != nil {
@@ -8,15 +8,7 @@ Performance is often important when running GitHub action runners with garm. Thi

When a new instance is created by garm, it usually downloads the latest available GitHub action runner binary, installs the requirements and starts it afterwards. This can be a time consuming task that quickly adds up when a lot of instances are created by garm throughout the day. Therefore it is recommended to include the GitHub action runner binary inside of the used image.

There are two ways to do that:

1. Add the extracted runner to `/opt/cache/actions-runner/latest` in which case, garm won't do any version checking and will blindly trust that whatever you put there is indeed the latest. This is useful if you want to run a pre-release of the runner or have your own patches applied to it. Also GitHub runners have an auto-update mechanism. When it detects that a new version is available, it updates itself to the latest version.

2. Add the extracted runner to `/opt/cache/actions-runner/$VERSION` where `$VERSION` is the version of the runner. In this case, if what garm fetches from GitHub is different than what you bundled in the image, it will download and install the version indicated by GitHub.

Note, when bundling the runner with your image, you will have to download it, extract it to one of the above mentioned locations and also run the `./bin/installdependencies.sh` inside the extracted folder. All dependencies needed to run the runner must be pre-installed when bundling.

Example steps:
Example steps for setting a cached runner on a linux image in LXD:

```bash
# Create a temporary instance from your base image

@@ -26,27 +18,29 @@ lxc launch <BASE_IMAGE> temp
lxc exec temp -- bash

# Get and install the runner
mkdir -p /opt/cache/actions-runner/latest
cd /opt/cache/actions-runner/latest
curl -o actions-runner-linux-x64-2.305.0.tar.gz -L https://github.com/actions/runner/releases/download/v2.305.0/actions-runner-linux-x64-2.305.0.tar.gz
tar xzf ./actions-runner-linux-x64-2.305.0.tar.gz
./bin/installdependencies.sh
mkdir -p /home/runner/actions-runner
cd /home/runner/actions-runner
curl -O -L https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz
# Extract the installer
tar xzf ./actions-runner-linux-x64-2.320.0.tar.gz

# Exit the container
exit

# Stop the instance and publish it as a new image
lxc stop temp
lxc publish temp --alias BASE_IMAGE-2.305.0
lxc publish temp --alias BASE_IMAGE-2.320.0

# Delete the temporary instance
lxc delete temp

# Update garm to use the new image
garm-cli pool update <POOL_ID> \
--image=BASE_IMAGE-2.305.0
--image=BASE_IMAGE-2.320.0
```

You can read more about cached runners in the [Using Cached Runners](https://github.com/cloudbase/garm/blob/main/doc/using_cached_runners.md) documentation.

### Disable updates

By default garm configures the `cloud-init` process of a new instance to update packages on startup. To prevent this from happening (and therefore reduce the time needed to start an instance) garm can be configured accordingly.
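A sketch of what "configured accordingly" might look like in practice. Both the CLI flag and the JSON key below are assumptions about the provider-side extra specs; check your provider's documentation for the exact names:

```bash
# Hypothetical example: pass a JSON extra specs blob to the pool so that
# cloud-init skips the package upgrade step on first boot.
garm-cli pool update <POOL_ID> \
  --extra-specs='{"disable_updates": true}'
```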
52
doc/using_cached_runners.md
Normal file
52
doc/using_cached_runners.md
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
# Using Cached Runners
|
||||
|
||||
## GitHub Action Runners and GARM
|
||||
|
||||
When a new instance is created by garm, it usually downloads the latest available GitHub action runner binary, installs the requirements and starts it afterwards. This can be a time consuming task that quickly adds up when a lot of instances are created by garm throughout the day. Therefore it is recommended to include the GitHub action runner binary inside of the used image.
|
||||
|
||||
GARM supports cached runners on Linux and Windows images, in a simple manner. GARM verifies if the runner path exists (`C:\actions-runner` or `/home/runner/actions-runner`) on the chosen image, thus knowing if it needs to create the path and download the runner or use the existent runner. In order to simplify setup and validation of the runner, the check is based on the user properly creating, downloading and installing the runner in the predefined path on the target OS.
|
||||
|
||||
>**NOTE:** More about these paths will be presented below in the sections for each target OS.

### Cached Runners on Linux Images

On a Linux image, GARM expects the cached runner to be set up in a static, predefined way: installed in the `/home/runner/actions-runner` directory. The user therefore needs to configure their custom image properly in order for GARM to use the cached runner instead of downloading the latest available GitHub action runner binary.

In order to configure a cached GitHub action runner to work with GARM, the following steps need to be followed:

1. Create the `actions-runner` directory inside the `/home/runner` directory (the home path of the garm runner)
2. Download the desired version of the runner package
3. Extract the installer inside the `actions-runner` directory

> **NOTE:** These steps are based on the instructions from the [actions/runner](https://github.com/actions/runner/releases) releases page for installing the GitHub action runner on Linux x64. The full list of commands looks like this:

```bash
# Create a folder
mkdir actions-runner && cd actions-runner
# Download the latest runner package
curl -O -L https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz
# Extract the installer
tar xzf ./actions-runner-linux-x64-2.320.0.tar.gz
```

### Cached Runners on Windows Images

On a Windows image, GARM expects the cached runner to be set up in a static, predefined way: installed in the `C:\actions-runner\` folder. The user therefore needs to configure their custom image properly in order for GARM to use the cached runner instead of downloading the latest available GitHub action runner binary.

In order to configure a cached GitHub action runner to work with GARM, the following steps need to be followed:

1. Create the `actions-runner` folder inside the root folder (`C:\`)
2. Download the desired version of the runner package
3. Extract the installer into the folder created at step 1 (`C:\actions-runner\`)

> **NOTE:** These steps are based on the instructions from the [actions/runner](https://github.com/actions/runner/releases) releases page for installing the GitHub action runner on Windows x64. The full list of commands looks like this:

```powershell
# Create a folder under the drive root
mkdir \actions-runner ; cd \actions-runner
# Download the latest runner package
Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-win-x64-2.320.0.zip -OutFile actions-runner-win-x64-2.320.0.zip
# Extract the installer
Add-Type -AssemblyName System.IO.Compression.FileSystem ;
[System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD\actions-runner-win-x64-2.320.0.zip", "$PWD")
```
|
||||
85
go.mod
85
go.mod
|
|
@ -1,42 +1,42 @@
|
|||
module github.com/cloudbase/garm
|
||||
|
||||
go 1.22
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.22.3
|
||||
toolchain go1.23.6
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.3.2
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.10.0
|
||||
github.com/cloudbase/garm-provider-common v0.1.3
|
||||
github.com/BurntSushi/toml v1.5.0
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.15.0
|
||||
github.com/cloudbase/garm-provider-common v0.1.4
|
||||
github.com/felixge/httpsnoop v1.0.4
|
||||
github.com/go-openapi/errors v0.22.0
|
||||
github.com/go-openapi/errors v0.22.1
|
||||
github.com/go-openapi/runtime v0.28.0
|
||||
github.com/go-openapi/strfmt v0.23.0
|
||||
github.com/go-openapi/swag v0.23.0
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1
|
||||
github.com/go-openapi/swag v0.23.1
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2
|
||||
github.com/google/go-github/v57 v57.0.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/gorilla/websocket v1.5.4-0.20240702125206-a62d9d2a8413
|
||||
github.com/jedib0t/go-pretty/v6 v6.5.8
|
||||
github.com/juju/clock v1.0.3
|
||||
github.com/juju/retry v1.0.0
|
||||
github.com/jedib0t/go-pretty/v6 v6.6.7
|
||||
github.com/juju/clock v1.1.1
|
||||
github.com/juju/retry v1.0.1
|
||||
github.com/manifoldco/promptui v0.9.0
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/stretchr/testify v1.9.0
|
||||
golang.org/x/crypto v0.25.0
|
||||
golang.org/x/oauth2 v0.19.0
|
||||
golang.org/x/sync v0.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
golang.org/x/crypto v0.37.0
|
||||
golang.org/x/oauth2 v0.29.0
|
||||
golang.org/x/sync v0.13.0
|
||||
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gorm.io/datatypes v1.2.0
|
||||
gorm.io/driver/mysql v1.5.6
|
||||
gorm.io/driver/sqlite v1.5.5
|
||||
gorm.io/gorm v1.25.9
|
||||
gorm.io/datatypes v1.2.5
|
||||
gorm.io/driver/mysql v1.5.7
|
||||
gorm.io/driver/sqlite v1.5.7
|
||||
gorm.io/gorm v1.25.12
|
||||
)
|
||||
|
||||
require (
|
||||
|
|
@ -46,17 +46,17 @@ require (
|
|||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chzyer/readline v1.5.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/analysis v0.23.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.1 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/loads v0.22.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/validate v0.24.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.8.1 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/google/go-github/v60 v60.0.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.9.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||
github.com/google/go-github/v71 v71.0.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
|
|
@ -65,28 +65,31 @@ require (
|
|||
github.com/juju/errors v1.0.0 // indirect
|
||||
github.com/juju/loggo v1.0.0 // indirect
|
||||
github.com/juju/testing v1.0.2 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.22 // indirect
|
||||
github.com/minio/sio v0.4.0 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.28 // indirect
|
||||
github.com/minio/sio v0.4.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.53.0 // indirect
|
||||
github.com/prometheus/procfs v0.13.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.63.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
|
||||
go.mongodb.org/mongo-driver v1.15.0 // indirect
|
||||
go.opentelemetry.io/otel v1.25.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.25.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.25.0 // indirect
|
||||
golang.org/x/net v0.27.0 // indirect
|
||||
golang.org/x/sys v0.22.0 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.17.3 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.35.0 // indirect
|
||||
golang.org/x/net v0.39.0 // indirect
|
||||
golang.org/x/sys v0.32.0 // indirect
|
||||
golang.org/x/text v0.24.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
|
|
|||
196
go.sum
196
go.sum
|
|
@ -1,13 +1,13 @@
|
|||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.10.0 h1:XWuWBRFEpqVrHepQob9yPS3Xg4K3Wr9QCx4fu8HbUNg=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.10.0/go.mod h1:qoGA4DxWPaYTgVCrmEspVSjlTu4WYAiSxMIhorMRXXc=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.15.0 h1:7r2rPUM04rgszMP0U1UZ1M5VoVVIlsaBSnpABfYxcQY=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.15.0/go.mod h1:PoH9Vhy82OeRFZfxsVrk3mfQhVkEzou9OOwPOsEhiXE=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
|
|
@ -19,25 +19,25 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
|
|||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cloudbase/garm-provider-common v0.1.3 h1:8pHSRs2ljwLHgtDrge68dZ7ILUW97VF5h2ZA2fQubGQ=
|
||||
github.com/cloudbase/garm-provider-common v0.1.3/go.mod h1:VIJzbcg5iwyD4ac99tnnwcActfwibn/VOt2MYOFjf2c=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cloudbase/garm-provider-common v0.1.4 h1:spRjl0PV4r8vKaCTNp6xBQbRKfls/cmbBEl/i/eGWSo=
|
||||
github.com/cloudbase/garm-provider-common v0.1.4/go.mod h1:sK26i2NpjjAjhanNKiWw8iPkqt+XeohTKpFnEP7JdZ4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
|
||||
github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
|
||||
github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
|
||||
github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
|
||||
github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
|
||||
github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
|
||||
github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
|
||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
||||
github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
|
||||
|
|
@ -48,28 +48,28 @@ github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9Z
|
|||
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
|
||||
github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
|
||||
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
|
||||
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
|
||||
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
|
||||
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
|
||||
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU=
|
||||
github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
|
||||
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-github/v57 v57.0.0 h1:L+Y3UPTY8ALM8x+TV0lg+IEBI+upibemtBD8Q9u7zHs=
|
||||
github.com/google/go-github/v57 v57.0.0/go.mod h1:s0omdnye0hvK/ecLvpsGfJMiRt85PimQh4oygmLIxHw=
|
||||
github.com/google/go-github/v60 v60.0.0 h1:oLG98PsLauFvvu4D/YPxq374jhSxFYdzQGNCyONLfn8=
|
||||
github.com/google/go-github/v60 v60.0.0/go.mod h1:ByhX2dP9XT9o/ll2yXAu2VD8l5eNVg8hD4Cr0S/LmQk=
|
||||
github.com/google/go-github/v71 v71.0.0 h1:Zi16OymGKZZMm8ZliffVVJ/Q9YZreDKONCr+WUd0Z30=
|
||||
github.com/google/go-github/v71 v71.0.0/go.mod h1:URZXObp2BLlMjwu0O8g4y6VBneUj2bCHgnI8FfgZ51M=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
|
|
@ -84,12 +84,14 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
|
|||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
|
||||
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
|
||||
github.com/jedib0t/go-pretty/v6 v6.5.8 h1:8BCzJdSvUbaDuRba4YVh+SKMGcAAKdkcF3SVFbrHAtQ=
|
||||
github.com/jedib0t/go-pretty/v6 v6.5.8/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E=
|
||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
|
||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
|
||||
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
|
||||
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
|
||||
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jedib0t/go-pretty/v6 v6.6.7 h1:m+LbHpm0aIAPLzLbMfn8dc3Ht8MW7lsSO4MPItz/Uuo=
|
||||
github.com/jedib0t/go-pretty/v6 v6.6.7/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
|
|
@ -97,41 +99,47 @@ github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/
|
|||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
|
||||
github.com/juju/clock v1.0.3 h1:yJHIsWXeU8j3QcBdiess09SzfiXRRrsjKPn2whnMeds=
|
||||
github.com/juju/clock v1.0.3/go.mod h1:HIBvJ8kiV/n7UHwKuCkdYL4l/MDECztHR2sAvWDxxf0=
|
||||
github.com/juju/clock v1.1.1 h1:NvgHG9DQmOpBevgt6gzkyimdWBooLXDy1cQn89qJzBI=
|
||||
github.com/juju/clock v1.1.1/go.mod h1:HIBvJ8kiV/n7UHwKuCkdYL4l/MDECztHR2sAvWDxxf0=
|
||||
github.com/juju/errors v1.0.0 h1:yiq7kjCLll1BiaRuNY53MGI0+EQ3rF6GB+wvboZDefM=
|
||||
github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8=
|
||||
github.com/juju/loggo v1.0.0 h1:Y6ZMQOGR9Aj3BGkiWx7HBbIx6zNwNkxhVNOHU2i1bl0=
|
||||
github.com/juju/loggo v1.0.0/go.mod h1:NIXFioti1SmKAlKNuUwbMenNdef59IF52+ZzuOmHYkg=
|
||||
github.com/juju/retry v1.0.0 h1:Tb1hFdDSPGLH/BGdYQOF7utQ9lA0ouVJX2imqgJK6tk=
|
||||
github.com/juju/retry v1.0.0/go.mod h1:SssN1eYeK3A2qjnFGTiVMbdzGJ2BfluaJblJXvuvgqA=
|
||||
github.com/juju/retry v1.0.1 h1:EVwOPq273wO1o0BCU7Ay7XE/bNb+bTNYsCK6y+BboAk=
|
||||
github.com/juju/retry v1.0.1/go.mod h1:SssN1eYeK3A2qjnFGTiVMbdzGJ2BfluaJblJXvuvgqA=
|
||||
github.com/juju/testing v1.0.2 h1:OR90RqCd9CJONxXamZAjLknpZdtqDyxqW8IwCbgw3i4=
|
||||
github.com/juju/testing v1.0.2/go.mod h1:h3Vd2rzB57KrdsBEy6R7bmSKPzP76BnNavt7i8PerwQ=
|
||||
github.com/juju/utils/v3 v3.0.0 h1:Gg3n63mGPbBuoXCo+EPJuMi44hGZfloI8nlCIebHu2Q=
|
||||
github.com/juju/utils/v3 v3.0.0/go.mod h1:8csUcj1VRkfjNIRzBFWzLFCMLwLqsRWvkmhfVAUwbC4=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lunixbochs/vtclean v0.0.0-20160125035106-4fbf7632a2c6/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
|
||||
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
|
||||
github.com/mattn/go-colorable v0.0.6/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.0-20160806122752-66b8e73f3f5c/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE=
|
||||
github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ=
|
||||
github.com/minio/sio v0.4.0 h1:u4SWVEm5lXSqU42ZWawV0D9I5AZ5YMmo2RXpEQ/kRhc=
|
||||
github.com/minio/sio v0.4.0/go.mod h1:oBSjJeGbBdRMZZwna07sX9EFzZy+ywu5aofRiV1g79I=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
|
||||
github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
|
||||
github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
|
||||
github.com/minio/sio v0.4.1 h1:EMe3YBC1nf+sRQia65Rutxi+Z554XPV0dt8BIBA+a/0=
|
||||
github.com/minio/sio v0.4.1/go.mod h1:oBSjJeGbBdRMZZwna07sX9EFzZy+ywu5aofRiV1g79I=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
|
||||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
|
|
@ -143,61 +151,63 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
|||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
|
||||
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
|
||||
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
|
||||
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
|
||||
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
|
||||
github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
|
||||
github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI=
|
||||
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI=
|
||||
go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc=
|
||||
go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
|
||||
go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
|
||||
go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
|
||||
go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
|
||||
go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
|
||||
go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
|
||||
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
|
||||
go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
|
||||
go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
|
||||
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
|
||||
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
|
||||
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
|
||||
golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY=
|
||||
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
|
@ -210,16 +220,16 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
|||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/datatypes v1.2.0 h1:5YT+eokWdIxhJgWHdrb2zYUimyk0+TaFth+7a0ybzco=
|
||||
gorm.io/datatypes v1.2.0/go.mod h1:o1dh0ZvjIjhH/bngTpypG6lVRJ5chTBxE09FH/71k04=
|
||||
gorm.io/driver/mysql v1.5.6 h1:Ld4mkIickM+EliaQZQx3uOJDJHtrd70MxAUqWqlx3Y8=
|
||||
gorm.io/driver/mysql v1.5.6/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
|
||||
gorm.io/datatypes v1.2.5 h1:9UogU3jkydFVW1bIVVeoYsTpLRgwDVW3rHfJG6/Ek9I=
|
||||
gorm.io/datatypes v1.2.5/go.mod h1:I5FUdlKpLb5PMqeMQhm30CQ6jXP8Rj89xkTeCSAaAD4=
|
||||
gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
|
||||
gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
|
||||
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
|
||||
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
|
||||
gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E=
|
||||
gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE=
|
||||
gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0=
|
||||
gorm.io/driver/sqlserver v1.4.1/go.mod h1:DJ4P+MeZbc5rvY58PnmN1Lnyvb5gw5NPzGshHDnJLig=
|
||||
gorm.io/driver/sqlite v1.5.7 h1:8NvsrhP0ifM7LX9G4zPB97NwovUakUxc+2V2uuf3Z1I=
|
||||
gorm.io/driver/sqlite v1.5.7/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDah4=
|
||||
gorm.io/driver/sqlserver v1.5.4 h1:xA+Y1KDNspv79q43bPyjDMUgHoYHLhXYmdFcYPobg8g=
|
||||
gorm.io/driver/sqlserver v1.5.4/go.mod h1:+frZ/qYmuna11zHPlh5oc2O6ZA/lS88Keb0XSH1Zh/g=
|
||||
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
gorm.io/gorm v1.25.9 h1:wct0gxZIELDk8+ZqF/MVnHLkA1rvYlBWUMv2EdsK1g8=
|
||||
gorm.io/gorm v1.25.9/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
|
||||
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ const (
|
|||
// WorkflowJobEvent is the event set in the webhook payload from github
|
||||
// when a workflow_job hook is sent.
|
||||
WorkflowJobEvent Event = "workflow_job"
|
||||
PingEvent Event = "ping"
|
||||
)
|
||||
|
||||
// WorkflowJob holds the payload sent by github when a workload_job is sent.
|
||||
|
|
|
|||
272
params/params.go
272
params/params.go
|
|
@ -128,10 +128,10 @@ func (e GithubEntityType) String() string {
|
|||
}
|
||||
|
||||
type StatusMessage struct {
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Message string `json:"message"`
|
||||
EventType EventType `json:"event_type"`
|
||||
EventLevel EventLevel `json:"event_level"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
EventType EventType `json:"event_type,omitempty"`
|
||||
EventLevel EventLevel `json:"event_level,omitempty"`
|
||||
}
|
||||
|
||||
type Instance struct {
|
||||
|
|
@ -144,7 +144,7 @@ type Instance struct {
|
|||
ProviderID string `json:"provider_id,omitempty"`
|
||||
|
||||
// AgentID is the github runner agent ID.
|
||||
AgentID int64 `json:"agent_id"`
|
||||
AgentID int64 `json:"agent_id,omitempty"`
|
||||
|
||||
// Name is the name associated with an instance. Depending on
|
||||
// the provider, this may or may not be useful in the context of
|
||||
|
|
@ -186,12 +186,15 @@ type Instance struct {
|
|||
// up.
|
||||
StatusMessages []StatusMessage `json:"status_messages,omitempty"`
|
||||
|
||||
// CreatedAt is the timestamp of the creation of this runner.
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
|
||||
// UpdatedAt is the timestamp of the last update to this runner.
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
|
||||
// GithubRunnerGroup is the github runner group to which the runner belongs.
|
||||
// The runner group must be created by someone with access to the enterprise.
|
||||
GitHubRunnerGroup string `json:"github-runner-group"`
|
||||
GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
|
||||
|
||||
// Job is the current job that is being serviced by this runner.
|
||||
Job *Job `json:"job,omitempty"`
|
||||
|
|
@ -217,21 +220,21 @@ func (i Instance) GetID() string {
|
|||
type Instances []Instance
|
||||
|
||||
type BootstrapInstance struct {
|
||||
Name string `json:"name"`
|
||||
Tools []*github.RunnerApplicationDownload `json:"tools"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Tools []*github.RunnerApplicationDownload `json:"tools,omitempty"`
|
||||
// RepoURL is the URL the github runner agent needs to configure itself.
|
||||
RepoURL string `json:"repo_url"`
|
||||
RepoURL string `json:"repo_url,omitempty"`
|
||||
// CallbackUrl is the URL where the instance can send a post, signaling
|
||||
// progress or status.
|
||||
CallbackURL string `json:"callback-url"`
|
||||
CallbackURL string `json:"callback-url,omitempty"`
|
||||
// MetadataURL is the URL where instances can fetch information needed to set themselves up.
|
||||
MetadataURL string `json:"metadata-url"`
|
||||
MetadataURL string `json:"metadata-url,omitempty"`
|
||||
// InstanceToken is the token that needs to be set by the instance in the headers
|
||||
// in order to send updated back to the garm via CallbackURL.
|
||||
InstanceToken string `json:"instance-token"`
|
||||
InstanceToken string `json:"instance-token,omitempty"`
|
||||
// SSHKeys are the ssh public keys we may want to inject inside the runners, if the
|
||||
// provider supports it.
|
||||
SSHKeys []string `json:"ssh-keys"`
|
||||
SSHKeys []string `json:"ssh-keys,omitempty"`
|
||||
// ExtraSpecs is an opaque raw json that gets sent to the provider
|
||||
// as part of the bootstrap params for instances. It can contain
|
||||
// any kind of data needed by providers. The contents of this field means
|
||||
|
|
@ -242,69 +245,71 @@ type BootstrapInstance struct {
|
|||
// GitHubRunnerGroup is the github runner group in which the newly installed runner
|
||||
// should be added to. The runner group must be created by someone with access to the
|
||||
// enterprise.
|
||||
GitHubRunnerGroup string `json:"github-runner-group"`
|
||||
GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
|
||||
|
||||
// CACertBundle is a CA certificate bundle which will be sent to instances and which
|
||||
// will tipically be installed as a system wide trusted root CA. by either cloud-init
|
||||
// or whatever mechanism the provider will use to set up the runner.
|
||||
CACertBundle []byte `json:"ca-cert-bundle"`
|
||||
CACertBundle []byte `json:"ca-cert-bundle,omitempty"`
|
||||
|
||||
// OSArch is the target OS CPU architecture of the runner.
|
||||
OSArch commonParams.OSArch `json:"arch"`
|
||||
OSArch commonParams.OSArch `json:"arch,omitempty"`
|
||||
|
||||
// OSType is the target OS platform of the runner (windows, linux).
|
||||
OSType commonParams.OSType `json:"os_type"`
|
||||
OSType commonParams.OSType `json:"os_type,omitempty"`
|
||||
|
||||
// Flavor is the platform specific abstraction that defines what resources will be allocated
|
||||
// to the runner (CPU, RAM, disk space, etc). This field is meaningful to the provider which
|
||||
// handles the actual creation.
|
||||
Flavor string `json:"flavor"`
|
||||
Flavor string `json:"flavor,omitempty"`
|
||||
|
||||
// Image is the platform specific identifier of the operating system template that will be used
|
||||
// to spin up a new machine.
|
||||
Image string `json:"image"`
|
||||
Image string `json:"image,omitempty"`
|
||||
|
||||
// Labels are a list of github runner labels that will be added to the runner.
|
||||
Labels []string `json:"labels"`
|
||||
Labels []string `json:"labels,omitempty"`
|
||||
|
||||
// PoolID is the ID of the garm pool to which this runner belongs.
|
||||
PoolID string `json:"pool_id"`
|
||||
PoolID string `json:"pool_id,omitempty"`
|
||||
|
||||
// UserDataOptions are the options for the user data generation.
|
||||
UserDataOptions UserDataOptions `json:"user_data_options"`
|
||||
UserDataOptions UserDataOptions `json:"user_data_options,omitempty"`
|
||||
}
|
||||
|
||||
type UserDataOptions struct {
|
||||
DisableUpdatesOnBoot bool `json:"disable_updates_on_boot"`
|
||||
ExtraPackages []string `json:"extra_packages"`
|
||||
DisableUpdatesOnBoot bool `json:"disable_updates_on_boot,omitempty"`
|
||||
ExtraPackages []string `json:"extra_packages,omitempty"`
|
||||
}
|
||||
|
||||
type Tag struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
type Pool struct {
|
||||
RunnerPrefix
|
||||
|
||||
ID string `json:"id"`
|
||||
ProviderName string `json:"provider_name"`
|
||||
MaxRunners uint `json:"max_runners"`
|
||||
MinIdleRunners uint `json:"min_idle_runners"`
|
||||
Image string `json:"image"`
|
||||
Flavor string `json:"flavor"`
|
||||
OSType commonParams.OSType `json:"os_type"`
|
||||
OSArch commonParams.OSArch `json:"os_arch"`
|
||||
Tags []Tag `json:"tags"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Instances []Instance `json:"instances"`
|
||||
ID string `json:"id,omitempty"`
|
||||
ProviderName string `json:"provider_name,omitempty"`
|
||||
MaxRunners uint `json:"max_runners,omitempty"`
|
||||
MinIdleRunners uint `json:"min_idle_runners,omitempty"`
|
||||
Image string `json:"image,omitempty"`
|
||||
Flavor string `json:"flavor,omitempty"`
|
||||
OSType commonParams.OSType `json:"os_type,omitempty"`
|
||||
OSArch commonParams.OSArch `json:"os_arch,omitempty"`
|
||||
Tags []Tag `json:"tags,omitempty"`
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
Instances []Instance `json:"instances,omitempty"`
|
||||
RepoID string `json:"repo_id,omitempty"`
|
||||
RepoName string `json:"repo_name,omitempty"`
|
||||
OrgID string `json:"org_id,omitempty"`
|
||||
OrgName string `json:"org_name,omitempty"`
|
||||
EnterpriseID string `json:"enterprise_id,omitempty"`
|
||||
EnterpriseName string `json:"enterprise_name,omitempty"`
|
||||
RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout"`
|
||||
RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// ExtraSpecs is an opaque raw json that gets sent to the provider
|
||||
// as part of the bootstrap params for instances. It can contain
|
||||
// any kind of data needed by providers. The contents of this field means
|
||||
|
|
@ -313,12 +318,12 @@ type Pool struct {
|
|||
ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
|
||||
// GithubRunnerGroup is the github runner group in which the runners will be added.
|
||||
// The runner group must be created by someone with access to the enterprise.
|
||||
GitHubRunnerGroup string `json:"github-runner-group"`
|
||||
GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
|
||||
|
||||
// Priority is the priority of the pool. The higher the number, the higher the priority.
|
||||
// When fetching matching pools for a set of tags, the result will be sorted in descending
|
||||
// order of priority.
|
||||
Priority uint `json:"priority"`
|
||||
Priority uint `json:"priority,omitempty"`
|
||||
}
|
||||
|
||||
func (p Pool) GithubEntity() (GithubEntity, error) {
|
||||
|
|
@ -383,19 +388,21 @@ func (p *Pool) HasRequiredLabels(set []string) bool {
|
|||
type Pools []Pool
|
||||
|
||||
type Repository struct {
|
||||
ID string `json:"id"`
|
||||
Owner string `json:"owner"`
|
||||
Name string `json:"name"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Owner string `json:"owner,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Pools []Pool `json:"pool,omitempty"`
|
||||
// CredentialName is the name of the credentials associated with the enterprise.
|
||||
// This field is now deprecated. Use CredentialsID instead. This field will be
|
||||
// removed in v0.2.0.
|
||||
CredentialsName string `json:"credentials_name,omitempty"`
|
||||
CredentialsID uint `json:"credentials_id"`
|
||||
Credentials GithubCredentials `json:"credentials"`
|
||||
CredentialsID uint `json:"credentials_id,omitempty"`
|
||||
Credentials GithubCredentials `json:"credentials,omitempty"`
|
||||
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancing_type"`
|
||||
Endpoint GithubEndpoint `json:"endpoint"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
|
||||
Endpoint GithubEndpoint `json:"endpoint,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// Do not serialize sensitive info.
|
||||
WebhookSecret string `json:"-"`
|
||||
}
|
||||
|
|
@ -438,18 +445,20 @@ func (r Repository) String() string {
|
|||
type Repositories []Repository
|
||||
|
||||
type Organization struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Pools []Pool `json:"pool,omitempty"`
|
||||
// CredentialName is the name of the credentials associated with the enterprise.
|
||||
// This field is now deprecated. Use CredentialsID instead. This field will be
|
||||
// removed in v0.2.0.
|
||||
CredentialsName string `json:"credentials_name,omitempty"`
|
||||
Credentials GithubCredentials `json:"credentials"`
|
||||
CredentialsID uint `json:"credentials_id"`
|
||||
Credentials GithubCredentials `json:"credentials,omitempty"`
|
||||
CredentialsID uint `json:"credentials_id,omitempty"`
|
||||
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancing_type"`
|
||||
Endpoint GithubEndpoint `json:"endpoint"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
|
||||
Endpoint GithubEndpoint `json:"endpoint,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// Do not serialize sensitive info.
|
||||
WebhookSecret string `json:"-"`
|
||||
}
|
||||
|
|
@ -487,18 +496,20 @@ func (o Organization) GetBalancerType() PoolBalancerType {
|
|||
type Organizations []Organization
|
||||
|
||||
type Enterprise struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Pools []Pool `json:"pool,omitempty"`
|
||||
// CredentialName is the name of the credentials associated with the enterprise.
|
||||
// This field is now deprecated. Use CredentialsID instead. This field will be
|
||||
// removed in v0.2.0.
|
||||
CredentialsName string `json:"credentials_name,omitempty"`
|
||||
Credentials GithubCredentials `json:"credentials"`
|
||||
CredentialsID uint `json:"credentials_id"`
|
||||
Credentials GithubCredentials `json:"credentials,omitempty"`
|
||||
CredentialsID uint `json:"credentials_id,omitempty"`
|
||||
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancing_type"`
|
||||
Endpoint GithubEndpoint `json:"endpoint"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
|
||||
Endpoint GithubEndpoint `json:"endpoint,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// Do not serialize sensitive info.
|
||||
WebhookSecret string `json:"-"`
|
||||
}
|
||||
|
|
@ -537,14 +548,14 @@ type Enterprises []Enterprise
|
|||
|
||||
// Users holds information about a particular user
|
||||
type User struct {
|
||||
ID string `json:"id"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
Email string `json:"email"`
|
||||
Username string `json:"username"`
|
||||
FullName string `json:"full_name"`
|
||||
Enabled bool `json:"enabled"`
|
||||
IsAdmin bool `json:"is_admin"`
|
||||
ID string `json:"id,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
Username string `json:"username,omitempty"`
|
||||
FullName string `json:"full_name,omitempty"`
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
IsAdmin bool `json:"is_admin,omitempty"`
|
||||
// Do not serialize sensitive info.
|
||||
Password string `json:"-"`
|
||||
Generation uint `json:"-"`
|
||||
|
|
@ -553,67 +564,69 @@ type User struct {
|
|||
// JWTResponse holds the JWT token returned as a result of a
|
||||
// successful auth
|
||||
type JWTResponse struct {
|
||||
Token string `json:"token"`
|
||||
Token string `json:"token,omitempty"`
|
||||
}
|
||||
|
||||
type ControllerInfo struct {
|
||||
// ControllerID is the unique ID of this controller. This ID gets generated
|
||||
// automatically on controller init.
|
||||
ControllerID uuid.UUID `json:"controller_id"`
|
||||
ControllerID uuid.UUID `json:"controller_id,omitempty"`
|
||||
// Hostname is the hostname of the machine that runs this controller. In the
|
||||
// future, this field will be migrated to a separate table that will keep track
|
||||
// of each the controller nodes that are part of a cluster. This will happen when
|
||||
// we implement controller scale-out capability.
|
||||
Hostname string `json:"hostname"`
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
// MetadataURL is the public metadata URL of the GARM instance. This URL is used
|
||||
// by instances to fetch information they need to set themselves up. The URL itself
|
||||
// may be made available to runners via a reverse proxy or a load balancer. That
|
||||
// means that the user is responsible for telling GARM what the public URL is, by
|
||||
// setting this field.
|
||||
MetadataURL string `json:"metadata_url"`
|
||||
MetadataURL string `json:"metadata_url,omitempty"`
|
||||
// CallbackURL is the URL where instances can send updates back to the controller.
|
||||
// This URL is used by instances to send status updates back to the controller. The
|
||||
// URL itself may be made available to instances via a reverse proxy or a load balancer.
|
||||
// That means that the user is responsible for telling GARM what the public URL is, by
|
||||
// setting this field.
|
||||
CallbackURL string `json:"callback_url"`
|
||||
CallbackURL string `json:"callback_url,omitempty"`
|
||||
// WebhookURL is the base URL where the controller will receive webhooks from github.
|
||||
// When webhook management is used, this URL is used as a base to which the controller
|
||||
// UUID is appended and which will receive the webhooks.
|
||||
// The URL itself may be made available to instances via a reverse proxy or a load balancer.
|
||||
// That means that the user is responsible for telling GARM what the public URL is, by
|
||||
// setting this field.
|
||||
WebhookURL string `json:"webhook_url"`
|
||||
WebhookURL string `json:"webhook_url,omitempty"`
|
||||
// ControllerWebhookURL is the controller specific URL where webhooks will be received.
|
||||
// This field holds the WebhookURL defined above to which we append the ControllerID.
|
||||
// Functionally it is the same as WebhookURL, but it allows us to safely manage webhooks
|
||||
// from GARM without accidentally removing webhooks from other services or GARM controllers.
|
||||
ControllerWebhookURL string `json:"controller_webhook_url"`
|
||||
ControllerWebhookURL string `json:"controller_webhook_url,omitempty"`
|
||||
// MinimumJobAgeBackoff is the minimum time in seconds that a job must be in queued state
|
||||
// before GARM will attempt to allocate a runner for it. When set to a non zero value,
|
||||
// GARM will ignore the job until the job's age is greater than this value. When using
|
||||
// the min_idle_runners feature of a pool, this gives enough time for potential idle
|
||||
// runners to pick up the job before GARM attempts to allocate a new runner, thus avoiding
|
||||
// the need to potentially scale down runners later.
|
||||
MinimumJobAgeBackoff uint `json:"minimum_job_age_backoff"`
|
||||
MinimumJobAgeBackoff uint `json:"minimum_job_age_backoff,omitempty"`
|
||||
// Version is the version of the GARM controller.
|
||||
Version string `json:"version"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
type GithubCredentials struct {
|
||||
ID uint `json:"id"`
|
||||
ID uint `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
APIBaseURL string `json:"api_base_url"`
|
||||
UploadBaseURL string `json:"upload_base_url"`
|
||||
BaseURL string `json:"base_url"`
|
||||
APIBaseURL string `json:"api_base_url,omitempty"`
|
||||
UploadBaseURL string `json:"upload_base_url,omitempty"`
|
||||
BaseURL string `json:"base_url,omitempty"`
|
||||
CABundle []byte `json:"ca_bundle,omitempty"`
|
||||
AuthType GithubAuthType `toml:"auth_type" json:"auth-type"`
|
||||
AuthType GithubAuthType `json:"auth-type,omitempty"`
|
||||
|
||||
Repositories []Repository `json:"repositories,omitempty"`
|
||||
Organizations []Organization `json:"organizations,omitempty"`
|
||||
Enterprises []Enterprise `json:"enterprises,omitempty"`
|
||||
Endpoint GithubEndpoint `json:"endpoint"`
|
||||
Endpoint GithubEndpoint `json:"endpoint,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
|
||||
// Do not serialize sensitive info.
|
||||
CredentialsPayload []byte `json:"-"`
|
||||
|
|
@ -650,6 +663,7 @@ func (g GithubCredentials) GetHTTPClient(ctx context.Context) (*http.Client, err
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create github app installation transport: %w", err)
|
||||
}
|
||||
itr.BaseURL = g.APIBaseURL
|
||||
|
||||
tc = &http.Client{Transport: itr}
|
||||
default:
|
||||
|
|
@ -707,26 +721,26 @@ func (g GithubCredentials) RootCertificateBundle() (CertificateBundle, error) {
|
|||
type Credentials []GithubCredentials
|
||||
|
||||
type Provider struct {
|
||||
Name string `json:"name"`
|
||||
ProviderType ProviderType `json:"type"`
|
||||
Description string `json:"description"`
|
||||
Name string `json:"name,omitempty"`
|
||||
ProviderType ProviderType `json:"type,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
// used by swagger client generated code
|
||||
type Providers []Provider
|
||||
|
||||
type PoolManagerStatus struct {
|
||||
IsRunning bool `json:"running"`
|
||||
IsRunning bool `json:"running,omitempty"`
|
||||
FailureReason string `json:"failure_reason,omitempty"`
|
||||
}
|
||||
|
||||
type RunnerInfo struct {
|
||||
Name string
|
||||
Labels []string
|
||||
Name string `json:"name,omitempty"`
|
||||
Labels []string `json:"labels,omitempty"`
|
||||
}
|
||||
|
||||
type RunnerPrefix struct {
|
||||
Prefix string `json:"runner_prefix"`
|
||||
Prefix string `json:"runner_prefix,omitempty"`
|
||||
}
|
||||
|
||||
func (p RunnerPrefix) GetRunnerPrefix() string {
|
||||
|
|
@ -738,34 +752,34 @@ func (p RunnerPrefix) GetRunnerPrefix() string {
|
|||
|
||||
type Job struct {
|
||||
// ID is the ID of the job.
|
||||
ID int64 `json:"id"`
|
||||
ID int64 `json:"id,omitempty"`
|
||||
// RunID is the ID of the workflow run. A run may have multiple jobs.
|
||||
RunID int64 `json:"run_id"`
|
||||
RunID int64 `json:"run_id,omitempty"`
|
||||
// Action is the specific activity that triggered the event.
|
||||
Action string `json:"action"`
|
||||
Action string `json:"action,omitempty"`
|
||||
// Conclusion is the outcome of the job.
|
||||
// Possible values: "success", "failure", "neutral", "cancelled", "skipped",
|
||||
// "timed_out", "action_required"
|
||||
Conclusion string `json:"conclusion"`
|
||||
Conclusion string `json:"conclusion,omitempty"`
|
||||
// Status is the phase of the lifecycle that the job is currently in.
|
||||
// "queued", "in_progress" and "completed".
|
||||
Status string `json:"status"`
|
||||
Status string `json:"status,omitempty"`
|
||||
// Name is the name of the job that was triggered.
|
||||
Name string `json:"name"`
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
StartedAt time.Time
|
||||
CompletedAt time.Time
|
||||
StartedAt time.Time `json:"started_at,omitempty"`
|
||||
CompletedAt time.Time `json:"completed_at,omitempty"`
|
||||
|
||||
GithubRunnerID int64 `json:"runner_id"`
|
||||
RunnerName string `json:"runner_name"`
|
||||
RunnerGroupID int64 `json:"runner_group_id"`
|
||||
RunnerGroupName string `json:"runner_group_name"`
|
||||
GithubRunnerID int64 `json:"runner_id,omitempty"`
|
||||
RunnerName string `json:"runner_name,omitempty"`
|
||||
RunnerGroupID int64 `json:"runner_group_id,omitempty"`
|
||||
RunnerGroupName string `json:"runner_group_name,omitempty"`
|
||||
|
||||
// repository in which the job was triggered.
|
||||
RepositoryName string
|
||||
RepositoryOwner string
|
||||
RepositoryName string `json:"repository_name,omitempty"`
|
||||
RepositoryOwner string `json:"repository_owner,omitempty"`
|
||||
|
||||
Labels []string
|
||||
Labels []string `json:"labels,omitempty"`
|
||||
|
||||
// The entity that received the hook.
|
||||
//
|
||||
|
|
@ -780,30 +794,30 @@ type Job struct {
|
|||
OrgID *uuid.UUID `json:"org_id,omitempty"`
|
||||
EnterpriseID *uuid.UUID `json:"enterprise_id,omitempty"`
|
||||
|
||||
LockedBy uuid.UUID
|
||||
LockedBy uuid.UUID `json:"locked_by,omitempty"`
|
||||
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
}
|
||||
|
||||
// used by swagger client generated code
|
||||
type Jobs []Job
|
||||
|
||||
type InstallWebhookParams struct {
|
||||
WebhookEndpointType WebhookEndpointType `json:"webhook_endpoint_type"`
|
||||
InsecureSSL bool `json:"insecure_ssl"`
|
||||
WebhookEndpointType WebhookEndpointType `json:"webhook_endpoint_type,omitempty"`
|
||||
InsecureSSL bool `json:"insecure_ssl,omitempty"`
|
||||
}
|
||||
|
||||
type HookInfo struct {
|
||||
ID int64 `json:"id"`
|
||||
URL string `json:"url"`
|
||||
Events []string `json:"events"`
|
||||
Active bool `json:"active"`
|
||||
InsecureSSL bool `json:"insecure_ssl"`
|
||||
ID int64 `json:"id,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
Events []string `json:"events,omitempty"`
|
||||
Active bool `json:"active,omitempty"`
|
||||
InsecureSSL bool `json:"insecure_ssl,omitempty"`
|
||||
}
|
||||
|
||||
type CertificateBundle struct {
|
||||
RootCertificates map[string][]byte `json:"root_certificates"`
|
||||
RootCertificates map[string][]byte `json:"root_certificates,omitempty"`
|
||||
}
|
||||
|
||||
type UpdateSystemInfoParams struct {
|
||||
|
|
@ -813,12 +827,12 @@ type UpdateSystemInfoParams struct {
|
|||
}
|
||||
|
||||
type GithubEntity struct {
|
||||
Owner string `json:"owner"`
|
||||
Name string `json:"name"`
|
||||
ID string `json:"id"`
|
||||
EntityType GithubEntityType `json:"entity_type"`
|
||||
Credentials GithubCredentials `json:"credentials"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancing_type"`
|
||||
Owner string `json:"owner,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
EntityType GithubEntityType `json:"entity_type,omitempty"`
|
||||
Credentials GithubCredentials `json:"credentials,omitempty"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
|
||||
|
||||
WebhookSecret string `json:"-"`
|
||||
}
|
||||
|
|
@ -856,12 +870,14 @@ func (g GithubEntity) String() string {
|
|||
type GithubEndpoints []GithubEndpoint
|
||||
|
||||
type GithubEndpoint struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
APIBaseURL string `json:"api_base_url"`
|
||||
UploadBaseURL string `json:"upload_base_url"`
|
||||
BaseURL string `json:"base_url"`
|
||||
CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
APIBaseURL string `json:"api_base_url,omitempty"`
|
||||
UploadBaseURL string `json:"upload_base_url,omitempty"`
|
||||
BaseURL string `json:"base_url,omitempty"`
|
||||
CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
|
||||
Credentials []GithubCredentials `json:"credentials,omitempty"`
|
||||
}
|
||||
|
|
|
|||
|
|
@ -40,11 +40,11 @@ type InstanceRequest struct {
|
|||
}
|
||||
|
||||
type CreateRepoParams struct {
|
||||
Owner string `json:"owner"`
|
||||
Name string `json:"name"`
|
||||
CredentialsName string `json:"credentials_name"`
|
||||
WebhookSecret string `json:"webhook_secret"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancer_type"`
|
||||
Owner string `json:"owner,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
CredentialsName string `json:"credentials_name,omitempty"`
|
||||
WebhookSecret string `json:"webhook_secret,omitempty"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
|
||||
}
|
||||
|
||||
func (c *CreateRepoParams) Validate() error {
|
||||
|
|
@ -73,10 +73,10 @@ func (c *CreateRepoParams) Validate() error {
|
|||
}
|
||||
|
||||
type CreateOrgParams struct {
|
||||
Name string `json:"name"`
|
||||
CredentialsName string `json:"credentials_name"`
|
||||
WebhookSecret string `json:"webhook_secret"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancer_type"`
|
||||
Name string `json:"name,omitempty"`
|
||||
CredentialsName string `json:"credentials_name,omitempty"`
|
||||
WebhookSecret string `json:"webhook_secret,omitempty"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
|
||||
}
|
||||
|
||||
func (c *CreateOrgParams) Validate() error {
|
||||
|
|
@ -100,10 +100,10 @@ func (c *CreateOrgParams) Validate() error {
|
|||
}
|
||||
|
||||
type CreateEnterpriseParams struct {
|
||||
Name string `json:"name"`
|
||||
CredentialsName string `json:"credentials_name"`
|
||||
WebhookSecret string `json:"webhook_secret"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancer_type"`
|
||||
Name string `json:"name,omitempty"`
|
||||
CredentialsName string `json:"credentials_name,omitempty"`
|
||||
WebhookSecret string `json:"webhook_secret,omitempty"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
|
||||
}
|
||||
|
||||
func (c *CreateEnterpriseParams) Validate() error {
|
||||
|
|
@ -128,10 +128,10 @@ func (c *CreateEnterpriseParams) Validate() error {
|
|||
// NewUserParams holds the needed information to create
|
||||
// a new user
|
||||
type NewUserParams struct {
|
||||
Email string `json:"email"`
|
||||
Username string `json:"username"`
|
||||
FullName string `json:"full_name"`
|
||||
Password string `json:"password"`
|
||||
Email string `json:"email,omitempty"`
|
||||
Username string `json:"username,omitempty"`
|
||||
FullName string `json:"full_name,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
IsAdmin bool `json:"-"`
|
||||
Enabled bool `json:"-"`
|
||||
}
|
||||
|
|
@ -144,10 +144,10 @@ type UpdatePoolParams struct {
|
|||
MaxRunners *uint `json:"max_runners,omitempty"`
|
||||
MinIdleRunners *uint `json:"min_idle_runners,omitempty"`
|
||||
RunnerBootstrapTimeout *uint `json:"runner_bootstrap_timeout,omitempty"`
|
||||
Image string `json:"image"`
|
||||
Flavor string `json:"flavor"`
|
||||
OSType commonParams.OSType `json:"os_type"`
|
||||
OSArch commonParams.OSArch `json:"os_arch"`
|
||||
Image string `json:"image,omitempty"`
|
||||
Flavor string `json:"flavor,omitempty"`
|
||||
OSType commonParams.OSType `json:"os_type,omitempty"`
|
||||
OSArch commonParams.OSArch `json:"os_arch,omitempty"`
|
||||
ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
|
||||
// GithubRunnerGroup is the github runner group to which the runners of this
// pool will be added.
|
||||
|
|
@ -157,41 +157,41 @@ type UpdatePoolParams struct {
|
|||
}
|
||||
|
||||
type CreateInstanceParams struct {
|
||||
Name string
|
||||
OSType commonParams.OSType
|
||||
OSArch commonParams.OSArch
|
||||
Status commonParams.InstanceStatus
|
||||
RunnerStatus RunnerStatus
|
||||
CallbackURL string
|
||||
MetadataURL string
|
||||
Name string `json:"name,omitempty"`
|
||||
OSType commonParams.OSType `json:"os_type,omitempty"`
|
||||
OSArch commonParams.OSArch `json:"os_arch,omitempty"`
|
||||
Status commonParams.InstanceStatus `json:"status,omitempty"`
|
||||
RunnerStatus RunnerStatus `json:"runner_status,omitempty"`
|
||||
CallbackURL string `json:"callback_url,omitempty"`
|
||||
MetadataURL string `json:"metadata_url,omitempty"`
|
||||
// GithubRunnerGroup is the github runner group to which the runner belongs.
|
||||
// The runner group must be created by someone with access to the enterprise.
|
||||
GitHubRunnerGroup string
|
||||
CreateAttempt int `json:"-"`
|
||||
AgentID int64 `json:"-"`
|
||||
AditionalLabels []string
|
||||
JitConfiguration map[string]string
|
||||
GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
|
||||
CreateAttempt int `json:"-"`
|
||||
AgentID int64 `json:"-"`
|
||||
AditionalLabels []string `json:"aditional_labels,omitempty"`
|
||||
JitConfiguration map[string]string `json:"jit_configuration,omitempty"`
|
||||
}
|
||||
|
||||
type CreatePoolParams struct {
|
||||
RunnerPrefix
|
||||
|
||||
ProviderName string `json:"provider_name"`
|
||||
MaxRunners uint `json:"max_runners"`
|
||||
MinIdleRunners uint `json:"min_idle_runners"`
|
||||
Image string `json:"image"`
|
||||
Flavor string `json:"flavor"`
|
||||
OSType commonParams.OSType `json:"os_type"`
|
||||
OSArch commonParams.OSArch `json:"os_arch"`
|
||||
Tags []string `json:"tags"`
|
||||
Enabled bool `json:"enabled"`
|
||||
RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout"`
|
||||
ProviderName string `json:"provider_name,omitempty"`
|
||||
MaxRunners uint `json:"max_runners,omitempty"`
|
||||
MinIdleRunners uint `json:"min_idle_runners,omitempty"`
|
||||
Image string `json:"image,omitempty"`
|
||||
Flavor string `json:"flavor,omitempty"`
|
||||
OSType commonParams.OSType `json:"os_type,omitempty"`
|
||||
OSArch commonParams.OSArch `json:"os_arch,omitempty"`
|
||||
Tags []string `json:"tags,omitempty"`
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
|
||||
ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
|
||||
// GithubRunnerGroup is the github runner group to which the runners of this
// pool will be added.
|
||||
// The runner group must be created by someone with access to the enterprise.
|
||||
GitHubRunnerGroup string `json:"github-runner-group"`
|
||||
Priority uint `json:"priority"`
|
||||
GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
|
||||
Priority uint `json:"priority,omitempty"`
|
||||
}
|
||||
|
||||
func (p *CreatePoolParams) Validate() error {
|
||||
|
|
@ -242,17 +242,17 @@ type UpdateInstanceParams struct {
|
|||
}
|
||||
|
||||
type UpdateUserParams struct {
|
||||
FullName string `json:"full_name"`
|
||||
Password string `json:"password"`
|
||||
Enabled *bool `json:"enabled"`
|
||||
FullName string `json:"full_name,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Enabled *bool `json:"enabled,omitempty"`
|
||||
}
|
||||
|
||||
// PasswordLoginParams holds information used during
|
||||
// password authentication, that will be passed to a
|
||||
// password login function
|
||||
type PasswordLoginParams struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
}
|
||||
|
||||
// Validate checks if the username and password are set
|
||||
|
|
@ -264,14 +264,14 @@ func (p PasswordLoginParams) Validate() error {
|
|||
}
|
||||
|
||||
type UpdateEntityParams struct {
|
||||
CredentialsName string `json:"credentials_name"`
|
||||
WebhookSecret string `json:"webhook_secret"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancer_type"`
|
||||
CredentialsName string `json:"credentials_name,omitempty"`
|
||||
WebhookSecret string `json:"webhook_secret,omitempty"`
|
||||
PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
|
||||
}
|
||||
|
||||
type InstanceUpdateMessage struct {
|
||||
Status RunnerStatus `json:"status"`
|
||||
Message string `json:"message"`
|
||||
Status RunnerStatus `json:"status,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
AgentID *int64 `json:"agent_id,omitempty"`
|
||||
}
|
||||
|
||||
|
|
@ -401,13 +401,13 @@ func (u UpdateGithubEndpointParams) Validate() error {
|
|||
}
|
||||
|
||||
type GithubPAT struct {
|
||||
OAuth2Token string `json:"oauth2_token"`
|
||||
OAuth2Token string `json:"oauth2_token,omitempty"`
|
||||
}
|
||||
|
||||
type GithubApp struct {
|
||||
AppID int64 `json:"app_id"`
|
||||
InstallationID int64 `json:"installation_id"`
|
||||
PrivateKeyBytes []byte `json:"private_key_bytes"`
|
||||
AppID int64 `json:"app_id,omitempty"`
|
||||
InstallationID int64 `json:"installation_id,omitempty"`
|
||||
PrivateKeyBytes []byte `json:"private_key_bytes,omitempty"`
|
||||
}
|
||||
|
||||
func (g GithubApp) Validate() error {
|
||||
|
|
@ -437,10 +437,10 @@ func (g GithubApp) Validate() error {
|
|||
}
|
||||
|
||||
type CreateGithubCredentialsParams struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
AuthType GithubAuthType `json:"auth_type"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
AuthType GithubAuthType `json:"auth_type,omitempty"`
|
||||
PAT GithubPAT `json:"pat,omitempty"`
|
||||
App GithubApp `json:"app,omitempty"`
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ import (
|
|||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
params "github.com/cloudbase/garm/params"
|
||||
"github.com/cloudbase/garm/runner/common"
|
||||
)
|
||||
|
||||
// Provider is an autogenerated mock type for the Provider type
|
||||
|
|
@ -35,7 +36,7 @@ func (_m *Provider) AsParams() params.Provider {
|
|||
}
|
||||
|
||||
// CreateInstance provides a mock function with given fields: ctx, bootstrapParams
|
||||
func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_provider_commonparams.BootstrapInstance) (garm_provider_commonparams.ProviderInstance, error) {
|
||||
func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_provider_commonparams.BootstrapInstance, createInstanceParams common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error) {
|
||||
ret := _m.Called(ctx, bootstrapParams)
|
||||
|
||||
if len(ret) == 0 {
|
||||
|
|
@ -63,7 +64,7 @@ func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_pro
|
|||
}
|
||||
|
||||
// DeleteInstance provides a mock function with given fields: ctx, instance
|
||||
func (_m *Provider) DeleteInstance(ctx context.Context, instance string) error {
|
||||
func (_m *Provider) DeleteInstance(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams) error {
|
||||
ret := _m.Called(ctx, instance)
|
||||
|
||||
if len(ret) == 0 {
|
||||
|
|
@ -99,7 +100,7 @@ func (_m *Provider) DisableJITConfig() bool {
|
|||
}
|
||||
|
||||
// GetInstance provides a mock function with given fields: ctx, instance
|
||||
func (_m *Provider) GetInstance(ctx context.Context, instance string) (garm_provider_commonparams.ProviderInstance, error) {
|
||||
func (_m *Provider) GetInstance(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error) {
|
||||
ret := _m.Called(ctx, instance)
|
||||
|
||||
if len(ret) == 0 {
|
||||
|
|
@ -127,7 +128,7 @@ func (_m *Provider) GetInstance(ctx context.Context, instance string) (garm_prov
|
|||
}
|
||||
|
||||
// ListInstances provides a mock function with given fields: ctx, poolID
|
||||
func (_m *Provider) ListInstances(ctx context.Context, poolID string) ([]garm_provider_commonparams.ProviderInstance, error) {
|
||||
func (_m *Provider) ListInstances(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error) {
|
||||
ret := _m.Called(ctx, poolID)
|
||||
|
||||
if len(ret) == 0 {
|
||||
|
|
@ -157,7 +158,7 @@ func (_m *Provider) ListInstances(ctx context.Context, poolID string) ([]garm_pr
|
|||
}
|
||||
|
||||
// RemoveAllInstances provides a mock function with given fields: ctx
|
||||
func (_m *Provider) RemoveAllInstances(ctx context.Context) error {
|
||||
func (_m *Provider) RemoveAllInstances(ctx context.Context, removeAllInstances common.RemoveAllInstancesParams) error {
|
||||
ret := _m.Called(ctx)
|
||||
|
||||
if len(ret) == 0 {
|
||||
|
|
@ -175,7 +176,7 @@ func (_m *Provider) RemoveAllInstances(ctx context.Context) error {
|
|||
}
|
||||
|
||||
// Start provides a mock function with given fields: ctx, instance
|
||||
func (_m *Provider) Start(ctx context.Context, instance string) error {
|
||||
func (_m *Provider) Start(ctx context.Context, instance string, startParams common.StartParams) error {
|
||||
ret := _m.Called(ctx, instance)
|
||||
|
||||
if len(ret) == 0 {
|
||||
|
|
@ -193,7 +194,7 @@ func (_m *Provider) Start(ctx context.Context, instance string) error {
|
|||
}
|
||||
|
||||
// Stop provides a mock function with given fields: ctx, instance
|
||||
func (_m *Provider) Stop(ctx context.Context, instance string) error {
|
||||
func (_m *Provider) Stop(ctx context.Context, instance string, stopParams common.StopParams) error {
|
||||
ret := _m.Called(ctx, instance)
|
||||
|
||||
if len(ret) == 0 {
|
||||
|
|
|
|||
88
runner/common/params.go
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
// Copyright 2022 Cloudbase Solutions SRL
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package common
|
||||
|
||||
import "github.com/cloudbase/garm/params"
|
||||
|
||||
// Constants used for the provider interface version.
|
||||
const (
|
||||
Version010 = "v0.1.0"
|
||||
Version011 = "v0.1.1"
|
||||
)
|
||||
|
||||
// Each struct is a wrapper for the actual parameters struct for a specific version.
|
||||
// Version 0.1.0 doesn't have any specific parameters, so there is no need for a struct for it.
|
||||
type CreateInstanceParams struct {
|
||||
CreateInstanceV011 CreateInstanceV011Params
|
||||
}
|
||||
|
||||
type DeleteInstanceParams struct {
|
||||
DeleteInstanceV011 DeleteInstanceV011Params
|
||||
}
|
||||
|
||||
type GetInstanceParams struct {
|
||||
GetInstanceV011 GetInstanceV011Params
|
||||
}
|
||||
|
||||
type ListInstancesParams struct {
|
||||
ListInstancesV011 ListInstancesV011Params
|
||||
}
|
||||
|
||||
type RemoveAllInstancesParams struct {
|
||||
RemoveAllInstancesV011 RemoveAllInstancesV011Params
|
||||
}
|
||||
|
||||
type StopParams struct {
|
||||
StopV011 StopV011Params
|
||||
}
|
||||
|
||||
type StartParams struct {
|
||||
StartV011 StartV011Params
|
||||
}
|
||||
|
||||
// Struct for the base provider parameters.
|
||||
type ProviderBaseParams struct {
|
||||
PoolInfo params.Pool
|
||||
ControllerInfo params.ControllerInfo
|
||||
}
|
||||
|
||||
// Structs for version v0.1.1.
|
||||
type CreateInstanceV011Params struct {
|
||||
ProviderBaseParams
|
||||
}
|
||||
|
||||
type DeleteInstanceV011Params struct {
|
||||
ProviderBaseParams
|
||||
}
|
||||
|
||||
type GetInstanceV011Params struct {
|
||||
ProviderBaseParams
|
||||
}
|
||||
|
||||
type ListInstancesV011Params struct {
|
||||
ProviderBaseParams
|
||||
}
|
||||
|
||||
type RemoveAllInstancesV011Params struct {
|
||||
ProviderBaseParams
|
||||
}
|
||||
|
||||
type StopV011Params struct {
|
||||
ProviderBaseParams
|
||||
}
|
||||
|
||||
type StartV011Params struct {
|
||||
ProviderBaseParams
|
||||
}
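Version v0.1.0 providers take no extra parameters, so the wrappers above only carry data for v0.1.1. A short sketch (not from this commit; package and function names are illustrative) of how a caller such as the pool manager is expected to populate them, mirroring the usage added later in this diff:

package sketch

import (
	"context"

	commonParams "github.com/cloudbase/garm-provider-common/params"
	"github.com/cloudbase/garm/params"
	"github.com/cloudbase/garm/runner/common"
)

// listWithVersionedParams wraps the pool and controller info into the
// versioned structs defined above before calling the provider.
func listWithVersionedParams(ctx context.Context, provider common.Provider, pool params.Pool, ci params.ControllerInfo) ([]commonParams.ProviderInstance, error) {
	listParams := common.ListInstancesParams{
		ListInstancesV011: common.ListInstancesV011Params{
			ProviderBaseParams: common.ProviderBaseParams{
				PoolInfo:       pool,
				ControllerInfo: ci,
			},
		},
	}
	// A v0.1.0 shim simply ignores the extra argument.
	return provider.ListInstances(ctx, pool.ID, listParams)
}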
|
||||
|
|
@ -24,19 +24,19 @@ import (
|
|||
//go:generate mockery --all
|
||||
type Provider interface {
|
||||
// CreateInstance creates a new compute instance in the provider.
|
||||
CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance) (commonParams.ProviderInstance, error)
|
||||
CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, createInstanceParams CreateInstanceParams) (commonParams.ProviderInstance, error)
|
||||
// DeleteInstance will delete the instance in a provider.
|
||||
DeleteInstance(ctx context.Context, instance string) error
|
||||
DeleteInstance(ctx context.Context, instance string, deleteInstanceParams DeleteInstanceParams) error
|
||||
// GetInstance will return details about one instance.
|
||||
GetInstance(ctx context.Context, instance string) (commonParams.ProviderInstance, error)
|
||||
GetInstance(ctx context.Context, instance string, getInstanceParams GetInstanceParams) (commonParams.ProviderInstance, error)
|
||||
// ListInstances will list all instances for a provider.
|
||||
ListInstances(ctx context.Context, poolID string) ([]commonParams.ProviderInstance, error)
|
||||
ListInstances(ctx context.Context, poolID string, listInstancesParams ListInstancesParams) ([]commonParams.ProviderInstance, error)
|
||||
// RemoveAllInstances will remove all instances created by this provider.
|
||||
RemoveAllInstances(ctx context.Context) error
|
||||
RemoveAllInstances(ctx context.Context, removeAllInstancesParams RemoveAllInstancesParams) error
|
||||
// Stop shuts down the instance.
|
||||
Stop(ctx context.Context, instance string) error
|
||||
Stop(ctx context.Context, instance string, stopParams StopParams) error
|
||||
// Start boots up an instance.
|
||||
Start(ctx context.Context, instance string) error
|
||||
Start(ctx context.Context, instance string, startParams StartParams) error
|
||||
// DisableJITConfig tells us if the provider explicitly disables JIT configuration and
|
||||
// forces runner registration tokens to be used. This may happen if a provider has not yet
|
||||
// been updated to support JIT configuration.
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ After=network.target
|
|||
|
||||
[Service]
|
||||
ExecStart=/home/{{.RunAsUser}}/actions-runner/runsvc.sh
|
||||
User=runner
|
||||
User={{.RunAsUser}}
|
||||
WorkingDirectory=/home/{{.RunAsUser}}/actions-runner
|
||||
KillMode=process
|
||||
KillSignal=SIGTERM
|
||||
|
|
|
|||
|
|
@ -1,6 +1,15 @@
|
|||
package pool
|
||||
|
||||
import "sync"
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cloudbase/garm/runner/common"
|
||||
)
|
||||
|
||||
const (
|
||||
maxBackoffSeconds float64 = 1200 // 20 minutes
|
||||
)
|
||||
|
||||
type keyMutex struct {
|
||||
muxes sync.Map
|
||||
|
|
@ -27,3 +36,53 @@ func (k *keyMutex) Unlock(key string, remove bool) {
|
|||
func (k *keyMutex) Delete(key string) {
|
||||
k.muxes.Delete(key)
|
||||
}
|
||||
|
||||
type instanceBackOff struct {
|
||||
backoffSeconds float64
|
||||
lastRecordedFailureTime time.Time
|
||||
mux sync.Mutex
|
||||
}
|
||||
|
||||
type instanceDeleteBackoff struct {
|
||||
muxes sync.Map
|
||||
}
|
||||
|
||||
func (i *instanceDeleteBackoff) ShouldProcess(key string) (bool, time.Time) {
|
||||
backoff, loaded := i.muxes.LoadOrStore(key, &instanceBackOff{})
|
||||
if !loaded {
|
||||
return true, time.Time{}
|
||||
}
|
||||
|
||||
ib := backoff.(*instanceBackOff)
|
||||
ib.mux.Lock()
|
||||
defer ib.mux.Unlock()
|
||||
|
||||
if ib.lastRecordedFailureTime.IsZero() || ib.backoffSeconds == 0 {
|
||||
return true, time.Time{}
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
deadline := ib.lastRecordedFailureTime.Add(time.Duration(ib.backoffSeconds) * time.Second)
|
||||
return now.After(deadline), deadline
|
||||
}
|
||||
|
||||
func (i *instanceDeleteBackoff) Delete(key string) {
|
||||
i.muxes.Delete(key)
|
||||
}
|
||||
|
||||
func (i *instanceDeleteBackoff) RecordFailure(key string) {
|
||||
backoff, _ := i.muxes.LoadOrStore(key, &instanceBackOff{})
|
||||
ib := backoff.(*instanceBackOff)
|
||||
ib.mux.Lock()
|
||||
defer ib.mux.Unlock()
|
||||
|
||||
ib.lastRecordedFailureTime = time.Now().UTC()
|
||||
if ib.backoffSeconds == 0 {
|
||||
ib.backoffSeconds = common.PoolConsilitationInterval.Seconds()
|
||||
} else {
|
||||
// Geometric progression of 1.5
|
||||
newBackoff := ib.backoffSeconds * 1.5
|
||||
// Cap the backoff to 20 minutes
|
||||
ib.backoffSeconds = min(newBackoff, maxBackoffSeconds)
|
||||
}
|
||||
}
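The failure backoff above starts from the pool reconciliation interval and grows geometrically by a factor of 1.5, capped at 20 minutes. A standalone sketch of the progression (the 60 second starting value is an assumption for illustration; the real value comes from common.PoolConsilitationInterval):

package main

import "fmt"

func main() {
	const maxBackoffSeconds = 1200.0 // 20 minute cap, as in the code above
	backoff := 60.0                  // assumed initial interval in seconds
	for failure := 1; failure <= 10; failure++ {
		fmt.Printf("failure %d -> next retry delayed by %.0fs\n", failure, backoff)
		backoff = min(backoff*1.5, maxBackoffSeconds) // same progression as RecordFailure
	}
}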
|
||||
|
|
|
|||
|
|
@ -16,9 +16,11 @@ package pool
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
|
@ -90,6 +92,7 @@ func NewEntityPoolManager(ctx context.Context, entity params.GithubEntity, insta
|
|||
|
||||
wg := &sync.WaitGroup{}
|
||||
keyMuxes := &keyMutex{}
|
||||
backoff := &instanceDeleteBackoff{}
|
||||
|
||||
repo := &basePoolManager{
|
||||
ctx: ctx,
|
||||
|
|
@ -103,6 +106,7 @@ func NewEntityPoolManager(ctx context.Context, entity params.GithubEntity, insta
|
|||
quit: make(chan struct{}),
|
||||
wg: wg,
|
||||
keyMux: keyMuxes,
|
||||
backoff: backoff,
|
||||
consumer: consumer,
|
||||
}
|
||||
return repo, nil
|
||||
|
|
@ -125,9 +129,20 @@ type basePoolManager struct {
|
|||
managerIsRunning bool
|
||||
managerErrorReason string
|
||||
|
||||
mux sync.Mutex
|
||||
wg *sync.WaitGroup
|
||||
keyMux *keyMutex
|
||||
mux sync.Mutex
|
||||
wg *sync.WaitGroup
|
||||
keyMux *keyMutex
|
||||
backoff *instanceDeleteBackoff
|
||||
}
|
||||
|
||||
func (r *basePoolManager) getProviderBaseParams(pool params.Pool) common.ProviderBaseParams {
|
||||
r.mux.Lock()
|
||||
defer r.mux.Unlock()
|
||||
|
||||
return common.ProviderBaseParams{
|
||||
PoolInfo: pool,
|
||||
ControllerInfo: r.controllerInfo,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *basePoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
|
||||
|
|
@ -135,6 +150,13 @@ func (r *basePoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
|
|||
return errors.Wrap(err, "validating owner")
|
||||
}
|
||||
|
||||
// We see events where the labels seem to be missing. We should ignore these
|
||||
// as we can't know if we should handle them or not.
|
||||
if len(job.WorkflowJob.Labels) == 0 {
|
||||
slog.WarnContext(r.ctx, "job has no labels", "workflow_job", job.WorkflowJob.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
var jobParams params.Job
|
||||
var err error
|
||||
var triggeredBy int64
|
||||
|
|
@ -589,7 +611,12 @@ func (r *basePoolManager) cleanupOrphanedGithubRunners(runners []*github.Runner)
|
|||
slog.DebugContext(
|
||||
r.ctx, "updating instances cache for pool",
|
||||
"pool_id", pool.ID)
|
||||
poolInstances, err = provider.ListInstances(r.ctx, pool.ID)
|
||||
listInstancesParams := common.ListInstancesParams{
|
||||
ListInstancesV011: common.ListInstancesV011Params{
|
||||
ProviderBaseParams: r.getProviderBaseParams(pool),
|
||||
},
|
||||
}
|
||||
poolInstances, err = provider.ListInstances(r.ctx, pool.ID, listInstancesParams)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "fetching instances for pool %s", pool.ID)
|
||||
}
|
||||
|
|
@ -654,7 +681,12 @@ func (r *basePoolManager) cleanupOrphanedGithubRunners(runners []*github.Runner)
|
|||
r.ctx, "instance was found in stopped state; starting",
|
||||
"runner_name", dbInstance.Name)
|
||||
|
||||
if err := provider.Start(r.ctx, dbInstance.ProviderID); err != nil {
|
||||
startParams := common.StartParams{
|
||||
StartV011: common.StartV011Params{
|
||||
ProviderBaseParams: r.getProviderBaseParams(pool),
|
||||
},
|
||||
}
|
||||
if err := provider.Start(r.ctx, dbInstance.ProviderID, startParams); err != nil {
|
||||
return errors.Wrapf(err, "starting instance %s", dbInstance.ProviderID)
|
||||
}
|
||||
return nil
|
||||
|
|
@ -870,7 +902,12 @@ func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error
|
|||
|
||||
defer func() {
|
||||
if instanceIDToDelete != "" {
|
||||
if err := provider.DeleteInstance(r.ctx, instanceIDToDelete); err != nil {
|
||||
deleteInstanceParams := common.DeleteInstanceParams{
|
||||
DeleteInstanceV011: common.DeleteInstanceV011Params{
|
||||
ProviderBaseParams: r.getProviderBaseParams(pool),
|
||||
},
|
||||
}
|
||||
if err := provider.DeleteInstance(r.ctx, instanceIDToDelete, deleteInstanceParams); err != nil {
|
||||
if !errors.Is(err, runnerErrors.ErrNotFound) {
|
||||
slog.With(slog.Any("error", err)).ErrorContext(
|
||||
r.ctx, "failed to cleanup instance",
|
||||
|
|
@ -880,7 +917,12 @@ func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error
|
|||
}
|
||||
}()
|
||||
|
||||
providerInstance, err := provider.CreateInstance(r.ctx, bootstrapArgs)
|
||||
createInstanceParams := common.CreateInstanceParams{
|
||||
CreateInstanceV011: common.CreateInstanceV011Params{
|
||||
ProviderBaseParams: r.getProviderBaseParams(pool),
|
||||
},
|
||||
}
|
||||
providerInstance, err := provider.CreateInstance(r.ctx, bootstrapArgs, createInstanceParams)
|
||||
if err != nil {
|
||||
instanceIDToDelete = instance.Name
|
||||
return errors.Wrap(err, "creating instance")
|
||||
|
|
@ -1316,7 +1358,12 @@ func (r *basePoolManager) deleteInstanceFromProvider(ctx context.Context, instan
|
|||
"runner_name", instance.Name,
|
||||
"provider_id", identifier)
|
||||
|
||||
if err := provider.DeleteInstance(ctx, identifier); err != nil {
|
||||
deleteInstanceParams := common.DeleteInstanceParams{
|
||||
DeleteInstanceV011: common.DeleteInstanceV011Params{
|
||||
ProviderBaseParams: r.getProviderBaseParams(pool),
|
||||
},
|
||||
}
|
||||
if err := provider.DeleteInstance(ctx, identifier, deleteInstanceParams); err != nil {
|
||||
return errors.Wrap(err, "removing instance")
|
||||
}
|
||||
|
||||
|
|
@ -1349,21 +1396,35 @@ func (r *basePoolManager) deletePendingInstances() error {
|
|||
continue
|
||||
}
|
||||
|
||||
currentStatus := instance.Status
|
||||
// Set the status to deleting before launching the goroutine that removes
|
||||
// the runner from the provider (which can take a long time).
|
||||
if _, err := r.setInstanceStatus(instance.Name, commonParams.InstanceDeleting, nil); err != nil {
|
||||
slog.With(slog.Any("error", err)).ErrorContext(
|
||||
r.ctx, "failed to update runner status",
|
||||
"runner_name", instance.Name)
|
||||
shouldProcess, deadline := r.backoff.ShouldProcess(instance.Name)
|
||||
if !shouldProcess {
|
||||
slog.DebugContext(
|
||||
r.ctx, "backoff in effect for instance",
|
||||
"runner_name", instance.Name, "deadline", deadline)
|
||||
r.keyMux.Unlock(instance.Name, false)
|
||||
continue
|
||||
}
|
||||
|
||||
go func(instance params.Instance) (err error) {
|
||||
// Prevent a thundering herd. This should alleviate some of the
// "database is locked" errors in sqlite3.
|
||||
num, err := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate random number: %w", err)
|
||||
}
|
||||
jitter := time.Duration(num.Int64()) * time.Millisecond
|
||||
time.Sleep(jitter)
|
||||
|
||||
currentStatus := instance.Status
|
||||
deleteMux := false
|
||||
defer func() {
|
||||
r.keyMux.Unlock(instance.Name, deleteMux)
|
||||
if deleteMux {
|
||||
// deleteMux is set only when the instance was successfully removed.
|
||||
// We can use it as a marker to signal that the backoff is no longer
|
||||
// needed.
|
||||
r.backoff.Delete(instance.Name)
|
||||
}
|
||||
}()
|
||||
defer func(instance params.Instance) {
|
||||
if err != nil {
|
||||
|
|
@ -1372,14 +1433,22 @@ func (r *basePoolManager) deletePendingInstances() error {
|
|||
"runner_name", instance.Name)
|
||||
// failed to remove from provider. Set status to previous value, which will retry
|
||||
// the operation.
|
||||
if _, err := r.setInstanceStatus(instance.Name, currentStatus, nil); err != nil {
|
||||
if _, err := r.setInstanceStatus(instance.Name, currentStatus, []byte(err.Error())); err != nil {
|
||||
slog.With(slog.Any("error", err)).ErrorContext(
|
||||
r.ctx, "failed to update runner status",
|
||||
"runner_name", instance.Name)
|
||||
}
|
||||
r.backoff.RecordFailure(instance.Name)
|
||||
}
|
||||
}(instance)
|
||||
|
||||
if _, err := r.setInstanceStatus(instance.Name, commonParams.InstanceDeleting, nil); err != nil {
|
||||
slog.With(slog.Any("error", err)).ErrorContext(
|
||||
r.ctx, "failed to update runner status",
|
||||
"runner_name", instance.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
slog.DebugContext(
|
||||
r.ctx, "removing instance from provider",
|
||||
"runner_name", instance.Name)
|
||||
|
|
|
|||
23
runner/providers/common/common.go
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
garmErrors "github.com/cloudbase/garm-provider-common/errors"
|
||||
commonParams "github.com/cloudbase/garm-provider-common/params"
|
||||
"github.com/cloudbase/garm/runner/providers/util"
|
||||
)
|
||||
|
||||
func ValidateResult(inst commonParams.ProviderInstance) error {
|
||||
if inst.ProviderID == "" {
|
||||
return garmErrors.NewProviderError("missing provider ID")
|
||||
}
|
||||
|
||||
if inst.Name == "" {
|
||||
return garmErrors.NewProviderError("missing instance name")
|
||||
}
|
||||
|
||||
if !util.IsValidProviderStatus(inst.Status) {
|
||||
return garmErrors.NewProviderError("invalid status returned (%s)", inst.Status)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
332
runner/providers/external/external.go
vendored
|
|
@ -2,334 +2,22 @@ package external
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os/exec"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
garmErrors "github.com/cloudbase/garm-provider-common/errors"
|
||||
"github.com/cloudbase/garm-provider-common/execution"
|
||||
commonParams "github.com/cloudbase/garm-provider-common/params"
|
||||
garmExec "github.com/cloudbase/garm-provider-common/util/exec"
|
||||
"github.com/cloudbase/garm/config"
|
||||
"github.com/cloudbase/garm/metrics"
|
||||
"github.com/cloudbase/garm/params"
|
||||
"github.com/cloudbase/garm/runner/common"
|
||||
v010 "github.com/cloudbase/garm/runner/providers/v0.1.0"
|
||||
v011 "github.com/cloudbase/garm/runner/providers/v0.1.1"
|
||||
)
|
||||
|
||||
var _ common.Provider = (*external)(nil)
|
||||
|
||||
// NewProvider selects the provider based on the interface version
|
||||
func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
|
||||
if cfg.ProviderType != params.ExternalProvider {
|
||||
return nil, garmErrors.NewBadRequestError("invalid provider config")
|
||||
}
|
||||
|
||||
execPath, err := cfg.External.ExecutablePath()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "fetching executable path")
|
||||
}
|
||||
|
||||
envVars := cfg.External.GetEnvironmentVariables()
|
||||
|
||||
return &external{
|
||||
ctx: ctx,
|
||||
controllerID: controllerID,
|
||||
cfg: cfg,
|
||||
execPath: execPath,
|
||||
environmentVariables: envVars,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type external struct {
|
||||
ctx context.Context
|
||||
controllerID string
|
||||
cfg *config.Provider
|
||||
execPath string
|
||||
environmentVariables []string
|
||||
}
|
||||
|
||||
func (e *external) validateResult(inst commonParams.ProviderInstance) error {
|
||||
if inst.ProviderID == "" {
|
||||
return garmErrors.NewProviderError("missing provider ID")
|
||||
}
|
||||
|
||||
if inst.Name == "" {
|
||||
return garmErrors.NewProviderError("missing instance name")
|
||||
}
|
||||
|
||||
if !IsValidProviderStatus(inst.Status) {
|
||||
return garmErrors.NewProviderError("invalid status returned (%s)", inst.Status)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateInstance creates a new compute instance in the provider.
|
||||
func (e *external) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance) (commonParams.ProviderInstance, error) {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", execution.CreateInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
asJs, err := json.Marshal(bootstrapParams)
|
||||
if err != nil {
|
||||
return commonParams.ProviderInstance{}, errors.Wrap(err, "serializing bootstrap params")
|
||||
}
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"CreateInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
|
||||
out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"CreateInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
|
||||
var param commonParams.ProviderInstance
|
||||
if err := json.Unmarshal(out, ¶m); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"CreateInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
|
||||
}
|
||||
|
||||
if err := e.validateResult(param); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"CreateInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
|
||||
}
|
||||
|
||||
retAsJs, _ := json.MarshalIndent(param, "", " ")
|
||||
slog.DebugContext(
|
||||
ctx, "provider returned",
|
||||
"output", string(retAsJs))
|
||||
return param, nil
|
||||
}
|
||||
|
||||
// Delete instance will delete the instance in a provider.
|
||||
func (e *external) DeleteInstance(ctx context.Context, instance string) error {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", execution.DeleteInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"DeleteInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
_, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
var exitErr *exec.ExitError
|
||||
if !errors.As(err, &exitErr) || exitErr.ExitCode() != execution.ExitCodeNotFound {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"DeleteInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetInstance will return details about one instance.
|
||||
func (e *external) GetInstance(ctx context.Context, instance string) (commonParams.ProviderInstance, error) {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", execution.GetInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
// nolint:golangci-lint,godox
|
||||
// TODO(gabriel-samfira): handle error types. Of particular interest is to
|
||||
// know when the error is ErrNotFound.
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"GetInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"GetInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
|
||||
var param commonParams.ProviderInstance
|
||||
if err := json.Unmarshal(out, ¶m); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"GetInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
|
||||
}
|
||||
|
||||
if err := e.validateResult(param); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"GetInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
|
||||
}
|
||||
|
||||
return param, nil
|
||||
}
|
||||
|
||||
// ListInstances will list all instances for a provider.
|
||||
func (e *external) ListInstances(ctx context.Context, poolID string) ([]commonParams.ProviderInstance, error) {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", execution.ListInstancesCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_POOL_ID=%s", poolID),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"ListInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
|
||||
out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"ListInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
|
||||
var param []commonParams.ProviderInstance
|
||||
if err := json.Unmarshal(out, ¶m); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"ListInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
|
||||
}
|
||||
|
||||
ret := make([]commonParams.ProviderInstance, len(param))
|
||||
for idx, inst := range param {
|
||||
if err := e.validateResult(inst); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"ListInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
|
||||
}
|
||||
ret[idx] = inst
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// RemoveAllInstances will remove all instances created by this provider.
|
||||
func (e *external) RemoveAllInstances(ctx context.Context) error {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", execution.RemoveAllInstancesCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"RemoveAllInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
|
||||
_, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"RemoveAllInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop shuts down the instance.
|
||||
func (e *external) Stop(ctx context.Context, instance string) error {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", execution.StopInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"Stop", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
_, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"Stop", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start boots up an instance.
|
||||
func (e *external) Start(ctx context.Context, instance string) error {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", execution.StartInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"Start", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
|
||||
_, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"Start", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *external) AsParams() params.Provider {
|
||||
return params.Provider{
|
||||
Name: e.cfg.Name,
|
||||
Description: e.cfg.Description,
|
||||
ProviderType: e.cfg.ProviderType,
|
||||
switch cfg.External.InterfaceVersion {
|
||||
case common.Version010, "":
|
||||
return v010.NewProvider(ctx, cfg, controllerID)
|
||||
case common.Version011:
|
||||
return v011.NewProvider(ctx, cfg, controllerID)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported interface version: %s", cfg.External.InterfaceVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// DisableJITConfig tells us if the provider explicitly disables JIT configuration and
|
||||
// forces runner registration tokens to be used. This may happen if a provider has not yet
|
||||
// been updated to support JIT configuration.
|
||||
func (e *external) DisableJITConfig() bool {
|
||||
if e.cfg == nil {
|
||||
return false
|
||||
}
|
||||
return e.cfg.DisableJITConfig
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
package external
|
||||
package util
|
||||
|
||||
import (
|
||||
commonParams "github.com/cloudbase/garm-provider-common/params"
|
||||
326
runner/providers/v0.1.0/external.go
Normal file
|
|
@ -0,0 +1,326 @@
|
|||
package v010
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os/exec"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
garmErrors "github.com/cloudbase/garm-provider-common/errors"
|
||||
commonExecution "github.com/cloudbase/garm-provider-common/execution/common"
|
||||
commonParams "github.com/cloudbase/garm-provider-common/params"
|
||||
garmExec "github.com/cloudbase/garm-provider-common/util/exec"
|
||||
"github.com/cloudbase/garm/config"
|
||||
"github.com/cloudbase/garm/metrics"
|
||||
"github.com/cloudbase/garm/params"
|
||||
"github.com/cloudbase/garm/runner/common"
|
||||
commonExternal "github.com/cloudbase/garm/runner/providers/common"
|
||||
)
|
||||
|
||||
var _ common.Provider = (*external)(nil)
|
||||
|
||||
// NewProvider creates a legacy external provider.
|
||||
func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
|
||||
if cfg.ProviderType != params.ExternalProvider {
|
||||
return nil, garmErrors.NewBadRequestError("invalid provider config")
|
||||
}
|
||||
|
||||
execPath, err := cfg.External.ExecutablePath()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "fetching executable path")
|
||||
}
|
||||
|
||||
// Set GARM_INTERFACE_VERSION to the version of the interface that the external
|
||||
// provider implements. This is used to ensure compatibility between the external
|
||||
// provider and garm
|
||||
|
||||
envVars := cfg.External.GetEnvironmentVariables()
|
||||
envVars = append(envVars, fmt.Sprintf("GARM_INTERFACE_VERSION=%s", common.Version010))
|
||||
|
||||
return &external{
|
||||
ctx: ctx,
|
||||
controllerID: controllerID,
|
||||
cfg: cfg,
|
||||
execPath: execPath,
|
||||
environmentVariables: envVars,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type external struct {
|
||||
ctx context.Context
|
||||
controllerID string
|
||||
cfg *config.Provider
|
||||
execPath string
|
||||
environmentVariables []string
|
||||
}
|
||||
|
||||
// CreateInstance creates a new compute instance in the provider.
|
||||
func (e *external) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, _ common.CreateInstanceParams) (commonParams.ProviderInstance, error) {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", commonExecution.CreateInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
asJs, err := json.Marshal(bootstrapParams)
|
||||
if err != nil {
|
||||
return commonParams.ProviderInstance{}, errors.Wrap(err, "serializing bootstrap params")
|
||||
}
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"CreateInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
|
||||
out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"CreateInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
|
||||
var param commonParams.ProviderInstance
|
||||
if err := json.Unmarshal(out, ¶m); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"CreateInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
|
||||
}
|
||||
|
||||
if err := commonExternal.ValidateResult(param); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"CreateInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
|
||||
}
|
||||
|
||||
retAsJs, _ := json.MarshalIndent(param, "", " ")
|
||||
slog.DebugContext(
|
||||
ctx, "provider returned",
|
||||
"output", string(retAsJs))
|
||||
return param, nil
|
||||
}
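The shim above invokes the provider binary with the operation encoded in GARM_COMMAND and the bootstrap parameters serialized as JSON, handed to garmExec.Exec alongside the environment. A rough sketch of that contract from the provider binary's side; the use of stdin/stdout, the literal command strings, the JSON field names, and the status value are assumptions for illustration, not taken from this commit:

package main

import (
	"encoding/json"
	"os"
)

// providerInstance is a stand-in for the ProviderInstance document the binary
// must print; field names here are assumed for the sketch.
type providerInstance struct {
	ProviderID string `json:"provider_id"`
	Name       string `json:"name"`
	Status     string `json:"status"`
}

func main() {
	switch os.Getenv("GARM_COMMAND") {
	case "CreateInstance": // assumed literal value of CreateInstanceCommand
		var bootstrap map[string]any
		_ = json.NewDecoder(os.Stdin).Decode(&bootstrap) // bootstrap params sent by GARM
		// The response must pass ValidateResult: non-empty provider ID and
		// name, plus a valid status.
		_ = json.NewEncoder(os.Stdout).Encode(providerInstance{
			ProviderID: "example-provider-id",
			Name:       "example-runner",
			Status:     "running",
		})
	case "DeleteInstance": // assumed literal value of DeleteInstanceCommand
		// GARM_INSTANCE_ID identifies the instance to remove; nothing is printed.
	}
}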
|
||||
|
||||
// DeleteInstance will delete the instance in a provider.
|
||||
func (e *external) DeleteInstance(ctx context.Context, instance string, _ common.DeleteInstanceParams) error {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", commonExecution.DeleteInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"DeleteInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
_, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
var exitErr *exec.ExitError
|
||||
if !errors.As(err, &exitErr) || exitErr.ExitCode() != commonExecution.ExitCodeNotFound {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"DeleteInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetInstance will return details about one instance.
|
||||
func (e *external) GetInstance(ctx context.Context, instance string, _ common.GetInstanceParams) (commonParams.ProviderInstance, error) {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", commonExecution.GetInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
// nolint:golangci-lint,godox
|
||||
// TODO(gabriel-samfira): handle error types. Of particular interest is to
|
||||
// know when the error is ErrNotFound.
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"GetInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"GetInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
|
||||
var param commonParams.ProviderInstance
|
||||
if err := json.Unmarshal(out, ¶m); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"GetInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
|
||||
}
|
||||
|
||||
if err := commonExternal.ValidateResult(param); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"GetInstance", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
|
||||
}
|
||||
|
||||
return param, nil
|
||||
}
|
||||
|
||||
// ListInstances will list all instances for a provider.
|
||||
func (e *external) ListInstances(ctx context.Context, poolID string, _ common.ListInstancesParams) ([]commonParams.ProviderInstance, error) {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", commonExecution.ListInstancesCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_POOL_ID=%s", poolID),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"ListInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
|
||||
out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"ListInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
|
||||
var param []commonParams.ProviderInstance
|
||||
if err := json.Unmarshal(out, ¶m); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"ListInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
|
||||
}
|
||||
|
||||
ret := make([]commonParams.ProviderInstance, len(param))
|
||||
for idx, inst := range param {
|
||||
if err := commonExternal.ValidateResult(inst); err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"ListInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
|
||||
}
|
||||
ret[idx] = inst
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// RemoveAllInstances will remove all instances created by this provider.
|
||||
func (e *external) RemoveAllInstances(ctx context.Context, _ common.RemoveAllInstancesParams) error {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", commonExecution.RemoveAllInstancesCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"RemoveAllInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
|
||||
_, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"RemoveAllInstances", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop shuts down the instance.
|
||||
func (e *external) Stop(ctx context.Context, instance string, _ common.StopParams) error {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StopInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"Stop", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
_, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"Stop", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start boots up an instance.
|
||||
func (e *external) Start(ctx context.Context, instance string, _ common.StartParams) error {
|
||||
asEnv := []string{
|
||||
fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StartInstanceCommand),
|
||||
fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
|
||||
fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
|
||||
fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
|
||||
}
|
||||
asEnv = append(asEnv, e.environmentVariables...)
|
||||
|
||||
metrics.InstanceOperationCount.WithLabelValues(
|
||||
"Start", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
|
||||
_, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
|
||||
if err != nil {
|
||||
metrics.InstanceOperationFailedCount.WithLabelValues(
|
||||
"Start", // label: operation
|
||||
e.cfg.Name, // label: provider
|
||||
).Inc()
|
||||
return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *external) AsParams() params.Provider {
|
||||
return params.Provider{
|
||||
Name: e.cfg.Name,
|
||||
Description: e.cfg.Description,
|
||||
ProviderType: e.cfg.ProviderType,
|
||||
}
|
||||
}
|
||||
|
||||
// DisableJITConfig tells us if the provider explicitly disables JIT configuration and
|
||||
// forces runner registration tokens to be used. This may happen if a provider has not yet
|
||||
// been updated to support JIT configuration.
|
||||
func (e *external) DisableJITConfig() bool {
|
||||
if e.cfg == nil {
|
||||
return false
|
||||
}
|
||||
return e.cfg.DisableJITConfig
|
||||
}
|
||||
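The v0.1.0 implementation above defines the whole provider contract: GARM resolves the configured executable, sets GARM_COMMAND plus the GARM_CONTROLLER_ID, GARM_POOL_ID or GARM_INSTANCE_ID and GARM_PROVIDER_CONFIG_FILE variables, writes the JSON-serialized bootstrap params to the binary's stdin for CreateInstance, and expects a JSON-encoded ProviderInstance on stdout (DeleteInstance may signal "not found" through a dedicated exit code). The stub below is only a sketch of what a provider binary on the other side of that contract could look like; the trimmed-down structs and the literal command strings are illustrative assumptions, not the canonical garm-provider-common definitions.

package main

import (
    "encoding/json"
    "fmt"
    "io"
    "os"
)

// Trimmed-down stand-ins for the garm-provider-common params types; the real
// structs have more fields. Field names here are illustrative only.
type bootstrapInstance struct {
    Name   string `json:"name"`
    PoolID string `json:"pool_id"`
}

type providerInstance struct {
    ProviderID string `json:"provider_id"`
    Name       string `json:"name"`
    Status     string `json:"status"`
}

func main() {
    cmd := os.Getenv("GARM_COMMAND")
    switch cmd {
    case "CreateInstance":
        // Bootstrap params arrive as JSON on stdin.
        data, err := io.ReadAll(os.Stdin)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        var bootstrap bootstrapInstance
        if err := json.Unmarshal(data, &bootstrap); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        // ... create the instance with the backing cloud here ...
        // GARM expects a JSON-encoded ProviderInstance on stdout.
        _ = json.NewEncoder(os.Stdout).Encode(providerInstance{
            ProviderID: "instance-id-from-the-cloud",
            Name:       bootstrap.Name,
            Status:     "running",
        })
    case "DeleteInstance":
        // GARM_INSTANCE_ID identifies the instance to remove; a dedicated
        // exit code tells GARM the instance was already gone (see
        // ExitCodeNotFound handling above).
        _ = os.Getenv("GARM_INSTANCE_ID")
    default:
        fmt.Fprintf(os.Stderr, "unknown GARM_COMMAND %q\n", cmd)
        os.Exit(1)
    }
}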
386
runner/providers/v0.1.1/external.go
Normal file

@@ -0,0 +1,386 @@
package v011

import (
    "context"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "log/slog"
    "os/exec"

    "github.com/pkg/errors"

    garmErrors "github.com/cloudbase/garm-provider-common/errors"
    commonExecution "github.com/cloudbase/garm-provider-common/execution/common"
    commonParams "github.com/cloudbase/garm-provider-common/params"
    garmExec "github.com/cloudbase/garm-provider-common/util/exec"
    "github.com/cloudbase/garm/config"
    "github.com/cloudbase/garm/metrics"
    "github.com/cloudbase/garm/params"
    "github.com/cloudbase/garm/runner/common"
    commonExternal "github.com/cloudbase/garm/runner/providers/common"
)

var _ common.Provider = (*external)(nil)

func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
    if cfg.ProviderType != params.ExternalProvider {
        return nil, garmErrors.NewBadRequestError("invalid provider config")
    }

    execPath, err := cfg.External.ExecutablePath()
    if err != nil {
        return nil, errors.Wrap(err, "fetching executable path")
    }

    // Set GARM_INTERFACE_VERSION to the version of the interface that the external
    // provider implements. This is used to ensure compatibility between the external
    // provider and garm
    envVars := cfg.External.GetEnvironmentVariables()
    envVars = append(envVars, fmt.Sprintf("GARM_INTERFACE_VERSION=%s", cfg.External.InterfaceVersion))

    return &external{
        ctx:                  ctx,
        controllerID:         controllerID,
        cfg:                  cfg,
        execPath:             execPath,
        environmentVariables: envVars,
    }, nil
}

type external struct {
    ctx                  context.Context
    cfg                  *config.Provider
    controllerID         string
    execPath             string
    environmentVariables []string
}

// CreateInstance creates a new compute instance in the provider.
func (e *external) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, _ common.CreateInstanceParams) (commonParams.ProviderInstance, error) {
    extraspecs := bootstrapParams.ExtraSpecs
    extraspecsValue, err := json.Marshal(extraspecs)
    if err != nil {
        return commonParams.ProviderInstance{}, errors.Wrap(err, "serializing extraspecs")
    }
    // Encode the extraspecs as base64 to avoid issues with special characters.
    base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
    asEnv := []string{
        fmt.Sprintf("GARM_COMMAND=%s", commonExecution.CreateInstanceCommand),
        fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
        fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
        fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
        fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
    }
    asEnv = append(asEnv, e.environmentVariables...)

    asJs, err := json.Marshal(bootstrapParams)
    if err != nil {
        return commonParams.ProviderInstance{}, errors.Wrap(err, "serializing bootstrap params")
    }

    metrics.InstanceOperationCount.WithLabelValues(
        "CreateInstance", // label: operation
        e.cfg.Name,       // label: provider
    ).Inc()

    out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
    if err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "CreateInstance", // label: operation
            e.cfg.Name,       // label: provider
        ).Inc()
        return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
    }

    var param commonParams.ProviderInstance
    if err := json.Unmarshal(out, &param); err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "CreateInstance", // label: operation
            e.cfg.Name,       // label: provider
        ).Inc()
        return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
    }

    if err := commonExternal.ValidateResult(param); err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "CreateInstance", // label: operation
            e.cfg.Name,       // label: provider
        ).Inc()
        return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
    }

    retAsJs, _ := json.MarshalIndent(param, "", "  ")
    slog.DebugContext(
        ctx, "provider returned",
        "output", string(retAsJs))
    return param, nil
}

// Delete instance will delete the instance in a provider.
func (e *external) DeleteInstance(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams) error {
    extraspecs := deleteInstanceParams.DeleteInstanceV011.PoolInfo.ExtraSpecs
    extraspecsValue, err := json.Marshal(extraspecs)
    if err != nil {
        return errors.Wrap(err, "serializing extraspecs")
    }
    // Encode the extraspecs as base64 to avoid issues with special characters.
    base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
    asEnv := []string{
        fmt.Sprintf("GARM_COMMAND=%s", commonExecution.DeleteInstanceCommand),
        fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
        fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
        fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
        fmt.Sprintf("GARM_POOL_ID=%s", deleteInstanceParams.DeleteInstanceV011.PoolInfo.ID),
        fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
    }
    asEnv = append(asEnv, e.environmentVariables...)

    metrics.InstanceOperationCount.WithLabelValues(
        "DeleteInstance", // label: operation
        e.cfg.Name,       // label: provider
    ).Inc()
    _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
    if err != nil {
        var exitErr *exec.ExitError
        if !errors.As(err, &exitErr) || exitErr.ExitCode() != commonExecution.ExitCodeNotFound {
            metrics.InstanceOperationFailedCount.WithLabelValues(
                "DeleteInstance", // label: operation
                e.cfg.Name,       // label: provider
            ).Inc()
            return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
        }
    }
    return nil
}

// GetInstance will return details about one instance.
func (e *external) GetInstance(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams) (commonParams.ProviderInstance, error) {
    extraspecs := getInstanceParams.GetInstanceV011.PoolInfo.ExtraSpecs
    extraspecsValue, err := json.Marshal(extraspecs)
    if err != nil {
        return commonParams.ProviderInstance{}, errors.Wrap(err, "serializing extraspecs")
    }
    // Encode the extraspecs as base64 to avoid issues with special characters.
    base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
    asEnv := []string{
        fmt.Sprintf("GARM_COMMAND=%s", commonExecution.GetInstanceCommand),
        fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
        fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
        fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
        fmt.Sprintf("GARM_POOL_ID=%s", getInstanceParams.GetInstanceV011.PoolInfo.ID),
        fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
    }
    asEnv = append(asEnv, e.environmentVariables...)

    // nolint:golangci-lint,godox
    // TODO(gabriel-samfira): handle error types. Of particular interest is to
    // know when the error is ErrNotFound.
    metrics.InstanceOperationCount.WithLabelValues(
        "GetInstance", // label: operation
        e.cfg.Name,    // label: provider
    ).Inc()
    out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
    if err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "GetInstance", // label: operation
            e.cfg.Name,    // label: provider
        ).Inc()
        return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
    }

    var param commonParams.ProviderInstance
    if err := json.Unmarshal(out, &param); err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "GetInstance", // label: operation
            e.cfg.Name,    // label: provider
        ).Inc()
        return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
    }

    if err := commonExternal.ValidateResult(param); err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "GetInstance", // label: operation
            e.cfg.Name,    // label: provider
        ).Inc()
        return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
    }

    return param, nil
}

// ListInstances will list all instances for a provider.
func (e *external) ListInstances(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams) ([]commonParams.ProviderInstance, error) {
    extraspecs := listInstancesParams.ListInstancesV011.PoolInfo.ExtraSpecs
    extraspecsValue, err := json.Marshal(extraspecs)
    if err != nil {
        return []commonParams.ProviderInstance{}, errors.Wrap(err, "serializing extraspecs")
    }
    // Encode the extraspecs as base64 to avoid issues with special characters.
    base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
    asEnv := []string{
        fmt.Sprintf("GARM_COMMAND=%s", commonExecution.ListInstancesCommand),
        fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
        fmt.Sprintf("GARM_POOL_ID=%s", poolID),
        fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
        fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
    }
    asEnv = append(asEnv, e.environmentVariables...)

    metrics.InstanceOperationCount.WithLabelValues(
        "ListInstances", // label: operation
        e.cfg.Name,      // label: provider
    ).Inc()

    out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
    if err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "ListInstances", // label: operation
            e.cfg.Name,      // label: provider
        ).Inc()
        return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
    }

    var param []commonParams.ProviderInstance
    if err := json.Unmarshal(out, &param); err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "ListInstances", // label: operation
            e.cfg.Name,      // label: provider
        ).Inc()
        return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
    }

    ret := make([]commonParams.ProviderInstance, len(param))
    for idx, inst := range param {
        if err := commonExternal.ValidateResult(inst); err != nil {
            metrics.InstanceOperationFailedCount.WithLabelValues(
                "ListInstances", // label: operation
                e.cfg.Name,      // label: provider
            ).Inc()
            return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
        }
        ret[idx] = inst
    }
    return ret, nil
}

// RemoveAllInstances will remove all instances created by this provider.
func (e *external) RemoveAllInstances(ctx context.Context, removeAllInstances common.RemoveAllInstancesParams) error {
    extraspecs := removeAllInstances.RemoveAllInstancesV011.PoolInfo.ExtraSpecs
    extraspecsValue, err := json.Marshal(extraspecs)
    if err != nil {
        return errors.Wrap(err, "serializing extraspecs")
    }
    // Encode the extraspecs as base64 to avoid issues with special characters.
    base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
    asEnv := []string{
        fmt.Sprintf("GARM_COMMAND=%s", commonExecution.RemoveAllInstancesCommand),
        fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
        fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
        fmt.Sprintf("GARM_POOL_ID=%s", removeAllInstances.RemoveAllInstancesV011.PoolInfo.ID),
        fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
    }
    asEnv = append(asEnv, e.environmentVariables...)

    metrics.InstanceOperationCount.WithLabelValues(
        "RemoveAllInstances", // label: operation
        e.cfg.Name,           // label: provider
    ).Inc()

    _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
    if err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "RemoveAllInstances", // label: operation
            e.cfg.Name,           // label: provider
        ).Inc()
        return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
    }
    return nil
}

// Stop shuts down the instance.
func (e *external) Stop(ctx context.Context, instance string, stopParams common.StopParams) error {
    extraspecs := stopParams.StopV011.PoolInfo.ExtraSpecs
    extraspecsValue, err := json.Marshal(extraspecs)
    if err != nil {
        return errors.Wrap(err, "serializing extraspecs")
    }
    // Encode the extraspecs as base64 to avoid issues with special characters.
    base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
    asEnv := []string{
        fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StopInstanceCommand),
        fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
        fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
        fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
        fmt.Sprintf("GARM_POOL_ID=%s", stopParams.StopV011.PoolInfo.ID),
        fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
    }
    asEnv = append(asEnv, e.environmentVariables...)

    metrics.InstanceOperationCount.WithLabelValues(
        "Stop", // label: operation
        e.cfg.Name, // label: provider
    ).Inc()
    _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
    if err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "Stop", // label: operation
            e.cfg.Name, // label: provider
        ).Inc()
        return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
    }
    return nil
}

// Start boots up an instance.
func (e *external) Start(ctx context.Context, instance string, startParams common.StartParams) error {
    extraspecs := startParams.StartV011.PoolInfo.ExtraSpecs
    extraspecsValue, err := json.Marshal(extraspecs)
    if err != nil {
        return errors.Wrap(err, "serializing extraspecs")
    }
    // Encode the extraspecs as base64 to avoid issues with special characters.
    base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
    asEnv := []string{
        fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StartInstanceCommand),
        fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
        fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
        fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
        fmt.Sprintf("GARM_POOL_ID=%s", startParams.StartV011.PoolInfo.ID),
        fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
    }
    asEnv = append(asEnv, e.environmentVariables...)

    metrics.InstanceOperationCount.WithLabelValues(
        "Start", // label: operation
        e.cfg.Name, // label: provider
    ).Inc()

    _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
    if err != nil {
        metrics.InstanceOperationFailedCount.WithLabelValues(
            "Start", // label: operation
            e.cfg.Name, // label: provider
        ).Inc()
        return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
    }
    return nil
}

func (e *external) AsParams() params.Provider {
    return params.Provider{
        Name:         e.cfg.Name,
        Description:  e.cfg.Description,
        ProviderType: e.cfg.ProviderType,
    }
}

// DisableJITConfig tells us if the provider explicitly disables JIT configuration and
// forces runner registration tokens to be used. This may happen if a provider has not yet
// been updated to support JIT configuration.
func (e *external) DisableJITConfig() bool {
    if e.cfg == nil {
        return false
    }
    return e.cfg.DisableJITConfig
}
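Compared to the v0.1.0 file above, every operation in this interface version also exports the pool ID and the pool extra specs, the latter JSON-serialized and then base64-encoded into GARM_POOL_EXTRASPECS. On the provider side they can be recovered by reversing those two steps; the snippet below is a minimal sketch of that, with the helper name and the generic map type chosen purely for illustration (real providers usually decode into their own extra-specs struct).

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "os"
)

// poolExtraSpecs is a hypothetical helper: it base64-decodes GARM_POOL_EXTRASPECS
// and unmarshals the resulting JSON document.
func poolExtraSpecs() (map[string]interface{}, error) {
    raw := os.Getenv("GARM_POOL_EXTRASPECS")
    if raw == "" {
        return nil, nil
    }
    decoded, err := base64.StdEncoding.DecodeString(raw)
    if err != nil {
        return nil, fmt.Errorf("decoding GARM_POOL_EXTRASPECS: %w", err)
    }
    var specs map[string]interface{}
    if err := json.Unmarshal(decoded, &specs); err != nil {
        return nil, fmt.Errorf("unmarshaling extra specs: %w", err)
    }
    return specs, nil
}

func main() {
    specs, err := poolExtraSpecs()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Printf("extra specs: %v\n", specs)
}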
@@ -205,7 +205,7 @@ func (r *Runner) UpdateRepository(ctx context.Context, repoID string, param para

    poolMgr, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
    if err != nil {
        return params.Repository{}, fmt.Errorf("failed to get pool manager: %w", err)
        return params.Repository{}, errors.Wrap(err, "getting pool manager")
    }

    repo.PoolManagerStatus = poolMgr.Status()

@@ -219,7 +219,7 @@ func (r *Runner) CreateRepoPool(ctx context.Context, repoID string, param params

    createPoolParams, err := r.appendTagsToCreatePoolParams(param)
    if err != nil {
        return params.Pool{}, fmt.Errorf("failed to append tags to create pool params: %w", err)
        return params.Pool{}, errors.Wrap(err, "appending tags to create pool params")
    }

    if createPoolParams.RunnerBootstrapTimeout == 0 {

@@ -233,7 +233,7 @@ func (r *Runner) CreateRepoPool(ctx context.Context, repoID string, param params

    pool, err := r.store.CreateEntityPool(ctx, entity, createPoolParams)
    if err != nil {
        return params.Pool{}, fmt.Errorf("failed to create pool: %w", err)
        return params.Pool{}, errors.Wrap(err, "creating pool")
    }

    return pool, nil

@@ -373,7 +373,7 @@ func (s *RepoTestSuite) TestUpdateRepositoryPoolMgrFailed() {
    _, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)

    s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
    s.Require().Equal(fmt.Sprintf("failed to get pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
    s.Require().Equal(fmt.Sprintf("getting pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}

func (s *RepoTestSuite) TestUpdateRepositoryCreateRepoPoolMgrFailed() {

@@ -382,7 +382,7 @@ func (s *RepoTestSuite) TestUpdateRepositoryCreateRepoPoolMgrFailed() {
    _, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)

    s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
    s.Require().Equal(fmt.Sprintf("failed to get pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
    s.Require().Equal(fmt.Sprintf("getting pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}

func (s *RepoTestSuite) TestCreateRepoPool() {

@@ -415,7 +415,7 @@ func (s *RepoTestSuite) TestCreateRepoPoolFetchPoolParamsFailed() {

    s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
    s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
    s.Require().Regexp("failed to append tags to create pool params: no such provider not-existent-provider-name", err.Error())
    s.Require().Regexp("appending tags to create pool params: no such provider not-existent-provider-name", err.Error())
}

func (s *RepoTestSuite) TestGetRepoPoolByID() {
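For reference on the error-wrapping change above: errors.Wrap from github.com/pkg/errors produces a message of the form "<context>: <original error>", just as fmt.Errorf with %w does, which is why only the message prefixes in the test expectations change. A minimal illustration (the error text is made up):

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

func main() {
    base := fmt.Errorf("mock error")
    wrapped := errors.Wrap(base, "getting pool manager")
    fmt.Println(wrapped) // prints: getting pool manager: mock error
}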
@@ -37,14 +37,4 @@ description = "external test provider"
provider_type = "external"
[provider.external]
config_file = "${GARM_CONFIG_DIR}/test-provider/config"
provider_executable = "${GARM_CONFIG_DIR}/test-provider/garm-external-provider"

[[github]]
name = "${CREDENTIALS_NAME}"
description = "GARM GitHub OAuth token"
oauth2_token = "${GH_TOKEN}"

[[github]]
name = "${CREDENTIALS_NAME}-clone"
description = "GARM GitHub OAuth token - clone"
oauth2_token = "${GH_TOKEN}"
provider_executable = "${GARM_CONFIG_DIR}/test-provider/garm-external-provider"

@@ -5,7 +5,7 @@ secure_boot = false
project_name = "default"
[image_remotes]
[image_remotes.ubuntu]
addr = "https://cloud-images.ubuntu.com/releases"
addr = "${LXD_REMOTE_SERVER}"
public = true
protocol = "simplestreams"
skip_verify = false
@@ -22,7 +22,7 @@ func (suite *GarmSuite) TestExternalProvider() {
    MaxRunners: 2,
    MinIdleRunners: 0,
    Flavor: "default",
    Image: "ubuntu:22.04",
    Image: "ubuntu:24.04",
    OSType: commonParams.Linux,
    OSArch: commonParams.Amd64,
    ProviderName: "test_external",

@@ -29,7 +29,7 @@ func (suite *GarmSuite) TestOrganizations() {
    MaxRunners: 2,
    MinIdleRunners: 0,
    Flavor: "default",
    Image: "ubuntu:22.04",
    Image: "ubuntu:24.04",
    OSType: commonParams.Linux,
    OSArch: commonParams.Amd64,
    ProviderName: "lxd_local",

@@ -57,7 +57,7 @@ func (suite *GarmSuite) TestRepositories() {
    MaxRunners: 2,
    MinIdleRunners: 0,
    Flavor: "default",
    Image: "ubuntu:22.04",
    Image: "ubuntu:24.04",
    OSType: commonParams.Linux,
    OSArch: commonParams.Amd64,
    ProviderName: "lxd_local",

@@ -14,6 +14,7 @@ export RUN_USER=${RUN_USER:-$USER}
export GARM_PORT=${GARM_PORT:-"9997"}
export GARM_SERVICE_NAME=${GARM_SERVICE_NAME:-"garm"}
export GARM_CONFIG_FILE=${GARM_CONFIG_FILE:-"${GARM_CONFIG_DIR}/config.toml"}
export LXD_REMOTE_SERVER=${LXD_REMOTE_SERVER:-"https://cloud-images.ubuntu.com/releases"}

if [ -f "$GITHUB_ENV" ];then
echo "export GARM_CONFIG_DIR=${GARM_CONFIG_DIR}" >> $GITHUB_ENV

@@ -66,12 +67,12 @@ sudo chown -R $RUN_USER:$RUN_USER ${GARM_CONFIG_DIR}

export LXD_PROVIDER_EXECUTABLE="$PROVIDER_BIN_DIR/garm-provider-lxd"
export LXD_PROVIDER_CONFIG="${GARM_CONFIG_DIR}/garm-provider-lxd.toml"
sudo cp $CONFIG_DIR/garm-provider-lxd.toml $LXD_PROVIDER_CONFIG
cat $CONFIG_DIR/garm-provider-lxd.toml| envsubst | sudo tee $LXD_PROVIDER_CONFIG > /dev/null

function clone_and_build_lxd_provider() {
    git clone https://github.com/cloudbase/garm-provider-lxd ~/garm-provider-lxd
    pushd ~/garm-provider-lxd
    go build -o $LXD_PROVIDER_EXECUTABLE
    CGO_ENABLED=1 go build -o $LXD_PROVIDER_EXECUTABLE
    popd
}
5
testdata/config.toml
vendored

@@ -97,6 +97,11 @@ time_to_live = "8760h"
[database.sqlite3]
# Path on disk to the sqlite3 database file.
db_file = "/etc/garm/garm.db"
# busy_timeout_seconds is an optional parameter that will set the
# sqlite3_busy_timeout to the specified value. This is useful when
# GARM may be under heavy load and the database is locked by some
# other go routine. The default value is 0.
busy_timeout_seconds = 5

# Currently, providers are defined statically in the config. This is due to the fact
# that we have not yet added support for storing secrets in something like Barbican
|||
4
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
4
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
|
|
@ -3,13 +3,13 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
|
|||
|
||||
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
|
||||
|
||||
Documentation: https://godocs.io/github.com/BurntSushi/toml
|
||||
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
|
||||
|
||||
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
|
||||
changelog; this information is also in the git tag annotations (e.g. `git show
|
||||
v0.4.0`).
|
||||
|
||||
This library requires Go 1.13 or newer; add it to your go.mod with:
|
||||
This library requires Go 1.18 or newer; add it to your go.mod with:
|
||||
|
||||
% go get github.com/BurntSushi/toml@latest
|
||||
|
||||
|
|
|
|||
126
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
126
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
|
|
@ -6,7 +6,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"io/fs"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
|
|
@ -18,13 +18,13 @@ import (
|
|||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
UnmarshalTOML(any) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of data in TOML format into a pointer v.
|
||||
//
|
||||
// See [Decoder] for a description of the decoding process.
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
func Unmarshal(data []byte, v any) error {
|
||||
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
|
||||
return err
|
||||
}
|
||||
|
|
@ -32,12 +32,12 @@ func Unmarshal(data []byte, v interface{}) error {
|
|||
// Decode the TOML data in to the pointer v.
|
||||
//
|
||||
// See [Decoder] for a description of the decoding process.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
func Decode(data string, v any) (MetaData, error) {
|
||||
return NewDecoder(strings.NewReader(data)).Decode(v)
|
||||
}
|
||||
|
||||
// DecodeFile reads the contents of a file and decodes it with [Decode].
|
||||
func DecodeFile(path string, v interface{}) (MetaData, error) {
|
||||
func DecodeFile(path string, v any) (MetaData, error) {
|
||||
fp, err := os.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
|
|
@ -46,6 +46,17 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
|
|||
return NewDecoder(fp).Decode(v)
|
||||
}
|
||||
|
||||
// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
|
||||
// [Decode].
|
||||
func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) {
|
||||
fp, err := fsys.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
//
|
||||
// This type can be used for any value, which will cause decoding to be delayed.
|
||||
|
|
@ -58,7 +69,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
|
|||
// overhead of reflection. They can be useful when you don't know the exact type
|
||||
// of TOML data until runtime.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
undecoded any
|
||||
context Key
|
||||
}
|
||||
|
||||
|
|
@ -122,7 +133,7 @@ var (
|
|||
)
|
||||
|
||||
// Decode TOML data in to the pointer `v`.
|
||||
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
func (dec *Decoder) Decode(v any) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
s := "%q"
|
||||
|
|
@ -136,8 +147,8 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
|||
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||
}
|
||||
|
||||
// Check if this is a supported type: struct, map, interface{}, or something
|
||||
// that implements UnmarshalTOML or UnmarshalText.
|
||||
// Check if this is a supported type: struct, map, any, or something that
|
||||
// implements UnmarshalTOML or UnmarshalText.
|
||||
rv = indirect(rv)
|
||||
rt := rv.Type()
|
||||
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
|
||||
|
|
@ -148,7 +159,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
|||
|
||||
// TODO: parser should read from io.Reader? Or at the very least, make it
|
||||
// read from []byte rather than string
|
||||
data, err := ioutil.ReadAll(dec.r)
|
||||
data, err := io.ReadAll(dec.r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
|
|
@ -179,18 +190,31 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
|||
// will only reflect keys that were decoded. Namely, any keys hidden behind a
|
||||
// Primitive will be considered undecoded. Executing this method will update the
|
||||
// undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// markDecodedRecursive is a helper to mark any key under the given tmap as
|
||||
// decoded, recursing as needed
|
||||
func markDecodedRecursive(md *MetaData, tmap map[string]any) {
|
||||
for key := range tmap {
|
||||
md.decoded[md.context.add(key).String()] = struct{}{}
|
||||
if tmap, ok := tmap[key].(map[string]any); ok {
|
||||
md.context = append(md.context, key)
|
||||
markDecodedRecursive(md, tmap)
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unify(data any, rv reflect.Value) error {
|
||||
// Special case. Look for a `Primitive` value.
|
||||
// TODO: #76 would make this superfluous after implemented.
|
||||
if rv.Type() == primitiveType {
|
||||
|
|
@ -207,7 +231,21 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
|||
|
||||
rvi := rv.Interface()
|
||||
if v, ok := rvi.(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
err := v.UnmarshalTOML(data)
|
||||
if err != nil {
|
||||
return md.parseErr(err)
|
||||
}
|
||||
// Assume the Unmarshaler decoded everything, so mark all keys under
|
||||
// this table as decoded.
|
||||
if tmap, ok := data.(map[string]any); ok {
|
||||
markDecodedRecursive(md, tmap)
|
||||
}
|
||||
if aot, ok := data.([]map[string]any); ok {
|
||||
for _, tmap := range aot {
|
||||
markDecodedRecursive(md, tmap)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
|
|
@ -227,14 +265,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
|||
return md.unifyInt(data, rv)
|
||||
}
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
elem := reflect.New(rv.Type().Elem())
|
||||
err := md.unify(data, reflect.Indirect(elem))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(elem)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
return md.unifyStruct(data, rv)
|
||||
case reflect.Map:
|
||||
|
|
@ -258,14 +288,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
|||
return md.e("unsupported type %s", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]any)
|
||||
if !ok {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return md.e("type mismatch for %s: expected table but found %T",
|
||||
rv.Type().String(), mapping)
|
||||
return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping))
|
||||
}
|
||||
|
||||
for key, datum := range tmap {
|
||||
|
|
@ -304,14 +333,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error {
|
||||
keyType := rv.Type().Key().Kind()
|
||||
if keyType != reflect.String && keyType != reflect.Interface {
|
||||
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
|
||||
keyType, rv.Type())
|
||||
}
|
||||
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
tmap, ok := mapping.(map[string]any)
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
|
|
@ -347,7 +376,7 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyArray(data any, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
|
|
@ -361,7 +390,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
|||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifySlice(data any, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
|
|
@ -388,7 +417,7 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyString(data any, rv reflect.Value) error {
|
||||
_, ok := rv.Interface().(json.Number)
|
||||
if ok {
|
||||
if i, ok := data.(int64); ok {
|
||||
|
|
@ -408,7 +437,7 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
|||
return md.badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error {
|
||||
rvk := rv.Kind()
|
||||
|
||||
if num, ok := data.(float64); ok {
|
||||
|
|
@ -429,7 +458,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
|||
if num, ok := data.(int64); ok {
|
||||
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
|
||||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
|
||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
||||
return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()})
|
||||
}
|
||||
rv.SetFloat(float64(num))
|
||||
return nil
|
||||
|
|
@ -438,7 +467,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
|||
return md.badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyInt(data any, rv reflect.Value) error {
|
||||
_, ok := rv.Interface().(time.Duration)
|
||||
if ok {
|
||||
// Parse as string duration, and fall back to regular integer parsing
|
||||
|
|
@ -481,7 +510,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyBool(data any, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
|
|
@ -489,12 +518,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
|||
return md.badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyAnything(data any, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
|
||||
func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case Marshaler:
|
||||
|
|
@ -523,27 +552,29 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
|
|||
return md.badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
return md.parseErr(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) badtype(dst string, data interface{}) error {
|
||||
return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
|
||||
func (md *MetaData) badtype(dst string, data any) error {
|
||||
return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst)
|
||||
}
|
||||
|
||||
func (md *MetaData) parseErr(err error) error {
|
||||
k := md.context.String()
|
||||
d := string(md.data)
|
||||
return ParseError{
|
||||
LastKey: k,
|
||||
Position: md.keyInfo[k].pos,
|
||||
Line: md.keyInfo[k].pos.Line,
|
||||
Message: err.Error(),
|
||||
err: err,
|
||||
input: string(md.data),
|
||||
LastKey: k,
|
||||
Position: md.keyInfo[k].pos.withCol(d),
|
||||
Line: md.keyInfo[k].pos.Line,
|
||||
input: d,
|
||||
}
|
||||
}
|
||||
|
||||
func (md *MetaData) e(format string, args ...interface{}) error {
|
||||
func (md *MetaData) e(format string, args ...any) error {
|
||||
f := "toml: "
|
||||
if len(md.context) > 0 {
|
||||
f = fmt.Sprintf("toml: (last key %q): ", md.context)
|
||||
|
|
@ -556,7 +587,7 @@ func (md *MetaData) e(format string, args ...interface{}) error {
|
|||
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
func rvalue(v any) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
|
|
@ -600,3 +631,8 @@ func isUnifiable(rv reflect.Value) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fmt %T with "interface {}" replaced with "any", which is far more readable.
|
||||
func fmtType(t any) string {
|
||||
return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any")
|
||||
}
|
||||
|
|
|
|||
19
vendor/github.com/BurntSushi/toml/decode_go116.go
generated
vendored
19
vendor/github.com/BurntSushi/toml/decode_go116.go
generated
vendored
|
|
@ -1,19 +0,0 @@
|
|||
//go:build go1.16
|
||||
// +build go1.16
|
||||
|
||||
package toml
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
|
||||
// [Decode].
|
||||
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
|
||||
fp, err := fsys.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
}
|
||||
18
vendor/github.com/BurntSushi/toml/deprecated.go
generated
vendored
18
vendor/github.com/BurntSushi/toml/deprecated.go
generated
vendored
|
|
@ -15,15 +15,15 @@ type TextMarshaler encoding.TextMarshaler
|
|||
// Deprecated: use encoding.TextUnmarshaler
|
||||
type TextUnmarshaler encoding.TextUnmarshaler
|
||||
|
||||
// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
|
||||
//
|
||||
// Deprecated: use MetaData.PrimitiveDecode.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]struct{})}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// DecodeReader is an alias for NewDecoder(r).Decode(v).
|
||||
//
|
||||
// Deprecated: use NewDecoder(reader).Decode(&value).
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
|
||||
func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) }
|
||||
|
||||
// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
|
||||
//
|
||||
// Deprecated: use MetaData.PrimitiveDecode.
|
||||
func PrimitiveDecode(primValue Primitive, v any) error {
|
||||
md := MetaData{decoded: make(map[string]struct{})}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
|
|
|||
3
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
3
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
|
|
@ -2,9 +2,6 @@
|
|||
//
|
||||
// This package supports TOML v1.0.0, as specified at https://toml.io
|
||||
//
|
||||
// There is also support for delaying decoding with the Primitive type, and
|
||||
// querying the set of keys in a TOML document with the MetaData type.
|
||||
//
|
||||
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
|
||||
// and can be used to verify if TOML document is valid. It can also be used to
|
||||
// print the type of each key.
|
||||
|
|
|
|||
91
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
91
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
|
|
@ -2,6 +2,7 @@ package toml
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
|
@ -76,6 +77,17 @@ type Marshaler interface {
|
|||
MarshalTOML() ([]byte, error)
|
||||
}
|
||||
|
||||
// Marshal returns a TOML representation of the Go value.
|
||||
//
|
||||
// See [Encoder] for a description of the encoding process.
|
||||
func Marshal(v any) ([]byte, error) {
|
||||
buff := new(bytes.Buffer)
|
||||
if err := NewEncoder(buff).Encode(v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buff.Bytes(), nil
|
||||
}
|
||||
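The newly vendored toml.Marshal above is the write-side counterpart to Unmarshal: it runs the value through Encoder and returns the resulting bytes. A small usage sketch follows; the config struct is made up for illustration.

package main

import (
    "fmt"
    "log"

    "github.com/BurntSushi/toml"
)

type config struct {
    Name string `toml:"name"`
    Port int    `toml:"port"`
}

func main() {
    out, err := toml.Marshal(config{Name: "garm", Port: 9997})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Print(string(out))
    // Output:
    // name = "garm"
    // port = 9997
}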
|
||||
// Encoder encodes a Go to a TOML document.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same as
|
||||
|
|
@ -115,26 +127,21 @@ type Marshaler interface {
|
|||
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
|
||||
// keys are silently discarded.
|
||||
type Encoder struct {
|
||||
// String to use for a single indentation level; default is two spaces.
|
||||
Indent string
|
||||
|
||||
Indent string // string for a single indentation level; default is two spaces.
|
||||
hasWritten bool // written any output to w yet?
|
||||
w *bufio.Writer
|
||||
hasWritten bool // written any output to w yet?
|
||||
}
|
||||
|
||||
// NewEncoder create a new Encoder.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
return &Encoder{w: bufio.NewWriter(w), Indent: " "}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
|
||||
//
|
||||
// An error is returned if the value given cannot be encoded to a valid TOML
|
||||
// document.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
func (enc *Encoder) Encode(v any) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
err := enc.safeEncode(Key([]string{}), rv)
|
||||
if err != nil {
|
||||
|
|
@ -280,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
|||
case reflect.Float32:
|
||||
f := rv.Float()
|
||||
if math.IsNaN(f) {
|
||||
if math.Signbit(f) {
|
||||
enc.wf("-")
|
||||
}
|
||||
enc.wf("nan")
|
||||
} else if math.IsInf(f, 0) {
|
||||
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
|
||||
if math.Signbit(f) {
|
||||
enc.wf("-")
|
||||
}
|
||||
enc.wf("inf")
|
||||
} else {
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
|
||||
}
|
||||
case reflect.Float64:
|
||||
f := rv.Float()
|
||||
if math.IsNaN(f) {
|
||||
if math.Signbit(f) {
|
||||
enc.wf("-")
|
||||
}
|
||||
enc.wf("nan")
|
||||
} else if math.IsInf(f, 0) {
|
||||
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
|
||||
if math.Signbit(f) {
|
||||
enc.wf("-")
|
||||
}
|
||||
enc.wf("inf")
|
||||
} else {
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
|
||||
}
|
||||
|
|
@ -304,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
|||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
default:
|
||||
encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
|
||||
encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface())))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -383,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
var mapKeysDirect, mapKeysSub []reflect.Value
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
mapKeysSub = append(mapKeysSub, mapKey)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
mapKeysDirect = append(mapKeysDirect, mapKey)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string, trailC bool) {
|
||||
sort.Strings(mapKeys)
|
||||
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
|
||||
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
|
||||
for i, mapKey := range mapKeys {
|
||||
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
|
||||
val := eindirect(rv.MapIndex(mapKey))
|
||||
if isNil(val) {
|
||||
continue
|
||||
}
|
||||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{mapKey}, val, true)
|
||||
enc.writeKeyValue(Key{mapKey.String()}, val, true)
|
||||
if trailC || i != len(mapKeys)-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
enc.encode(key.add(mapKey), val)
|
||||
enc.encode(key.add(mapKey.String()), val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -422,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
|||
}
|
||||
}
|
||||
|
||||
const is32Bit = (32 << (^uint(0) >> 63)) == 32
|
||||
|
||||
func pointerTo(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
return pointerTo(t.Elem())
|
||||
|
|
@ -458,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
if is32Bit {
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
}
|
||||
// Need to make a copy because ... ehm, I don't know why... I guess
|
||||
// allocating a new array can cause it to fail(?)
|
||||
//
|
||||
// Done for: https://github.com/BurntSushi/toml/issues/430
|
||||
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
|
|
@ -488,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
writeFields := func(fields [][]int) {
|
||||
writeFields := func(fields [][]int, totalFields int) {
|
||||
for _, fieldIndex := range fields {
|
||||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||
|
|
@ -518,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{keyName}, fieldVal, true)
|
||||
if fieldIndex[0] != len(fields)-1 {
|
||||
if fieldIndex[0] != totalFields-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
|
|
@ -530,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
if inline {
|
||||
enc.wf("{")
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
|
||||
l := len(fieldsDirect) + len(fieldsSub)
|
||||
writeFields(fieldsDirect, l)
|
||||
writeFields(fieldsSub, l)
|
||||
if inline {
|
||||
enc.wf("}")
|
||||
}
|
||||
|
|
@ -712,7 +729,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
|||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
||||
func (enc *Encoder) wf(format string, v ...any) {
|
||||
_, err := fmt.Fprintf(enc.w, format, v...)
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
|
|
|
|||
168
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
|
|
@ -67,21 +67,36 @@ type ParseError struct {
|
|||
// Position of an error.
|
||||
type Position struct {
|
||||
Line int // Line number, starting at 1.
|
||||
Col int // Error column, starting at 1.
|
||||
Start int // Start of error, as byte offset starting at 0.
|
||||
Len int // Lenght in bytes.
|
||||
Len int // Length of the error in bytes.
|
||||
}
|
||||
|
||||
func (p Position) withCol(tomlFile string) Position {
|
||||
var (
|
||||
pos int
|
||||
lines = strings.Split(tomlFile, "\n")
|
||||
)
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= p.Start {
|
||||
p.Col = p.Start - pos + 1
|
||||
if p.Col < 1 { // Should never happen, but just in case.
|
||||
p.Col = 1
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (pe ParseError) Error() string {
|
||||
msg := pe.Message
|
||||
if msg == "" { // Error from errorf()
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
if pe.LastKey == "" {
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
|
||||
}
|
||||
return fmt.Sprintf("toml: line %d (last key %q): %s",
|
||||
pe.Position.Line, pe.LastKey, msg)
|
||||
pe.Position.Line, pe.LastKey, pe.Message)
|
||||
}
|
||||
|
||||
// ErrorWithPosition returns the error with detailed location context.
|
||||
|
|
@ -92,35 +107,37 @@ func (pe ParseError) ErrorWithPosition() string {
|
|||
return pe.Error()
|
||||
}
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
col = pe.column(lines)
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
|
||||
msg := pe.Message
|
||||
if msg == "" {
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
// TODO: don't show control characters as literals? This may not show up
|
||||
// well everywhere.
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
if pe.Position.Len == 1 {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
|
||||
msg, pe.Position.Line, col+1)
|
||||
pe.Message, pe.Position.Line, pe.Position.Col)
|
||||
} else {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
|
||||
msg, pe.Position.Line, col, col+pe.Position.Len)
|
||||
pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
|
||||
}
|
||||
if pe.Position.Line > 2 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
|
||||
}
|
||||
if pe.Position.Line > 1 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2]))
|
||||
}
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
|
||||
|
||||
/// Expand tabs, so that the ^^^s are at the correct position, but leave
|
||||
/// "column 10-13" intact. Adjusting this to the visual column would be
|
||||
/// better, but we don't know the tabsize of the user in their editor, which
|
||||
/// can be 8, 4, 2, or something else. We can't know. So leaving it as the
|
||||
/// character index is probably the "most correct".
|
||||
expanded := expandTab(lines[pe.Position.Line-1])
|
||||
diff := len(expanded) - len(lines[pe.Position.Line-1])
|
||||
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
|
|
@ -142,34 +159,47 @@ func (pe ParseError) ErrorWithUsage() string {
|
|||
return m
|
||||
}
|
||||
|
||||
func (pe ParseError) column(lines []string) int {
|
||||
var pos, col int
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= pe.Position.Start {
|
||||
col = pe.Position.Start - pos
|
||||
if col < 0 { // Should never happen, but just in case.
|
||||
col = 0
|
||||
func expandTab(s string) string {
|
||||
var (
|
||||
b strings.Builder
|
||||
l int
|
||||
fill = func(n int) string {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = ' '
|
||||
}
|
||||
break
|
||||
return string(b)
|
||||
}
|
||||
)
|
||||
b.Grow(len(s))
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case '\t':
|
||||
tw := 8 - l%8
|
||||
b.WriteString(fill(tw))
|
||||
l += tw
|
||||
default:
|
||||
b.WriteRune(r)
|
||||
l += 1
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
|
||||
return col
|
||||
return b.String()
|
||||
}
|
||||
|
||||
type (
|
||||
errLexControl struct{ r rune }
|
||||
errLexEscape struct{ r rune }
|
||||
errLexUTF8 struct{ b byte }
|
||||
errLexInvalidNum struct{ v string }
|
||||
errLexInvalidDate struct{ v string }
|
||||
errParseDate struct{ v string }
|
||||
errLexInlineTableNL struct{}
|
||||
errLexStringNL struct{}
|
||||
errParseRange struct {
|
||||
i interface{} // int or float
|
||||
size string // "int64", "uint16", etc.
|
||||
i any // int or float
|
||||
size string // "int64", "uint16", etc.
|
||||
}
|
||||
errUnsafeFloat struct {
|
||||
i interface{} // float32 or float64
|
||||
size string // "float32" or "float64"
|
||||
}
|
||||
errParseDuration struct{ d string }
|
||||
)
|
||||
|
|
@ -183,18 +213,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape
|
|||
func (e errLexEscape) Usage() string { return usageEscape }
|
||||
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
|
||||
func (e errLexUTF8) Usage() string { return "" }
|
||||
func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
|
||||
func (e errLexInvalidNum) Usage() string { return "" }
|
||||
func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
|
||||
func (e errLexInvalidDate) Usage() string { return "" }
|
||||
func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) }
|
||||
func (e errParseDate) Usage() string { return usageDate }
|
||||
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
|
||||
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
|
||||
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
|
||||
func (e errLexStringNL) Usage() string { return usageStringNewline }
|
||||
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
|
||||
func (e errParseRange) Usage() string { return usageIntOverflow }
|
||||
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
|
||||
func (e errParseDuration) Usage() string { return usageDuration }
|
||||
func (e errUnsafeFloat) Error() string {
|
||||
return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size)
|
||||
}
|
||||
func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat }
|
||||
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
|
||||
func (e errParseDuration) Usage() string { return usageDuration }
|
||||
|
||||
const usageEscape = `
|
||||
A '\' inside a "-delimited string is interpreted as an escape character.
|
||||
|
|
@ -251,19 +283,35 @@ bug in the program that uses too small of an integer.
|
|||
The maximum and minimum values are:
|
||||
|
||||
size │ lowest │ highest
|
||||
───────┼────────────────┼──────────
|
||||
───────┼────────────────┼──────────────
|
||||
int8 │ -128 │ 127
|
||||
int16 │ -32,768 │ 32,767
|
||||
int32 │ -2,147,483,648 │ 2,147,483,647
|
||||
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
|
||||
uint8 │ 0 │ 255
|
||||
uint16 │ 0 │ 65535
|
||||
uint32 │ 0 │ 4294967295
|
||||
uint16 │ 0 │ 65,535
|
||||
uint32 │ 0 │ 4,294,967,295
|
||||
uint64 │ 0 │ 1.8 × 10¹⁸
|
||||
|
||||
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
|
||||
`
|
||||
|
||||
const usageUnsafeFloat = `
|
||||
This number is outside of the "safe" range for floating point numbers; whole
|
||||
(non-fractional) numbers outside the below range can not always be represented
|
||||
accurately in a float, leading to some loss of accuracy.
|
||||
|
||||
Explicitly mark a number as a fractional unit by adding ".0", which will incur
|
||||
some loss of accuracy; for example:
|
||||
|
||||
f = 2_000_000_000.0
|
||||
|
||||
Accuracy ranges:
|
||||
|
||||
float32 = 16,777,215
|
||||
float64 = 9,007,199,254,740,991
|
||||
`
|
||||
|
||||
const usageDuration = `
|
||||
A duration must be as "number<unit>", without any spaces. Valid units are:
|
||||
|
||||
|
|
@ -277,3 +325,23 @@ A duration must be as "number<unit>", without any spaces. Valid units are:
|
|||
You can combine multiple units; for example "5m10s" for 5 minutes and 10
|
||||
seconds.
|
||||
`
|
||||
|
||||
const usageDate = `
|
||||
A TOML datetime must be in one of the following formats:
|
||||
|
||||
2006-01-02T15:04:05Z07:00 Date and time, with timezone.
|
||||
2006-01-02T15:04:05 Date and time, but without timezone.
|
||||
2006-01-02 Date without a time or timezone.
|
||||
15:04:05 Just a time, without any timezone.
|
||||
|
||||
Seconds may optionally have a fraction, up to nanosecond precision:
|
||||
|
||||
15:04:05.123
|
||||
15:04:05.856018510
|
||||
`
|
||||
|
||||
// TOML 1.1:
|
||||
// The seconds part in times is optional, and may be omitted:
|
||||
// 2006-01-02T15:04Z07:00
|
||||
// 2006-01-02T15:04
|
||||
// 15:04
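For orientation, the error.go changes above compute a 1-based column from a byte offset by walking the input line by line (Position.withCol), and expand tabs to 8-column stops so the "^" markers line up (expandTab). A rough standalone sketch of both ideas, with an illustrative input string:

```go
package main

import (
	"fmt"
	"strings"
)

// colFromOffset mirrors the idea behind Position.withCol: walk the lines,
// counting the newline removed by Split, until the offset falls inside one.
func colFromOffset(input string, start int) (line, col int) {
	pos := 0
	for i, l := range strings.Split(input, "\n") {
		ll := len(l) + 1 // +1 for the removed newline
		if pos+ll >= start {
			return i + 1, start - pos + 1
		}
		pos += ll
	}
	return 1, 1
}

// expandTabs replaces each tab with spaces up to the next 8-column stop,
// in the spirit of the expandTab helper above.
func expandTabs(s string) string {
	var b strings.Builder
	col := 0
	for _, r := range s {
		if r == '\t' {
			n := 8 - col%8
			b.WriteString(strings.Repeat(" ", n))
			col += n
			continue
		}
		b.WriteRune(r)
		col++
	}
	return b.String()
}

func main() {
	input := "a = 1\nb = bad value\n" // illustrative TOML-ish input
	line, col := colFromOffset(input, 10)
	fmt.Println(line, col)                 // the offset lands on line 2
	fmt.Printf("%q\n", expandTabs("\tx"))  // tab expanded to the next 8-column stop
}
```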
|
||||
|
|
|
|||
83
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
|
|
@ -17,6 +17,7 @@ const (
|
|||
itemEOF
|
||||
itemText
|
||||
itemString
|
||||
itemStringEsc
|
||||
itemRawString
|
||||
itemMultilineString
|
||||
itemRawMultilineString
|
||||
|
|
@ -53,6 +54,7 @@ type lexer struct {
|
|||
state stateFn
|
||||
items chan item
|
||||
tomlNext bool
|
||||
esc bool
|
||||
|
||||
// Allow for backing up up to 4 runes. This is necessary because TOML
|
||||
// contains 3-rune tokens (""" and ''').
|
||||
|
|
@ -164,7 +166,7 @@ func (lx *lexer) next() (r rune) {
|
|||
}
|
||||
|
||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
if r == utf8.RuneError {
|
||||
if r == utf8.RuneError && w == 1 {
|
||||
lx.error(errLexUTF8{lx.input[lx.pos]})
|
||||
return utf8.RuneError
|
||||
}
|
||||
|
|
@ -270,10 +272,12 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
|
|||
}
|
||||
|
||||
// errorf is like error, and creates a new error.
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
func (lx *lexer) errorf(format string, values ...any) stateFn {
|
||||
if lx.atEOF {
|
||||
pos := lx.getPos()
|
||||
pos.Line--
|
||||
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
|
||||
pos.Line--
|
||||
}
|
||||
pos.Len = 1
|
||||
pos.Start = lx.pos - 1
|
||||
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
|
||||
|
|
@ -333,9 +337,7 @@ func lexTopEnd(lx *lexer) stateFn {
|
|||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
return lx.errorf(
|
||||
"expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
|
||||
r)
|
||||
return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r)
|
||||
}
|
||||
|
||||
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
||||
|
|
@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
|
|||
lx.emit(itemKeyEnd)
|
||||
return lexSkip(lx, lexValue)
|
||||
default:
|
||||
if r == '\n' {
|
||||
return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
|
||||
}
|
||||
return lx.errorf("expected '.' or '=', but got %q instead", r)
|
||||
}
|
||||
}
|
||||
|
|
@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
|
|||
if r == eof {
|
||||
return lx.errorf("unexpected EOF; expected value")
|
||||
}
|
||||
if r == '\n' {
|
||||
return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", r)
|
||||
}
|
||||
|
||||
|
|
@ -698,7 +706,12 @@ func lexString(lx *lexer) stateFn {
|
|||
return lexStringEscape
|
||||
case r == '"':
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
if lx.esc {
|
||||
lx.esc = false
|
||||
lx.emit(itemStringEsc)
|
||||
} else {
|
||||
lx.emit(itemString)
|
||||
}
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
|
|
@ -748,6 +761,7 @@ func lexMultilineString(lx *lexer) stateFn {
|
|||
lx.backup() /// backup: don't include the """ in the item.
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.esc = false
|
||||
lx.emit(itemMultilineString)
|
||||
lx.next() /// Read over ''' again and discard it.
|
||||
lx.next()
|
||||
|
|
@ -837,6 +851,7 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
|
|||
}
|
||||
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
lx.esc = true
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'e':
|
||||
|
|
@ -879,10 +894,8 @@ func lexHexEscape(lx *lexer) stateFn {
|
|||
var r rune
|
||||
for i := 0; i < 2; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(
|
||||
`expected two hexadecimal digits after '\x', but got %q instead`,
|
||||
lx.current())
|
||||
if !isHex(r) {
|
||||
return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
|
|
@ -892,10 +905,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
|
|||
var r rune
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(
|
||||
`expected four hexadecimal digits after '\u', but got %q instead`,
|
||||
lx.current())
|
||||
if !isHex(r) {
|
||||
return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
|
|
@ -905,10 +916,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
|
|||
var r rune
|
||||
for i := 0; i < 8; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(
|
||||
`expected eight hexadecimal digits after '\U', but got %q instead`,
|
||||
lx.current())
|
||||
if !isHex(r) {
|
||||
return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
|
|
@ -975,7 +984,7 @@ func lexDatetime(lx *lexer) stateFn {
|
|||
// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
|
||||
func lexHexInteger(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isHexadecimal(r) {
|
||||
if isHex(r) {
|
||||
return lexHexInteger
|
||||
}
|
||||
switch r {
|
||||
|
|
@ -1109,8 +1118,8 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
|
|||
return lexOctalInteger
|
||||
case 'x':
|
||||
r = lx.peek()
|
||||
if !isHexadecimal(r) {
|
||||
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
|
||||
if !isHex(r) {
|
||||
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
|
||||
}
|
||||
return lexHexInteger
|
||||
}
|
||||
|
|
@ -1207,7 +1216,7 @@ func (itype itemType) String() string {
|
|||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
|
|
@ -1240,7 +1249,7 @@ func (itype itemType) String() string {
|
|||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
return fmt.Sprintf("(%s, %s)", item.typ, item.val)
|
||||
}
|
||||
|
||||
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
|
||||
|
|
@ -1256,28 +1265,8 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n
|
|||
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
|
||||
func isBinary(r rune) bool { return r == '0' || r == '1' }
|
||||
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
|
||||
}
|
||||
|
||||
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
|
||||
func isBareKeyChar(r rune, tomlNext bool) bool {
|
||||
if tomlNext {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-' ||
|
||||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
|
||||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
|
||||
(r >= 0x037f && r <= 0x1fff) ||
|
||||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
|
||||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
|
||||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
|
||||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
|
||||
(r >= 0x10000 && r <= 0xeffff)
|
||||
}
|
||||
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-'
|
||||
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') || r == '_' || r == '-'
|
||||
}
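One detail worth calling out from the lexer changes above: the new isHex helper folds ASCII letters to lowercase with `r|0x20` before comparing, so a single range check covers both 'A'-'F' and 'a'-'f'. A small standalone sketch of the same trick (not the library's code, just the idea):

```go
package main

import "fmt"

// isHex mirrors the bitwise trick above: OR-ing an ASCII letter with 0x20
// sets the lowercase bit, so 'A'..'F' and 'a'..'f' hit the same range check.
func isHex(r rune) bool {
	return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f')
}

func main() {
	for _, r := range "0fF zG" {
		fmt.Printf("%q -> %v\n", r, isHex(r)) // '0', 'f', 'F' are hex; ' ', 'z', 'G' are not
	}
}
```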
|
||||
|
|
|
|||
46
vendor/github.com/BurntSushi/toml/meta.go
generated
vendored
|
|
@ -13,7 +13,7 @@ type MetaData struct {
|
|||
context Key // Used only during decoding.
|
||||
|
||||
keyInfo map[string]keyInfo
|
||||
mapping map[string]interface{}
|
||||
mapping map[string]any
|
||||
keys []Key
|
||||
decoded map[string]struct{}
|
||||
data []byte // Input file; for errors.
|
||||
|
|
@ -31,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
|
|||
}
|
||||
|
||||
var (
|
||||
hash map[string]interface{}
|
||||
hash map[string]any
|
||||
ok bool
|
||||
hashOrVal interface{} = md.mapping
|
||||
hashOrVal any = md.mapping
|
||||
)
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
if hash, ok = hashOrVal.(map[string]any); !ok {
|
||||
return false
|
||||
}
|
||||
if hashOrVal, ok = hash[k]; !ok {
|
||||
|
|
@ -94,28 +94,52 @@ func (md *MetaData) Undecoded() []Key {
|
|||
type Key []string
|
||||
|
||||
func (k Key) String() string {
|
||||
ss := make([]string, len(k))
|
||||
for i := range k {
|
||||
ss[i] = k.maybeQuoted(i)
|
||||
// This is called quite often, so it's a bit funky to make it faster.
|
||||
var b strings.Builder
|
||||
b.Grow(len(k) * 25)
|
||||
outer:
|
||||
for i, kk := range k {
|
||||
if i > 0 {
|
||||
b.WriteByte('.')
|
||||
}
|
||||
if kk == "" {
|
||||
b.WriteString(`""`)
|
||||
} else {
|
||||
for _, r := range kk {
|
||||
// "Inline" isBareKeyChar
|
||||
if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
|
||||
b.WriteByte('"')
|
||||
b.WriteString(dblQuotedReplacer.Replace(kk))
|
||||
b.WriteByte('"')
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
b.WriteString(kk)
|
||||
}
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
if k[i] == "" {
|
||||
return `""`
|
||||
}
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c, false) {
|
||||
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
||||
for _, r := range k[i] {
|
||||
if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' {
|
||||
continue
|
||||
}
|
||||
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
|
||||
// Like append(), but only increase the cap by 1.
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece.
|
||||
func (k Key) last() string { return k[len(k)-1] } // last piece of this key.
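The rewritten Key.String above inlines the bare-key check and only quotes parts that need it. A rough sketch of the same quoting decision, assuming a simplified replacer standing in for dblQuotedReplacer (the sample keys are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

var quoteRepl = strings.NewReplacer(`"`, `\"`, `\`, `\\`) // simplified stand-in for dblQuotedReplacer

// keyString joins key parts with '.', quoting any part that contains
// characters outside the bare-key set A-Z a-z 0-9 _ - (same idea as above).
func keyString(k []string) string {
	var b strings.Builder
outer:
	for i, kk := range k {
		if i > 0 {
			b.WriteByte('.')
		}
		if kk == "" {
			b.WriteString(`""`)
			continue
		}
		for _, r := range kk {
			if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
				b.WriteString(`"` + quoteRepl.Replace(kk) + `"`)
				continue outer
			}
		}
		b.WriteString(kk)
	}
	return b.String()
}

func main() {
	fmt.Println(keyString([]string{"servers", "alpha beta", "ip"})) // servers."alpha beta".ip
}
```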
|
||||
|
|
|
|||
282
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
|
|
@ -2,6 +2,7 @@ package toml
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
|
@ -20,9 +21,9 @@ type parser struct {
|
|||
|
||||
ordered []Key // List of keys in the order that they appear in the TOML data.
|
||||
|
||||
keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
|
||||
mapping map[string]interface{} // Map keyname → key value.
|
||||
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
|
||||
keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
|
||||
mapping map[string]any // Map keyname → key value.
|
||||
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
|
||||
}
|
||||
|
||||
type keyInfo struct {
|
||||
|
|
@ -63,7 +64,7 @@ func parse(data string) (p *parser, err error) {
|
|||
if i := strings.IndexRune(data[:ex], 0); i > -1 {
|
||||
return nil, ParseError{
|
||||
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
|
||||
Position: Position{Line: 1, Start: i, Len: 1},
|
||||
Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
|
||||
Line: 1,
|
||||
input: data,
|
||||
}
|
||||
|
|
@ -71,7 +72,7 @@ func parse(data string) (p *parser, err error) {
|
|||
|
||||
p = &parser{
|
||||
keyInfo: make(map[string]keyInfo),
|
||||
mapping: make(map[string]interface{}),
|
||||
mapping: make(map[string]any),
|
||||
lx: lex(data, tomlNext),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]struct{}),
|
||||
|
|
@ -90,26 +91,27 @@ func parse(data string) (p *parser, err error) {
|
|||
|
||||
func (p *parser) panicErr(it item, err error) {
|
||||
panic(ParseError{
|
||||
Message: err.Error(),
|
||||
err: err,
|
||||
Position: it.pos,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) panicItemf(it item, format string, v ...interface{}) {
|
||||
func (p *parser) panicItemf(it item, format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: it.pos,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) panicf(format string, v ...interface{}) {
|
||||
func (p *parser) panicf(format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: p.pos,
|
||||
Position: p.pos.withCol(p.lx.input),
|
||||
Line: p.pos.Line,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
|
|
@ -121,10 +123,11 @@ func (p *parser) next() item {
|
|||
if it.typ == itemError {
|
||||
if it.err != nil {
|
||||
panic(ParseError{
|
||||
Position: it.pos,
|
||||
Message: it.err.Error(),
|
||||
err: it.err,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Line,
|
||||
LastKey: p.current(),
|
||||
err: it.err,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -139,7 +142,7 @@ func (p *parser) nextPos() item {
|
|||
return it
|
||||
}
|
||||
|
||||
func (p *parser) bug(format string, v ...interface{}) {
|
||||
func (p *parser) bug(format string, v ...any) {
|
||||
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
|
||||
}
|
||||
|
||||
|
|
@ -194,11 +197,11 @@ func (p *parser) topLevel(item item) {
|
|||
p.assertEqual(itemKeyEnd, k.typ)
|
||||
|
||||
/// The current key is the last part.
|
||||
p.currentKey = key[len(key)-1]
|
||||
p.currentKey = key.last()
|
||||
|
||||
/// All the other parts (if any) are the context; need to set each part
|
||||
/// as implicit.
|
||||
context := key[:len(key)-1]
|
||||
context := key.parent()
|
||||
for i := range context {
|
||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||
}
|
||||
|
|
@ -207,7 +210,8 @@ func (p *parser) topLevel(item item) {
|
|||
/// Set value.
|
||||
vItem := p.next()
|
||||
val, typ := p.value(vItem, false)
|
||||
p.set(p.currentKey, val, typ, vItem.pos)
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ, vItem.pos)
|
||||
|
||||
/// Remove the context we added (preserving any context from [tbl] lines).
|
||||
p.context = outerContext
|
||||
|
|
@ -222,7 +226,7 @@ func (p *parser) keyString(it item) string {
|
|||
switch it.typ {
|
||||
case itemText:
|
||||
return it.val
|
||||
case itemString, itemMultilineString,
|
||||
case itemString, itemStringEsc, itemMultilineString,
|
||||
itemRawString, itemRawMultilineString:
|
||||
s, _ := p.value(it, false)
|
||||
return s.(string)
|
||||
|
|
@ -239,9 +243,11 @@ var datetimeRepl = strings.NewReplacer(
|
|||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
|
||||
// as an empty interface.
|
||||
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
|
||||
func (p *parser) value(it item, parentIsArray bool) (any, tomlType) {
|
||||
switch it.typ {
|
||||
case itemString:
|
||||
return it.val, p.typeOfPrimitive(it)
|
||||
case itemStringEsc:
|
||||
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
|
||||
case itemMultilineString:
|
||||
return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
|
||||
|
|
@ -274,7 +280,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
|
|||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
||||
func (p *parser) valueInteger(it item) (any, tomlType) {
|
||||
if !numUnderscoresOK(it.val) {
|
||||
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
|
||||
}
|
||||
|
|
@ -298,7 +304,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
|||
return num, p.typeOfPrimitive(it)
|
||||
}
|
||||
|
||||
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
||||
func (p *parser) valueFloat(it item) (any, tomlType) {
|
||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
||||
switch r {
|
||||
case '.', 'e', 'E':
|
||||
|
|
@ -322,7 +328,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
|||
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
|
||||
signbit := false
|
||||
if val == "+nan" || val == "-nan" {
|
||||
signbit = val == "-nan"
|
||||
val = "nan"
|
||||
}
|
||||
num, err := strconv.ParseFloat(val, 64)
|
||||
|
|
@ -333,6 +341,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
|||
p.panicItemf(it, "Invalid float value: %q", it.val)
|
||||
}
|
||||
}
|
||||
if signbit {
|
||||
num = math.Copysign(num, -1)
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
}
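The float change above records the sign of "+nan"/"-nan" separately and reapplies it with math.Copysign, since Go's strconv.ParseFloat only understands "nan". A tiny sketch of that round trip:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// TOML allows "-nan", but ParseFloat only accepts "nan", so the sign is
	// tracked separately and reapplied afterwards (same idea as above).
	num, err := strconv.ParseFloat("nan", 64)
	if err != nil {
		panic(err)
	}
	num = math.Copysign(num, -1)                    // force the sign bit, as for "-nan"
	fmt.Println(math.IsNaN(num), math.Signbit(num)) // true true
}
```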
|
||||
|
||||
|
|
@ -352,7 +363,7 @@ var dtTypes = []struct {
|
|||
{"15:04", internal.LocalTime, true},
|
||||
}
|
||||
|
||||
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
||||
func (p *parser) valueDatetime(it item) (any, tomlType) {
|
||||
it.val = datetimeRepl.Replace(it.val)
|
||||
var (
|
||||
t time.Time
|
||||
|
|
@ -365,26 +376,44 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
|||
}
|
||||
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
|
||||
if err == nil {
|
||||
if missingLeadingZero(it.val, dt.fmt) {
|
||||
p.panicErr(it, errParseDate{it.val})
|
||||
}
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
|
||||
p.panicErr(it, errParseDate{it.val})
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
}
|
||||
|
||||
func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
||||
// Go's time.Parse() will accept numbers without a leading zero; there isn't any
|
||||
// way to require it. https://github.com/golang/go/issues/29911
|
||||
//
|
||||
// Depend on the fact that the separators (- and :) should always be at the same
|
||||
// location.
|
||||
func missingLeadingZero(d, l string) bool {
|
||||
for i, c := range []byte(l) {
|
||||
if c == '.' || c == 'Z' {
|
||||
return false
|
||||
}
|
||||
if (c < '0' || c > '9') && d[i] != c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
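The missingLeadingZero helper above works around the fact that Go's time.Parse tolerates missing leading zeros: it compares the non-digit separator positions of the value against the layout. A standalone sketch with an illustrative input:

```go
package main

import "fmt"

// missingLeadingZero reports whether d drifts out of alignment with layout l
// before the fractional/zone part, which happens when a leading zero is
// missing (same approach as the helper above).
func missingLeadingZero(d, l string) bool {
	for i, c := range []byte(l) {
		if c == '.' || c == 'Z' {
			return false
		}
		if (c < '0' || c > '9') && d[i] != c {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(missingLeadingZero("2006-1-02", "2006-01-02"))  // true: '-' expected at index 7
	fmt.Println(missingLeadingZero("2006-01-02", "2006-01-02")) // false
}
```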
|
||||
|
||||
func (p *parser) valueArray(it item) (any, tomlType) {
|
||||
p.setType(p.currentKey, tomlArray, it.pos)
|
||||
|
||||
var (
|
||||
types []tomlType
|
||||
|
||||
// Initialize to a non-nil empty slice. This makes it consistent with
|
||||
// how S = [] decodes into a non-nil slice inside something like struct
|
||||
// { S []string }. See #338
|
||||
array = []interface{}{}
|
||||
// Initialize to a non-nil slice to make it consistent with how S = []
|
||||
// decodes into a non-nil slice inside something like struct { S
|
||||
// []string }. See #338
|
||||
array = make([]any, 0, 2)
|
||||
)
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
|
|
@ -394,21 +423,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
|||
|
||||
val, typ := p.value(it, true)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
|
||||
// XXX: types isn't used here, we need it to record the accurate type
|
||||
// XXX: type isn't used here, we need it to record the accurate type
|
||||
// information.
|
||||
//
|
||||
// Not entirely sure how to best store this; could use "key[0]",
|
||||
// "key[1]" notation, or maybe store it on the Array type?
|
||||
_ = types
|
||||
_ = typ
|
||||
}
|
||||
return array, tomlArray
|
||||
}
|
||||
|
||||
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
|
||||
func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) {
|
||||
var (
|
||||
hash = make(map[string]interface{})
|
||||
topHash = make(map[string]any)
|
||||
outerContext = p.context
|
||||
outerKey = p.currentKey
|
||||
)
|
||||
|
|
@ -436,11 +464,11 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
|||
p.assertEqual(itemKeyEnd, k.typ)
|
||||
|
||||
/// The current key is the last part.
|
||||
p.currentKey = key[len(key)-1]
|
||||
p.currentKey = key.last()
|
||||
|
||||
/// All the other parts (if any) are the context; need to set each part
|
||||
/// as implicit.
|
||||
context := key[:len(key)-1]
|
||||
context := key.parent()
|
||||
for i := range context {
|
||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||
}
|
||||
|
|
@ -448,7 +476,21 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
|||
|
||||
/// Set the value.
|
||||
val, typ := p.value(p.next(), false)
|
||||
p.set(p.currentKey, val, typ, it.pos)
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ, it.pos)
|
||||
|
||||
hash := topHash
|
||||
for _, c := range context {
|
||||
h, ok := hash[c]
|
||||
if !ok {
|
||||
h = make(map[string]any)
|
||||
hash[c] = h
|
||||
}
|
||||
hash, ok = h.(map[string]any)
|
||||
if !ok {
|
||||
p.panicf("%q is not a table", p.context)
|
||||
}
|
||||
}
|
||||
hash[p.currentKey] = val
|
||||
|
||||
/// Restore context.
|
||||
|
|
@ -456,7 +498,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
|||
}
|
||||
p.context = outerContext
|
||||
p.currentKey = outerKey
|
||||
return hash, tomlHash
|
||||
return topHash, tomlHash
|
||||
}
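The inline-table change above now materialises dotted keys into nested maps by walking the context parts and creating intermediate map[string]any tables on demand. A rough standalone sketch of that walk (the keys and value are illustrative):

```go
package main

import "fmt"

func main() {
	top := make(map[string]any)
	context := []string{"server", "network"} // parent parts of a dotted key
	key, val := "port", any(8080)            // illustrative leaf key/value

	// Walk the context, creating intermediate tables as needed, in the
	// spirit of the loop above.
	hash := top
	for _, c := range context {
		h, ok := hash[c]
		if !ok {
			h = make(map[string]any)
			hash[c] = h
		}
		hash, ok = h.(map[string]any)
		if !ok {
			panic(fmt.Sprintf("%q is not a table", c))
		}
	}
	hash[key] = val

	fmt.Println(top) // map[server:map[network:map[port:8080]]]
}
```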
|
||||
|
||||
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
|
||||
|
|
@ -486,9 +528,9 @@ func numUnderscoresOK(s string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
// isHexadecimal is a superset of all the permissable characters
|
||||
// surrounding an underscore.
|
||||
accept = isHexadecimal(r)
|
||||
// isHex is a superset of all the permissible characters surrounding an
|
||||
// underscore.
|
||||
accept = isHex(r)
|
||||
}
|
||||
return accept
|
||||
}
|
||||
|
|
@ -511,21 +553,19 @@ func numPeriodsOK(s string) bool {
|
|||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
||||
// will create implicit hashes automatically.
|
||||
func (p *parser) addContext(key Key, array bool) {
|
||||
var ok bool
|
||||
|
||||
// Always start at the top level and drill down for our context.
|
||||
/// Always start at the top level and drill down for our context.
|
||||
hashContext := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
keyContext := make(Key, 0, len(key)-1)
|
||||
|
||||
// We only need implicit hashes for key[0:-1]
|
||||
for _, k := range key[0 : len(key)-1] {
|
||||
_, ok = hashContext[k]
|
||||
/// We only need implicit hashes for the parents.
|
||||
for _, k := range key.parent() {
|
||||
_, ok := hashContext[k]
|
||||
keyContext = append(keyContext, k)
|
||||
|
||||
// No key? Make an implicit hash and move on.
|
||||
if !ok {
|
||||
p.addImplicit(keyContext)
|
||||
hashContext[k] = make(map[string]interface{})
|
||||
hashContext[k] = make(map[string]any)
|
||||
}
|
||||
|
||||
// If the hash context is actually an array of tables, then set
|
||||
|
|
@ -534,9 +574,9 @@ func (p *parser) addContext(key Key, array bool) {
|
|||
// Otherwise, it better be a table, since this MUST be a key group (by
|
||||
// virtue of it not being the last element in a key).
|
||||
switch t := hashContext[k].(type) {
|
||||
case []map[string]interface{}:
|
||||
case []map[string]any:
|
||||
hashContext = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
case map[string]any:
|
||||
hashContext = t
|
||||
default:
|
||||
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
||||
|
|
@ -547,39 +587,33 @@ func (p *parser) addContext(key Key, array bool) {
|
|||
if array {
|
||||
// If this is the first element for this array, then allocate a new
|
||||
// list of tables for it.
|
||||
k := key[len(key)-1]
|
||||
k := key.last()
|
||||
if _, ok := hashContext[k]; !ok {
|
||||
hashContext[k] = make([]map[string]interface{}, 0, 4)
|
||||
hashContext[k] = make([]map[string]any, 0, 4)
|
||||
}
|
||||
|
||||
// Add a new table. But make sure the key hasn't already been used
|
||||
// for something else.
|
||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
||||
if hash, ok := hashContext[k].([]map[string]any); ok {
|
||||
hashContext[k] = append(hash, make(map[string]any))
|
||||
} else {
|
||||
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
|
||||
}
|
||||
} else {
|
||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
||||
p.setValue(key.last(), make(map[string]any))
|
||||
}
|
||||
p.context = append(p.context, key[len(key)-1])
|
||||
}
|
||||
|
||||
// set calls setValue and setType.
|
||||
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
|
||||
p.setValue(key, val)
|
||||
p.setType(key, typ, pos)
|
||||
p.context = append(p.context, key.last())
|
||||
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
// It will make sure that the key hasn't already been defined, account for
|
||||
// implicit key groups.
|
||||
func (p *parser) setValue(key string, value interface{}) {
|
||||
func (p *parser) setValue(key string, value any) {
|
||||
var (
|
||||
tmpHash interface{}
|
||||
tmpHash any
|
||||
ok bool
|
||||
hash = p.mapping
|
||||
keyContext Key
|
||||
keyContext = make(Key, 0, len(p.context)+1)
|
||||
)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
|
|
@ -587,11 +621,11 @@ func (p *parser) setValue(key string, value interface{}) {
|
|||
p.bug("Context for key '%s' has not been established.", keyContext)
|
||||
}
|
||||
switch t := tmpHash.(type) {
|
||||
case []map[string]interface{}:
|
||||
case []map[string]any:
|
||||
// The context is a table of hashes. Pick the most recent table
|
||||
// defined as the current hash.
|
||||
hash = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
case map[string]any:
|
||||
hash = t
|
||||
default:
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
|
|
@ -618,9 +652,8 @@ func (p *parser) setValue(key string, value interface{}) {
|
|||
p.removeImplicit(keyContext)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we have a concrete key trying to override a previous
|
||||
// key, which is *always* wrong.
|
||||
// Otherwise, we have a concrete key trying to override a previous key,
|
||||
// which is *always* wrong.
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
}
|
||||
|
||||
|
|
@ -683,8 +716,11 @@ func stripFirstNewline(s string) string {
|
|||
// the next newline. After a line-ending backslash, all whitespace is removed
|
||||
// until the next non-whitespace character.
|
||||
func (p *parser) stripEscapedNewlines(s string) string {
|
||||
var b strings.Builder
|
||||
var i int
|
||||
var (
|
||||
b strings.Builder
|
||||
i int
|
||||
)
|
||||
b.Grow(len(s))
|
||||
for {
|
||||
ix := strings.Index(s[i:], `\`)
|
||||
if ix < 0 {
|
||||
|
|
@ -714,9 +750,8 @@ func (p *parser) stripEscapedNewlines(s string) string {
|
|||
continue
|
||||
}
|
||||
if !strings.Contains(s[i:j], "\n") {
|
||||
// This is not a line-ending backslash.
|
||||
// (It's a bad escape sequence, but we can let
|
||||
// replaceEscapes catch it.)
|
||||
// This is not a line-ending backslash. (It's a bad escape sequence,
|
||||
// but we can let replaceEscapes catch it.)
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
|
@ -727,79 +762,78 @@ func (p *parser) stripEscapedNewlines(s string) string {
|
|||
}
|
||||
|
||||
func (p *parser) replaceEscapes(it item, str string) string {
|
||||
replaced := make([]rune, 0, len(str))
|
||||
s := []byte(str)
|
||||
r := 0
|
||||
for r < len(s) {
|
||||
if s[r] != '\\' {
|
||||
c, size := utf8.DecodeRune(s[r:])
|
||||
r += size
|
||||
replaced = append(replaced, c)
|
||||
var (
|
||||
b strings.Builder
|
||||
skip = 0
|
||||
)
|
||||
b.Grow(len(str))
|
||||
for i, c := range str {
|
||||
if skip > 0 {
|
||||
skip--
|
||||
continue
|
||||
}
|
||||
r += 1
|
||||
if r >= len(s) {
|
||||
if c != '\\' {
|
||||
b.WriteRune(c)
|
||||
continue
|
||||
}
|
||||
|
||||
if i >= len(str) {
|
||||
p.bug("Escape sequence at end of string.")
|
||||
return ""
|
||||
}
|
||||
switch s[r] {
|
||||
switch str[i+1] {
|
||||
default:
|
||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
||||
p.bug("Expected valid escape code after \\, but got %q.", str[i+1])
|
||||
case ' ', '\t':
|
||||
p.panicItemf(it, "invalid escape: '\\%c'", s[r])
|
||||
p.panicItemf(it, "invalid escape: '\\%c'", str[i+1])
|
||||
case 'b':
|
||||
replaced = append(replaced, rune(0x0008))
|
||||
r += 1
|
||||
b.WriteByte(0x08)
|
||||
skip = 1
|
||||
case 't':
|
||||
replaced = append(replaced, rune(0x0009))
|
||||
r += 1
|
||||
b.WriteByte(0x09)
|
||||
skip = 1
|
||||
case 'n':
|
||||
replaced = append(replaced, rune(0x000A))
|
||||
r += 1
|
||||
b.WriteByte(0x0a)
|
||||
skip = 1
|
||||
case 'f':
|
||||
replaced = append(replaced, rune(0x000C))
|
||||
r += 1
|
||||
b.WriteByte(0x0c)
|
||||
skip = 1
|
||||
case 'r':
|
||||
replaced = append(replaced, rune(0x000D))
|
||||
r += 1
|
||||
b.WriteByte(0x0d)
|
||||
skip = 1
|
||||
case 'e':
|
||||
if p.tomlNext {
|
||||
replaced = append(replaced, rune(0x001B))
|
||||
r += 1
|
||||
b.WriteByte(0x1b)
|
||||
skip = 1
|
||||
}
|
||||
case '"':
|
||||
replaced = append(replaced, rune(0x0022))
|
||||
r += 1
|
||||
b.WriteByte(0x22)
|
||||
skip = 1
|
||||
case '\\':
|
||||
replaced = append(replaced, rune(0x005C))
|
||||
r += 1
|
||||
b.WriteByte(0x5c)
|
||||
skip = 1
|
||||
// The lexer guarantees the correct number of characters are present;
|
||||
// don't need to check here.
|
||||
case 'x':
|
||||
if p.tomlNext {
|
||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 3
|
||||
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
|
||||
b.WriteRune(escaped)
|
||||
skip = 3
|
||||
}
|
||||
case 'u':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 5
|
||||
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
|
||||
b.WriteRune(escaped)
|
||||
skip = 5
|
||||
case 'U':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 9
|
||||
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10])
|
||||
b.WriteRune(escaped)
|
||||
skip = 9
|
||||
}
|
||||
}
|
||||
return string(replaced)
|
||||
return b.String()
|
||||
}
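For the unicode escapes handled above, asciiEscapeToUnicode now takes the hex digits as a string slice directly; the decoding itself is just strconv.ParseUint followed by a rune conversion. A small illustrative sketch of that step:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// decodeUnicodeEscape turns the hex digits of a \uXXXX or \UXXXXXXXX escape
// into a rune, the same way asciiEscapeToUnicode above does.
func decodeUnicodeEscape(hexDigits string) (rune, error) {
	v, err := strconv.ParseUint(strings.ToLower(hexDigits), 16, 32)
	if err != nil {
		return 0, err
	}
	return rune(v), nil
}

func main() {
	r, err := decodeUnicodeEscape("00E9") // digits from an illustrative \u00E9 escape
	if err != nil {
		panic(err)
	}
	fmt.Printf("%c\n", r) // é
}
```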
|
||||
|
||||
func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
|
||||
s := string(bs)
|
||||
func (p *parser) asciiEscapeToUnicode(it item, s string) rune {
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
|
||||
|
|
|
|||
8
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
|
|
@ -25,10 +25,8 @@ type field struct {
|
|||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
|
|
@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool {
|
|||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
|
|
|
|||
11
vendor/github.com/BurntSushi/toml/type_toml.go
generated
vendored
|
|
@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool {
|
|||
|
||||
type tomlBaseType string
|
||||
|
||||
func (btype tomlBaseType) typeString() string {
|
||||
return string(btype)
|
||||
}
|
||||
|
||||
func (btype tomlBaseType) String() string {
|
||||
return btype.typeString()
|
||||
}
|
||||
func (btype tomlBaseType) typeString() string { return string(btype) }
|
||||
func (btype tomlBaseType) String() string { return btype.typeString() }
|
||||
|
||||
var (
|
||||
tomlInteger tomlBaseType = "Integer"
|
||||
|
|
@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
|||
return tomlFloat
|
||||
case itemDatetime:
|
||||
return tomlDatetime
|
||||
case itemString:
|
||||
case itemString, itemStringEsc:
|
||||
return tomlString
|
||||
case itemMultilineString:
|
||||
return tomlString
|
||||
|
|
|
|||
37
vendor/github.com/bradleyfalzon/ghinstallation/v2/.golangci.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
version: "2"
|
||||
linters:
|
||||
default: none
|
||||
enable:
|
||||
- errcheck
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- gosec
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- promlinter
|
||||
- revive
|
||||
- staticcheck
|
||||
- unconvert
|
||||
- unused
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
4
vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go
generated
vendored
|
|
@ -4,8 +4,8 @@ import (
|
|||
"crypto/rsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
|
|
@ -30,7 +30,7 @@ type AppsTransport struct {
|
|||
|
||||
// NewAppsTransportKeyFromFile returns a AppsTransport using a private key from file.
|
||||
func NewAppsTransportKeyFromFile(tr http.RoundTripper, appID int64, privateKeyFile string) (*AppsTransport, error) {
|
||||
privateKey, err := ioutil.ReadFile(privateKeyFile)
|
||||
privateKey, err := os.ReadFile(privateKeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read private key: %s", err)
|
||||
}
|
||||
|
|
|
|||
6
vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go
generated
vendored
|
|
@ -7,13 +7,13 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-github/v60/github"
|
||||
"github.com/google/go-github/v71/github"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -73,7 +73,7 @@ var _ http.RoundTripper = &Transport{}
|
|||
|
||||
// NewKeyFromFile returns a Transport using a private key from file.
|
||||
func NewKeyFromFile(tr http.RoundTripper, appID, installationID int64, privateKeyFile string) (*Transport, error) {
|
||||
privateKey, err := ioutil.ReadFile(privateKeyFile)
|
||||
privateKey, err := os.ReadFile(privateKeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read private key: %s", err)
|
||||
}
|
||||
|
|
|
|||
99
vendor/github.com/cloudbase/garm-provider-common/execution/common/commands.go
generated
vendored
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
// Copyright 2023 Cloudbase Solutions SRL
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
gErrors "github.com/cloudbase/garm-provider-common/errors"
|
||||
"github.com/cloudbase/garm-provider-common/params"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
type ExecutionCommand string
|
||||
|
||||
const (
|
||||
CreateInstanceCommand ExecutionCommand = "CreateInstance"
|
||||
DeleteInstanceCommand ExecutionCommand = "DeleteInstance"
|
||||
GetInstanceCommand ExecutionCommand = "GetInstance"
|
||||
ListInstancesCommand ExecutionCommand = "ListInstances"
|
||||
StartInstanceCommand ExecutionCommand = "StartInstance"
|
||||
StopInstanceCommand ExecutionCommand = "StopInstance"
|
||||
RemoveAllInstancesCommand ExecutionCommand = "RemoveAllInstances"
|
||||
GetVersionCommand ExecutionCommand = "GetVersion"
|
||||
)
|
||||
|
||||
// V0.1.1 commands
|
||||
const (
|
||||
GetSupportedInterfaceVersionsCommand ExecutionCommand = "GetSupportedInterfaceVersions"
|
||||
ValidatePoolInfoCommand ExecutionCommand = "ValidatePoolInfo"
|
||||
GetConfigJSONSchemaCommand ExecutionCommand = "GetConfigJSONSchema"
|
||||
GetExtraSpecsJSONSchemaCommand ExecutionCommand = "GetExtraSpecsJSONSchema"
|
||||
)
|
||||
|
||||
const (
|
||||
// ExitCodeNotFound is an exit code that indicates a Not Found error
|
||||
ExitCodeNotFound int = 30
|
||||
// ExitCodeDuplicate is an exit code that indicates a duplicate error
|
||||
ExitCodeDuplicate int = 31
|
||||
)
|
||||
|
||||
func GetBoostrapParamsFromStdin(c ExecutionCommand) (params.BootstrapInstance, error) {
|
||||
var bootstrapParams params.BootstrapInstance
|
||||
if c == CreateInstanceCommand {
|
||||
if isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd()) {
|
||||
return params.BootstrapInstance{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
|
||||
}
|
||||
|
||||
var data bytes.Buffer
|
||||
if _, err := io.Copy(&data, os.Stdin); err != nil {
|
||||
return params.BootstrapInstance{}, fmt.Errorf("failed to copy bootstrap params")
|
||||
}
|
||||
|
||||
if data.Len() == 0 {
|
||||
return params.BootstrapInstance{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data.Bytes(), &bootstrapParams); err != nil {
|
||||
return params.BootstrapInstance{}, fmt.Errorf("failed to decode instance params: %w", err)
|
||||
}
|
||||
if bootstrapParams.ExtraSpecs == nil {
|
||||
// Initialize ExtraSpecs as an empty JSON object
|
||||
bootstrapParams.ExtraSpecs = json.RawMessage([]byte("{}"))
|
||||
}
|
||||
|
||||
return bootstrapParams, nil
|
||||
}
|
||||
|
||||
// If the command is not CreateInstance, we don't need to read from stdin
|
||||
return params.BootstrapInstance{}, nil
|
||||
}
|
||||
|
||||
func ResolveErrorToExitCode(err error) int {
|
||||
if err != nil {
|
||||
if errors.Is(err, gErrors.ErrNotFound) {
|
||||
return ExitCodeNotFound
|
||||
} else if errors.Is(err, gErrors.ErrDuplicateEntity) {
|
||||
return ExitCodeDuplicate
|
||||
}
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
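The new commands.go keeps the error-to-exit-code mapping from the old execution package. A hedged sketch of how a provider binary might use it; the import paths follow the vendored module shown above, and the main function itself is purely illustrative:

```go
package main

import (
	"fmt"
	"os"

	gErrors "github.com/cloudbase/garm-provider-common/errors"
	"github.com/cloudbase/garm-provider-common/execution/common"
)

func main() {
	// Pretend the provider failed to find the requested instance; the common
	// package maps wrapped ErrNotFound to exit code 30 (ExitCodeNotFound).
	err := fmt.Errorf("instance lookup: %w", gErrors.ErrNotFound)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(common.ResolveErrorToExitCode(err)) // 30 for not-found, 31 for duplicates, 1 otherwise
	}
}
```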
|
||||
|
|
@ -12,7 +12,7 @@
|
|||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package execution
|
||||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
|
@ -20,7 +20,7 @@ import (
|
|||
"github.com/cloudbase/garm-provider-common/params"
|
||||
)
|
||||
|
||||
// ExternalProvider defines an interface that external providers need to implement.
|
||||
// ExternalProvider defines a common interface that external providers need to implement.
|
||||
// This is very similar to the common.Provider interface, and was redefined here to
|
||||
// decouple it, in case it may diverge from native providers.
|
||||
type ExternalProvider interface {
|
||||
|
|
@ -38,4 +38,6 @@ type ExternalProvider interface {
|
|||
Stop(ctx context.Context, instance string, force bool) error
|
||||
// Start boots up an instance.
|
||||
Start(ctx context.Context, instance string) error
|
||||
// GetVersion returns the version of the provider.
|
||||
GetVersion(ctx context.Context) string
|
||||
}
|
||||
|
|
@ -12,16 +12,11 @@
|
|||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package execution
|
||||
|
||||
type ExecutionCommand string
|
||||
package common
|
||||
|
||||
const (
|
||||
CreateInstanceCommand ExecutionCommand = "CreateInstance"
|
||||
DeleteInstanceCommand ExecutionCommand = "DeleteInstance"
|
||||
GetInstanceCommand ExecutionCommand = "GetInstance"
|
||||
ListInstancesCommand ExecutionCommand = "ListInstances"
|
||||
StartInstanceCommand ExecutionCommand = "StartInstance"
|
||||
StopInstanceCommand ExecutionCommand = "StopInstance"
|
||||
RemoveAllInstancesCommand ExecutionCommand = "RemoveAllInstances"
|
||||
// Version v0.1.0
|
||||
Version010 = "v0.1.0"
|
||||
// Version v0.1.1
|
||||
Version011 = "v0.1.1"
|
||||
)
|
||||
204
vendor/github.com/cloudbase/garm-provider-common/execution/execution.go
generated
vendored
|
|
@ -1,204 +0,0 @@
|
|||
// Copyright 2023 Cloudbase Solutions SRL
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package execution

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"

	gErrors "github.com/cloudbase/garm-provider-common/errors"
	"github.com/cloudbase/garm-provider-common/params"

	"github.com/mattn/go-isatty"
)

const (
	// ExitCodeNotFound is an exit code that indicates a Not Found error
	ExitCodeNotFound int = 30
	// ExitCodeDuplicate is an exit code that indicates a duplicate error
	ExitCodeDuplicate int = 31
)

func ResolveErrorToExitCode(err error) int {
	if err != nil {
		if errors.Is(err, gErrors.ErrNotFound) {
			return ExitCodeNotFound
		} else if errors.Is(err, gErrors.ErrDuplicateEntity) {
			return ExitCodeDuplicate
		}
		return 1
	}
	return 0
}

func GetEnvironment() (Environment, error) {
	env := Environment{
		Command:            ExecutionCommand(os.Getenv("GARM_COMMAND")),
		ControllerID:       os.Getenv("GARM_CONTROLLER_ID"),
		PoolID:             os.Getenv("GARM_POOL_ID"),
		ProviderConfigFile: os.Getenv("GARM_PROVIDER_CONFIG_FILE"),
		InstanceID:         os.Getenv("GARM_INSTANCE_ID"),
	}

	// If this is a CreateInstance command, we need to get the bootstrap params
	// from stdin
	if env.Command == CreateInstanceCommand {
		if isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd()) {
			return Environment{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
		}

		var data bytes.Buffer
		if _, err := io.Copy(&data, os.Stdin); err != nil {
			return Environment{}, fmt.Errorf("failed to copy bootstrap params")
		}

		if data.Len() == 0 {
			return Environment{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
		}

		var bootstrapParams params.BootstrapInstance
		if err := json.Unmarshal(data.Bytes(), &bootstrapParams); err != nil {
			return Environment{}, fmt.Errorf("failed to decode instance params: %w", err)
		}
		if bootstrapParams.ExtraSpecs == nil {
			// Initialize ExtraSpecs as an empty JSON object
			bootstrapParams.ExtraSpecs = json.RawMessage([]byte("{}"))
		}
		env.BootstrapParams = bootstrapParams
	}

	if err := env.Validate(); err != nil {
		return Environment{}, fmt.Errorf("failed to validate execution environment: %w", err)
	}

	return env, nil
}

type Environment struct {
	Command            ExecutionCommand
	ControllerID       string
	PoolID             string
	ProviderConfigFile string
	InstanceID         string
	BootstrapParams    params.BootstrapInstance
}

func (e Environment) Validate() error {
	if e.Command == "" {
		return fmt.Errorf("missing GARM_COMMAND")
	}

	if e.ProviderConfigFile == "" {
		return fmt.Errorf("missing GARM_PROVIDER_CONFIG_FILE")
	}

	if _, err := os.Lstat(e.ProviderConfigFile); err != nil {
		return fmt.Errorf("error accessing config file: %w", err)
	}

	if e.ControllerID == "" {
		return fmt.Errorf("missing GARM_CONTROLLER_ID")
	}

	switch e.Command {
	case CreateInstanceCommand:
		if e.BootstrapParams.Name == "" {
			return fmt.Errorf("missing bootstrap params")
		}
		if e.ControllerID == "" {
			return fmt.Errorf("missing controller ID")
		}
		if e.PoolID == "" {
			return fmt.Errorf("missing pool ID")
		}
	case DeleteInstanceCommand, GetInstanceCommand,
		StartInstanceCommand, StopInstanceCommand:
		if e.InstanceID == "" {
			return fmt.Errorf("missing instance ID")
		}
	case ListInstancesCommand:
		if e.PoolID == "" {
			return fmt.Errorf("missing pool ID")
		}
	case RemoveAllInstancesCommand:
		if e.ControllerID == "" {
			return fmt.Errorf("missing controller ID")
		}
	default:
		return fmt.Errorf("unknown GARM_COMMAND: %s", e.Command)
	}
	return nil
}

func Run(ctx context.Context, provider ExternalProvider, env Environment) (string, error) {
	var ret string
	switch env.Command {
	case CreateInstanceCommand:
		instance, err := provider.CreateInstance(ctx, env.BootstrapParams)
		if err != nil {
			return "", fmt.Errorf("failed to create instance in provider: %w", err)
		}

		asJs, err := json.Marshal(instance)
		if err != nil {
			return "", fmt.Errorf("failed to marshal response: %w", err)
		}
		ret = string(asJs)
	case GetInstanceCommand:
		instance, err := provider.GetInstance(ctx, env.InstanceID)
		if err != nil {
			return "", fmt.Errorf("failed to get instance from provider: %w", err)
		}
		asJs, err := json.Marshal(instance)
		if err != nil {
			return "", fmt.Errorf("failed to marshal response: %w", err)
		}
		ret = string(asJs)
	case ListInstancesCommand:
		instances, err := provider.ListInstances(ctx, env.PoolID)
		if err != nil {
			return "", fmt.Errorf("failed to list instances from provider: %w", err)
		}
		asJs, err := json.Marshal(instances)
		if err != nil {
			return "", fmt.Errorf("failed to marshal response: %w", err)
		}
		ret = string(asJs)
	case DeleteInstanceCommand:
		if err := provider.DeleteInstance(ctx, env.InstanceID); err != nil {
			return "", fmt.Errorf("failed to delete instance from provider: %w", err)
		}
	case RemoveAllInstancesCommand:
		if err := provider.RemoveAllInstances(ctx); err != nil {
			return "", fmt.Errorf("failed to destroy environment: %w", err)
		}
	case StartInstanceCommand:
		if err := provider.Start(ctx, env.InstanceID); err != nil {
			return "", fmt.Errorf("failed to start instance: %w", err)
		}
	case StopInstanceCommand:
		if err := provider.Stop(ctx, env.InstanceID, true); err != nil {
			return "", fmt.Errorf("failed to stop instance: %w", err)
		}
	default:
		return "", fmt.Errorf("invalid command: %s", env.Command)
	}
	return ret, nil
}
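For orientation, here is a hedged sketch of how an external provider binary would typically tie the helpers above together: GetEnvironment reads GARM_COMMAND, GARM_CONTROLLER_ID, GARM_POOL_ID, GARM_PROVIDER_CONFIG_FILE and GARM_INSTANCE_ID (plus the JSON bootstrap params on stdin for CreateInstance), Run dispatches to the provider implementation, and ResolveErrorToExitCode maps failures onto the exit codes defined above. The import path is inferred from the paths used in this file, and NewMyProvider is a hypothetical constructor for any type that satisfies ExternalProvider.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/cloudbase/garm-provider-common/execution"
)

func main() {
	ctx, stop := context.WithCancel(context.Background())
	defer stop()

	// Reads GARM_COMMAND, GARM_CONTROLLER_ID, GARM_POOL_ID,
	// GARM_PROVIDER_CONFIG_FILE and GARM_INSTANCE_ID, and for
	// CreateInstance also decodes the bootstrap params from stdin.
	env, err := execution.GetEnvironment()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// NewMyProvider is hypothetical; any type implementing
	// execution.ExternalProvider works here.
	prov, err := NewMyProvider(env.ProviderConfigFile, env.ControllerID)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Run dispatches on env.Command and returns the JSON (if any)
	// that should be printed on stdout.
	out, err := execution.Run(ctx, prov, env)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(execution.ResolveErrorToExitCode(err))
	}
	if out != "" {
		fmt.Fprint(os.Stdout, out)
	}
}

Whatever Run returns is written to stdout so the calling GARM process can decode the instance details for the create, get and list commands.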
1 vendor/github.com/go-logr/logr/README.md generated vendored
@@ -1,6 +1,7 @@
# A minimal logging API for Go

[](https://pkg.go.dev/github.com/go-logr/logr)
[](https://goreportcard.com/report/github.com/go-logr/logr)
[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)

logr offers an(other) opinion on how Go programs and libraries can do logging
185
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
185
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
|
|
@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
|||
// implementation. It should be constructed with NewFormatter. Some of
|
||||
// its methods directly implement logr.LogSink.
|
||||
type Formatter struct {
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
parentValuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
group string // for slog groups
|
||||
groupDepth int
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
groupName string // for slog groups
|
||||
groups []groupDef
|
||||
}
|
||||
|
||||
// outputFormat indicates which outputFormat to use.
|
||||
|
|
@ -257,6 +256,13 @@ const (
|
|||
outputJSON
|
||||
)
|
||||
|
||||
// groupDef represents a saved group. The values may be empty, but we don't
|
||||
// know if we need to render the group until the final record is rendered.
|
||||
type groupDef struct {
|
||||
name string
|
||||
values string
|
||||
}
|
||||
|
||||
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
|
||||
type PseudoStruct []any
|
||||
|
||||
|
|
@ -264,76 +270,102 @@ type PseudoStruct []any
|
|||
func (f Formatter) render(builtins, args []any) string {
|
||||
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('{') // for the whole line
|
||||
buf.WriteByte('{') // for the whole record
|
||||
}
|
||||
|
||||
// Render builtins
|
||||
vals := builtins
|
||||
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
||||
f.flatten(buf, vals, false) // keys are ours, no need to escape
|
||||
continuing := len(builtins) > 0
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
// Turn the inner-most group into a string
|
||||
argsStr := func() string {
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
|
||||
vals = args
|
||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, true) // escape user-provided keys
|
||||
|
||||
return buf.String()
|
||||
}()
|
||||
|
||||
// Render the stack of groups from the inside out.
|
||||
bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
|
||||
for i := len(f.groups) - 1; i >= 0; i-- {
|
||||
grp := &f.groups[i]
|
||||
if grp.values == "" && bodyStr == "" {
|
||||
// no contents, so we must elide the whole group
|
||||
continue
|
||||
}
|
||||
bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
|
||||
}
|
||||
|
||||
if bodyStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
groupDepth := f.groupDepth
|
||||
if f.group != "" {
|
||||
if f.valuesStr != "" || len(args) != 0 {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
} else {
|
||||
// The group was empty
|
||||
groupDepth--
|
||||
}
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
vals = args
|
||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
||||
|
||||
for i := 0; i < groupDepth; i++ {
|
||||
buf.WriteByte('}') // for the groups
|
||||
buf.WriteString(bodyStr)
|
||||
}
|
||||
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}') // for the whole line
|
||||
buf.WriteByte('}') // for the whole record
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// flatten renders a list of key-value pairs into a buffer. If continuing is
|
||||
// true, it assumes that the buffer has previous values and will emit a
|
||||
// separator (which depends on the output format) before the first pair it
|
||||
// writes. If escapeKeys is true, the keys are assumed to have
|
||||
// non-JSON-compatible characters in them and must be evaluated for escapes.
|
||||
// renderGroup returns a string representation of the named group with rendered
|
||||
// values and args. If the name is empty, this will return the values and args,
|
||||
// joined. If the name is not empty, this will return a single key-value pair,
|
||||
// where the value is a grouping of the values and args. If the values and
|
||||
// args are both empty, this will return an empty string, even if the name was
|
||||
// specified.
|
||||
func (f Formatter) renderGroup(name string, values string, args string) string {
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
|
||||
needClosingBrace := false
|
||||
if name != "" && (values != "" || args != "") {
|
||||
buf.WriteString(f.quoted(name, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{')
|
||||
needClosingBrace = true
|
||||
}
|
||||
|
||||
continuing := false
|
||||
if values != "" {
|
||||
buf.WriteString(values)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
if args != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(args)
|
||||
}
|
||||
|
||||
if needClosingBrace {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
|
||||
// true, the keys are assumed to have non-JSON-compatible characters in them
|
||||
// and must be evaluated for escapes.
|
||||
//
|
||||
// This function returns a potentially modified version of kvList, which
|
||||
// ensures that there is a value for every key (adding a value if needed) and
|
||||
// that each key is a string (substituting a key if needed).
|
||||
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
|
||||
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
|
||||
// This logic overlaps with sanitize() but saves one type-cast per key,
|
||||
// which can be measurable.
|
||||
if len(kvList)%2 != 0 {
|
||||
|
|
@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
|||
}
|
||||
v := kvList[i+1]
|
||||
|
||||
if i > 0 || continuing {
|
||||
if i > 0 {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(f.comma())
|
||||
} else {
|
||||
|
|
@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any {
|
|||
// startGroup opens a new group scope (basically a sub-struct), which locks all
|
||||
// the current saved values and starts them anew. This is needed to satisfy
|
||||
// slog.
|
||||
func (f *Formatter) startGroup(group string) {
|
||||
func (f *Formatter) startGroup(name string) {
|
||||
// Unnamed groups are just inlined.
|
||||
if group == "" {
|
||||
if name == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Any saved values can no longer be changed.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
continuing := false
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
if f.group != "" && f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
|
||||
// NOTE: We don't close the scope here - that's done later, when a log line
|
||||
// is actually rendered (because we have N scopes to close).
|
||||
|
||||
f.parentValuesStr = buf.String()
|
||||
n := len(f.groups)
|
||||
f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
|
||||
|
||||
// Start collecting new values.
|
||||
f.group = group
|
||||
f.groupDepth++
|
||||
f.groupName = name
|
||||
f.valuesStr = ""
|
||||
f.values = nil
|
||||
}
|
||||
|
|
@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) {
|
|||
|
||||
// Pre-render values, so we don't have to do it on each Info/Error call.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
f.flatten(buf, vals, false, true) // escape user-provided keys
|
||||
f.flatten(buf, vals, true) // escape user-provided keys
|
||||
f.valuesStr = buf.String()
|
||||
}
|
||||
|
||||
|
|
|
|||
31
vendor/github.com/go-openapi/errors/.golangci.yml
generated
vendored
31
vendor/github.com/go-openapi/errors/.golangci.yml
generated
vendored
|
|
@ -1,12 +1,6 @@
|
|||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: true
|
||||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 45
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 200
|
||||
goconst:
|
||||
|
|
@ -16,8 +10,6 @@ linters-settings:
|
|||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- errname # this repo doesn't follow the convention advised by this linter
|
||||
- maligned
|
||||
- unparam
|
||||
- lll
|
||||
- gochecknoinits
|
||||
|
|
@ -30,9 +22,6 @@ linters:
|
|||
- wrapcheck
|
||||
- testpackage
|
||||
- nlreturn
|
||||
- gomnd
|
||||
- exhaustivestruct
|
||||
- goerr113
|
||||
- errorlint
|
||||
- nestif
|
||||
- godot
|
||||
|
|
@ -40,7 +29,6 @@ linters:
|
|||
- paralleltest
|
||||
- tparallel
|
||||
- thelper
|
||||
- ifshort
|
||||
- exhaustruct
|
||||
- varnamelen
|
||||
- gci
|
||||
|
|
@ -53,10 +41,15 @@ linters:
|
|||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
- deadcode
|
||||
- interfacer
|
||||
- scopelint
|
||||
- varcheck
|
||||
- structcheck
|
||||
- golint
|
||||
- nosnakecase
|
||||
#- deadcode
|
||||
#- interfacer
|
||||
#- scopelint
|
||||
#- varcheck
|
||||
#- structcheck
|
||||
#- golint
|
||||
#- nosnakecase
|
||||
#- maligned
|
||||
#- goerr113
|
||||
#- ifshort
|
||||
#- gomnd
|
||||
#- exhaustivestruct
|
||||
|
|
|
|||
2 vendor/github.com/go-openapi/errors/api.go generated vendored
@@ -185,7 +185,7 @@ func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
 }

 func asHTTPCode(input int) int {
-	if input >= 600 {
+	if input >= maximumValidHTTPCode {
 		return DefaultHTTPCode
 	}
 	return input
2
vendor/github.com/go-openapi/errors/headers.go
generated
vendored
2
vendor/github.com/go-openapi/errors/headers.go
generated
vendored
|
|
@ -21,7 +21,7 @@ import (
|
|||
)
|
||||
|
||||
// Validation represents a failure of a precondition
|
||||
type Validation struct {
|
||||
type Validation struct { //nolint: errname
|
||||
code int32
|
||||
Name string
|
||||
In string
|
||||
|
|
|
|||
2
vendor/github.com/go-openapi/errors/middleware.go
generated
vendored
2
vendor/github.com/go-openapi/errors/middleware.go
generated
vendored
|
|
@ -22,7 +22,7 @@ import (
|
|||
|
||||
// APIVerificationFailed is an error that contains all the missing info for a mismatched section
|
||||
// between the api registrations and the api spec
|
||||
type APIVerificationFailed struct {
|
||||
type APIVerificationFailed struct { //nolint: errname
|
||||
Section string `json:"section,omitempty"`
|
||||
MissingSpecification []string `json:"missingSpecification,omitempty"`
|
||||
MissingRegistration []string `json:"missingRegistration,omitempty"`
|
||||
|
|
|
|||
3
vendor/github.com/go-openapi/errors/parsing.go
generated
vendored
3
vendor/github.com/go-openapi/errors/parsing.go
generated
vendored
|
|
@ -17,6 +17,7 @@ package errors
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ParseError represents a parsing error
|
||||
|
|
@ -68,7 +69,7 @@ func NewParseError(name, in, value string, reason error) *ParseError {
|
|||
msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason)
|
||||
}
|
||||
return &ParseError{
|
||||
code: 400,
|
||||
code: http.StatusBadRequest,
|
||||
Name: name,
|
||||
In: in,
|
||||
Value: value,
|
||||
|
|
|
|||
110
vendor/github.com/go-openapi/errors/schema.go
generated
vendored
110
vendor/github.com/go-openapi/errors/schema.go
generated
vendored
|
|
@ -17,6 +17,7 @@ package errors
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
|
@ -32,12 +33,12 @@ const (
|
|||
patternFail = "%s in %s should match '%s'"
|
||||
enumFail = "%s in %s should be one of %v"
|
||||
multipleOfFail = "%s in %s should be a multiple of %v"
|
||||
maxIncFail = "%s in %s should be less than or equal to %v"
|
||||
maxExcFail = "%s in %s should be less than %v"
|
||||
maximumIncFail = "%s in %s should be less than or equal to %v"
|
||||
maximumExcFail = "%s in %s should be less than %v"
|
||||
minIncFail = "%s in %s should be greater than or equal to %v"
|
||||
minExcFail = "%s in %s should be greater than %v"
|
||||
uniqueFail = "%s in %s shouldn't contain duplicates"
|
||||
maxItemsFail = "%s in %s should have at most %d items"
|
||||
maximumItemsFail = "%s in %s should have at most %d items"
|
||||
minItemsFail = "%s in %s should have at least %d items"
|
||||
typeFailNoIn = "%s must be of type %s"
|
||||
typeFailWithDataNoIn = "%s must be of type %s: %q"
|
||||
|
|
@ -49,12 +50,12 @@ const (
|
|||
patternFailNoIn = "%s should match '%s'"
|
||||
enumFailNoIn = "%s should be one of %v"
|
||||
multipleOfFailNoIn = "%s should be a multiple of %v"
|
||||
maxIncFailNoIn = "%s should be less than or equal to %v"
|
||||
maxExcFailNoIn = "%s should be less than %v"
|
||||
maximumIncFailNoIn = "%s should be less than or equal to %v"
|
||||
maximumExcFailNoIn = "%s should be less than %v"
|
||||
minIncFailNoIn = "%s should be greater than or equal to %v"
|
||||
minExcFailNoIn = "%s should be greater than %v"
|
||||
uniqueFailNoIn = "%s shouldn't contain duplicates"
|
||||
maxItemsFailNoIn = "%s should have at most %d items"
|
||||
maximumItemsFailNoIn = "%s should have at most %d items"
|
||||
minItemsFailNoIn = "%s should have at least %d items"
|
||||
noAdditionalItems = "%s in %s can't have additional items"
|
||||
noAdditionalItemsNoIn = "%s can't have additional items"
|
||||
|
|
@ -69,14 +70,17 @@ const (
|
|||
multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v"
|
||||
)
|
||||
|
||||
const maximumValidHTTPCode = 600
|
||||
|
||||
// All code responses can be used to differentiate errors for different handling
|
||||
// by the consuming program
|
||||
const (
|
||||
// CompositeErrorCode remains 422 for backwards-compatibility
|
||||
// and to separate it from validation errors with cause
|
||||
CompositeErrorCode = 422
|
||||
CompositeErrorCode = http.StatusUnprocessableEntity
|
||||
|
||||
// InvalidTypeCode is used for any subclass of invalid types
|
||||
InvalidTypeCode = 600 + iota
|
||||
InvalidTypeCode = maximumValidHTTPCode + iota
|
||||
RequiredFailCode
|
||||
TooLongFailCode
|
||||
TooShortFailCode
|
||||
|
|
@ -298,10 +302,10 @@ func DuplicateItems(name, in string) *Validation {
|
|||
}
|
||||
|
||||
// TooManyItems error for when an array contains too many items
|
||||
func TooManyItems(name, in string, max int64, value interface{}) *Validation {
|
||||
msg := fmt.Sprintf(maxItemsFail, name, in, max)
|
||||
func TooManyItems(name, in string, maximum int64, value interface{}) *Validation {
|
||||
msg := fmt.Sprintf(maximumItemsFail, name, in, maximum)
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(maxItemsFailNoIn, name, max)
|
||||
msg = fmt.Sprintf(maximumItemsFailNoIn, name, maximum)
|
||||
}
|
||||
|
||||
return &Validation{
|
||||
|
|
@ -314,10 +318,10 @@ func TooManyItems(name, in string, max int64, value interface{}) *Validation {
|
|||
}
|
||||
|
||||
// TooFewItems error for when an array contains too few items
|
||||
func TooFewItems(name, in string, min int64, value interface{}) *Validation {
|
||||
msg := fmt.Sprintf(minItemsFail, name, in, min)
|
||||
func TooFewItems(name, in string, minimum int64, value interface{}) *Validation {
|
||||
msg := fmt.Sprintf(minItemsFail, name, in, minimum)
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(minItemsFailNoIn, name, min)
|
||||
msg = fmt.Sprintf(minItemsFailNoIn, name, minimum)
|
||||
}
|
||||
return &Validation{
|
||||
code: MinItemsFailCode,
|
||||
|
|
@ -328,21 +332,21 @@ func TooFewItems(name, in string, min int64, value interface{}) *Validation {
|
|||
}
|
||||
}
|
||||
|
||||
// ExceedsMaximumInt error for when maximum validation fails
|
||||
func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation {
|
||||
// ExceedsMaximumInt error for when maximumimum validation fails
|
||||
func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := maxIncFailNoIn
|
||||
m := maximumIncFailNoIn
|
||||
if exclusive {
|
||||
m = maxExcFailNoIn
|
||||
m = maximumExcFailNoIn
|
||||
}
|
||||
message = fmt.Sprintf(m, name, max)
|
||||
message = fmt.Sprintf(m, name, maximum)
|
||||
} else {
|
||||
m := maxIncFail
|
||||
m := maximumIncFail
|
||||
if exclusive {
|
||||
m = maxExcFail
|
||||
m = maximumExcFail
|
||||
}
|
||||
message = fmt.Sprintf(m, name, in, max)
|
||||
message = fmt.Sprintf(m, name, in, maximum)
|
||||
}
|
||||
return &Validation{
|
||||
code: MaxFailCode,
|
||||
|
|
@ -353,21 +357,21 @@ func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interfa
|
|||
}
|
||||
}
|
||||
|
||||
// ExceedsMaximumUint error for when maximum validation fails
|
||||
func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation {
|
||||
// ExceedsMaximumUint error for when maximumimum validation fails
|
||||
func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := maxIncFailNoIn
|
||||
m := maximumIncFailNoIn
|
||||
if exclusive {
|
||||
m = maxExcFailNoIn
|
||||
m = maximumExcFailNoIn
|
||||
}
|
||||
message = fmt.Sprintf(m, name, max)
|
||||
message = fmt.Sprintf(m, name, maximum)
|
||||
} else {
|
||||
m := maxIncFail
|
||||
m := maximumIncFail
|
||||
if exclusive {
|
||||
m = maxExcFail
|
||||
m = maximumExcFail
|
||||
}
|
||||
message = fmt.Sprintf(m, name, in, max)
|
||||
message = fmt.Sprintf(m, name, in, maximum)
|
||||
}
|
||||
return &Validation{
|
||||
code: MaxFailCode,
|
||||
|
|
@ -378,21 +382,21 @@ func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value inter
|
|||
}
|
||||
}
|
||||
|
||||
// ExceedsMaximum error for when maximum validation fails
|
||||
func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation {
|
||||
// ExceedsMaximum error for when maximumimum validation fails
|
||||
func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := maxIncFailNoIn
|
||||
m := maximumIncFailNoIn
|
||||
if exclusive {
|
||||
m = maxExcFailNoIn
|
||||
m = maximumExcFailNoIn
|
||||
}
|
||||
message = fmt.Sprintf(m, name, max)
|
||||
message = fmt.Sprintf(m, name, maximum)
|
||||
} else {
|
||||
m := maxIncFail
|
||||
m := maximumIncFail
|
||||
if exclusive {
|
||||
m = maxExcFail
|
||||
m = maximumExcFail
|
||||
}
|
||||
message = fmt.Sprintf(m, name, in, max)
|
||||
message = fmt.Sprintf(m, name, in, maximum)
|
||||
}
|
||||
return &Validation{
|
||||
code: MaxFailCode,
|
||||
|
|
@ -404,20 +408,20 @@ func ExceedsMaximum(name, in string, max float64, exclusive bool, value interfac
|
|||
}
|
||||
|
||||
// ExceedsMinimumInt error for when minimum validation fails
|
||||
func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation {
|
||||
func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := minIncFailNoIn
|
||||
if exclusive {
|
||||
m = minExcFailNoIn
|
||||
}
|
||||
message = fmt.Sprintf(m, name, min)
|
||||
message = fmt.Sprintf(m, name, minimum)
|
||||
} else {
|
||||
m := minIncFail
|
||||
if exclusive {
|
||||
m = minExcFail
|
||||
}
|
||||
message = fmt.Sprintf(m, name, in, min)
|
||||
message = fmt.Sprintf(m, name, in, minimum)
|
||||
}
|
||||
return &Validation{
|
||||
code: MinFailCode,
|
||||
|
|
@ -429,20 +433,20 @@ func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interfa
|
|||
}
|
||||
|
||||
// ExceedsMinimumUint error for when minimum validation fails
|
||||
func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation {
|
||||
func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := minIncFailNoIn
|
||||
if exclusive {
|
||||
m = minExcFailNoIn
|
||||
}
|
||||
message = fmt.Sprintf(m, name, min)
|
||||
message = fmt.Sprintf(m, name, minimum)
|
||||
} else {
|
||||
m := minIncFail
|
||||
if exclusive {
|
||||
m = minExcFail
|
||||
}
|
||||
message = fmt.Sprintf(m, name, in, min)
|
||||
message = fmt.Sprintf(m, name, in, minimum)
|
||||
}
|
||||
return &Validation{
|
||||
code: MinFailCode,
|
||||
|
|
@ -454,20 +458,20 @@ func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value inter
|
|||
}
|
||||
|
||||
// ExceedsMinimum error for when minimum validation fails
|
||||
func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation {
|
||||
func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value interface{}) *Validation {
|
||||
var message string
|
||||
if in == "" {
|
||||
m := minIncFailNoIn
|
||||
if exclusive {
|
||||
m = minExcFailNoIn
|
||||
}
|
||||
message = fmt.Sprintf(m, name, min)
|
||||
message = fmt.Sprintf(m, name, minimum)
|
||||
} else {
|
||||
m := minIncFail
|
||||
if exclusive {
|
||||
m = minExcFail
|
||||
}
|
||||
message = fmt.Sprintf(m, name, in, min)
|
||||
message = fmt.Sprintf(m, name, in, minimum)
|
||||
}
|
||||
return &Validation{
|
||||
code: MinFailCode,
|
||||
|
|
@ -549,12 +553,12 @@ func ReadOnly(name, in string, value interface{}) *Validation {
|
|||
}
|
||||
|
||||
// TooLong error for when a string is too long
|
||||
func TooLong(name, in string, max int64, value interface{}) *Validation {
|
||||
func TooLong(name, in string, maximum int64, value interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(tooLongMessageNoIn, name, max)
|
||||
msg = fmt.Sprintf(tooLongMessageNoIn, name, maximum)
|
||||
} else {
|
||||
msg = fmt.Sprintf(tooLongMessage, name, in, max)
|
||||
msg = fmt.Sprintf(tooLongMessage, name, in, maximum)
|
||||
}
|
||||
return &Validation{
|
||||
code: TooLongFailCode,
|
||||
|
|
@ -566,12 +570,12 @@ func TooLong(name, in string, max int64, value interface{}) *Validation {
|
|||
}
|
||||
|
||||
// TooShort error for when a string is too short
|
||||
func TooShort(name, in string, min int64, value interface{}) *Validation {
|
||||
func TooShort(name, in string, minimum int64, value interface{}) *Validation {
|
||||
var msg string
|
||||
if in == "" {
|
||||
msg = fmt.Sprintf(tooShortMessageNoIn, name, min)
|
||||
msg = fmt.Sprintf(tooShortMessageNoIn, name, minimum)
|
||||
} else {
|
||||
msg = fmt.Sprintf(tooShortMessage, name, in, min)
|
||||
msg = fmt.Sprintf(tooShortMessage, name, in, minimum)
|
||||
}
|
||||
|
||||
return &Validation{
|
||||
|
|
|
|||
31
vendor/github.com/go-openapi/jsonpointer/.golangci.yml
generated
vendored
31
vendor/github.com/go-openapi/jsonpointer/.golangci.yml
generated
vendored
|
|
@ -1,12 +1,6 @@
|
|||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: true
|
||||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 45
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 200
|
||||
goconst:
|
||||
|
|
@ -16,7 +10,7 @@ linters-settings:
|
|||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- maligned
|
||||
- recvcheck
|
||||
- unparam
|
||||
- lll
|
||||
- gochecknoinits
|
||||
|
|
@ -29,9 +23,6 @@ linters:
|
|||
- wrapcheck
|
||||
- testpackage
|
||||
- nlreturn
|
||||
- gomnd
|
||||
- exhaustivestruct
|
||||
- goerr113
|
||||
- errorlint
|
||||
- nestif
|
||||
- godot
|
||||
|
|
@ -39,7 +30,6 @@ linters:
|
|||
- paralleltest
|
||||
- tparallel
|
||||
- thelper
|
||||
- ifshort
|
||||
- exhaustruct
|
||||
- varnamelen
|
||||
- gci
|
||||
|
|
@ -52,10 +42,15 @@ linters:
|
|||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
- deadcode
|
||||
- interfacer
|
||||
- scopelint
|
||||
- varcheck
|
||||
- structcheck
|
||||
- golint
|
||||
- nosnakecase
|
||||
#- deadcode
|
||||
#- interfacer
|
||||
#- scopelint
|
||||
#- varcheck
|
||||
#- structcheck
|
||||
#- golint
|
||||
#- nosnakecase
|
||||
#- maligned
|
||||
#- goerr113
|
||||
#- ifshort
|
||||
#- gomnd
|
||||
#- exhaustivestruct
|
||||
|
|
|
|||
18 vendor/github.com/go-openapi/jsonpointer/errors.go generated vendored Normal file
@@ -0,0 +1,18 @@
package jsonpointer

type pointerError string

func (e pointerError) Error() string {
	return string(e)
}

const (
	// ErrPointer is an error raised by the jsonpointer package
	ErrPointer pointerError = "JSON pointer error"

	// ErrInvalidStart states that a JSON pointer must start with a separator ("/")
	ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator

	// ErrUnsupportedValueType indicates that a value of the wrong type is being set
	ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values"
)
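With the sentinel errors above and the wrapping added in pointer.go below, callers can detect pointer failures with errors.Is instead of matching error strings. A minimal hedged sketch using only the package's public New/Get API; the document and pointer string are made-up values:

package main

import (
	"errors"
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]interface{}{
		"spec": map[string]interface{}{"name": "garm"},
	}

	ptr, err := jsonpointer.New("/spec/missing")
	if err != nil {
		fmt.Println("bad pointer:", err)
		return
	}

	if _, _, err := ptr.Get(doc); err != nil {
		// The lookup errors now wrap ErrPointer, so no string matching
		// is needed to classify them.
		if errors.Is(err, jsonpointer.ErrPointer) {
			fmt.Println("pointer lookup failed:", err)
		}
	}
}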
49
vendor/github.com/go-openapi/jsonpointer/pointer.go
generated
vendored
49
vendor/github.com/go-openapi/jsonpointer/pointer.go
generated
vendored
|
|
@ -39,9 +39,6 @@ import (
|
|||
const (
|
||||
emptyPointer = ``
|
||||
pointerSeparator = `/`
|
||||
|
||||
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
|
||||
notFound = `Can't find the pointer in the document`
|
||||
)
|
||||
|
||||
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
|
||||
|
|
@ -80,7 +77,7 @@ func (p *Pointer) parse(jsonPointerString string) error {
|
|||
|
||||
if jsonPointerString != emptyPointer {
|
||||
if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
|
||||
err = errors.New(invalidStart)
|
||||
err = errors.Join(ErrInvalidStart, ErrPointer)
|
||||
} else {
|
||||
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
|
||||
p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
|
||||
|
|
@ -128,7 +125,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
|
|||
rValue := reflect.Indirect(reflect.ValueOf(node))
|
||||
kind := rValue.Kind()
|
||||
if isNil(node) {
|
||||
return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken)
|
||||
return nil, kind, fmt.Errorf("nil value has no field %q: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
|
||||
switch typed := node.(type) {
|
||||
|
|
@ -146,7 +143,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
|
|||
case reflect.Struct:
|
||||
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
|
||||
if !ok {
|
||||
return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
|
||||
return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
fld := rValue.FieldByName(nm)
|
||||
return fld.Interface(), kind, nil
|
||||
|
|
@ -158,7 +155,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
|
|||
if mv.IsValid() {
|
||||
return mv.Interface(), kind, nil
|
||||
}
|
||||
return nil, kind, fmt.Errorf("object has no key %q", decodedToken)
|
||||
return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
|
||||
|
||||
case reflect.Slice:
|
||||
tokenIndex, err := strconv.Atoi(decodedToken)
|
||||
|
|
@ -167,14 +164,14 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
|
|||
}
|
||||
sLength := rValue.Len()
|
||||
if tokenIndex < 0 || tokenIndex >= sLength {
|
||||
return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
|
||||
return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer)
|
||||
}
|
||||
|
||||
elem := rValue.Index(tokenIndex)
|
||||
return elem.Interface(), kind, nil
|
||||
|
||||
default:
|
||||
return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
|
||||
return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -194,7 +191,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
|
|||
case reflect.Struct:
|
||||
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
|
||||
if !ok {
|
||||
return fmt.Errorf("object has no field %q", decodedToken)
|
||||
return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
fld := rValue.FieldByName(nm)
|
||||
if fld.IsValid() {
|
||||
|
|
@ -214,18 +211,18 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
|
|||
}
|
||||
sLength := rValue.Len()
|
||||
if tokenIndex < 0 || tokenIndex >= sLength {
|
||||
return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
|
||||
return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
|
||||
}
|
||||
|
||||
elem := rValue.Index(tokenIndex)
|
||||
if !elem.CanSet() {
|
||||
return fmt.Errorf("can't set slice index %s to %v", decodedToken, data)
|
||||
return fmt.Errorf("can't set slice index %s to %v: %w", decodedToken, data, ErrPointer)
|
||||
}
|
||||
elem.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
|
||||
default:
|
||||
return fmt.Errorf("invalid token reference %q", decodedToken)
|
||||
return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -244,7 +241,6 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K
|
|||
}
|
||||
|
||||
for _, token := range p.referenceTokens {
|
||||
|
||||
decodedToken := Unescape(token)
|
||||
|
||||
r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
|
||||
|
|
@ -264,7 +260,10 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
|
|||
knd := reflect.ValueOf(node).Kind()
|
||||
|
||||
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
|
||||
return errors.New("only structs, pointers, maps and slices are supported for setting values")
|
||||
return errors.Join(
|
||||
ErrUnsupportedValueType,
|
||||
ErrPointer,
|
||||
)
|
||||
}
|
||||
|
||||
if nameProvider == nil {
|
||||
|
|
@ -307,7 +306,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
|
|||
case reflect.Struct:
|
||||
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
|
||||
if !ok {
|
||||
return fmt.Errorf("object has no field %q", decodedToken)
|
||||
return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
fld := rValue.FieldByName(nm)
|
||||
if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
|
||||
|
|
@ -321,7 +320,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
|
|||
mv := rValue.MapIndex(kv)
|
||||
|
||||
if !mv.IsValid() {
|
||||
return fmt.Errorf("object has no key %q", decodedToken)
|
||||
return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
|
||||
node = mv.Addr().Interface()
|
||||
|
|
@ -336,7 +335,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
|
|||
}
|
||||
sLength := rValue.Len()
|
||||
if tokenIndex < 0 || tokenIndex >= sLength {
|
||||
return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
|
||||
return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
|
||||
}
|
||||
|
||||
elem := rValue.Index(tokenIndex)
|
||||
|
|
@ -347,7 +346,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
|
|||
node = elem.Interface()
|
||||
|
||||
default:
|
||||
return fmt.Errorf("invalid token reference %q", decodedToken)
|
||||
return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -404,10 +403,10 @@ func (p *Pointer) Offset(document string) (int64, error) {
|
|||
return 0, err
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid token %#v", tk)
|
||||
return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid token %#v", tk)
|
||||
return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
|
|
@ -437,16 +436,16 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
|
|||
return offset, nil
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid token %#v", tk)
|
||||
return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("token reference %q not found", decodedToken)
|
||||
return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
|
||||
func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
|
||||
idx, err := strconv.Atoi(decodedToken)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
|
||||
return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer)
|
||||
}
|
||||
var i int
|
||||
for i = 0; i < idx && dec.More(); i++ {
|
||||
|
|
@ -470,7 +469,7 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
|
|||
}
|
||||
|
||||
if !dec.More() {
|
||||
return 0, fmt.Errorf("token reference %q not found", decodedToken)
|
||||
return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
|
||||
}
|
||||
return dec.InputOffset(), nil
|
||||
}
|
||||
|
|
|
|||
34
vendor/github.com/go-openapi/swag/.golangci.yml
generated
vendored
34
vendor/github.com/go-openapi/swag/.golangci.yml
generated
vendored
|
|
@ -1,22 +1,17 @@
|
|||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: true
|
||||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 45
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 200
|
||||
goconst:
|
||||
min-len: 3
|
||||
min-len: 2
|
||||
min-occurrences: 3
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- maligned
|
||||
- recvcheck
|
||||
- unparam
|
||||
- lll
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
|
|
@ -28,9 +23,6 @@ linters:
|
|||
- wrapcheck
|
||||
- testpackage
|
||||
- nlreturn
|
||||
- gomnd
|
||||
- exhaustivestruct
|
||||
- goerr113
|
||||
- errorlint
|
||||
- nestif
|
||||
- godot
|
||||
|
|
@ -38,7 +30,6 @@ linters:
|
|||
- paralleltest
|
||||
- tparallel
|
||||
- thelper
|
||||
- ifshort
|
||||
- exhaustruct
|
||||
- varnamelen
|
||||
- gci
|
||||
|
|
@ -51,10 +42,15 @@ linters:
|
|||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
- deadcode
|
||||
- interfacer
|
||||
- scopelint
|
||||
- varcheck
|
||||
- structcheck
|
||||
- golint
|
||||
- nosnakecase
|
||||
#- deadcode
|
||||
#- interfacer
|
||||
#- scopelint
|
||||
#- varcheck
|
||||
#- structcheck
|
||||
#- golint
|
||||
#- nosnakecase
|
||||
#- maligned
|
||||
#- goerr113
|
||||
#- ifshort
|
||||
#- gomnd
|
||||
#- exhaustivestruct
|
||||
|
|
|
|||
15 vendor/github.com/go-openapi/swag/errors.go generated vendored Normal file
@@ -0,0 +1,15 @@
package swag

type swagError string

const (
	// ErrYAML is an error raised by YAML utilities
	ErrYAML swagError = "yaml error"

	// ErrLoader is an error raised by the file loader utility
	ErrLoader swagError = "loader error"
)

func (e swagError) Error() string {
	return string(e)
}
3
vendor/github.com/go-openapi/swag/json.go
generated
vendored
3
vendor/github.com/go-openapi/swag/json.go
generated
vendored
|
|
@ -126,7 +126,8 @@ func ConcatJSON(blobs ...[]byte) []byte {
|
|||
continue // don't know how to concatenate non container objects
|
||||
}
|
||||
|
||||
if len(b) < 3 { // yep empty but also the last one, so closing this thing
|
||||
const minLengthIfNotEmpty = 3
|
||||
if len(b) < minLengthIfNotEmpty { // yep empty but also the last one, so closing this thing
|
||||
if i == last && a > 0 {
|
||||
if err := buf.WriteByte(closing); err != nil {
|
||||
log.Println(err)
|
||||
|
|
|
|||
2
vendor/github.com/go-openapi/swag/loading.go
generated
vendored
2
vendor/github.com/go-openapi/swag/loading.go
generated
vendored
|
|
@ -168,7 +168,7 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) {
|
|||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
|
||||
return nil, fmt.Errorf("could not access document at %q [%s]: %w", path, resp.Status, ErrLoader)
|
||||
}
|
||||
|
||||
return io.ReadAll(resp.Body)
|
||||
|
|
|
|||
32
vendor/github.com/go-openapi/swag/yaml.go
generated
vendored
32
vendor/github.com/go-openapi/swag/yaml.go
generated
vendored
|
|
@ -16,7 +16,6 @@ package swag
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
|
|
@ -51,7 +50,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
|
|||
return nil, err
|
||||
}
|
||||
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
|
||||
return nil, errors.New("only YAML documents that are objects are supported")
|
||||
return nil, fmt.Errorf("only YAML documents that are objects are supported: %w", ErrYAML)
|
||||
}
|
||||
return &document, nil
|
||||
}
|
||||
|
|
@ -69,31 +68,32 @@ func yamlNode(root *yaml.Node) (interface{}, error) {
|
|||
case yaml.AliasNode:
|
||||
return yamlNode(root.Alias)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind)
|
||||
return nil, fmt.Errorf("unsupported YAML node type: %v: %w", root.Kind, ErrYAML)
|
||||
}
|
||||
}
|
||||
|
||||
func yamlDocument(node *yaml.Node) (interface{}, error) {
|
||||
if len(node.Content) != 1 {
|
||||
return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content))
|
||||
return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML)
|
||||
}
|
||||
return yamlNode(node.Content[0])
|
||||
}
|
||||
|
||||
func yamlMapping(node *yaml.Node) (interface{}, error) {
|
||||
m := make(JSONMapSlice, len(node.Content)/2)
|
||||
const sensibleAllocDivider = 2
|
||||
m := make(JSONMapSlice, len(node.Content)/sensibleAllocDivider)
|
||||
|
||||
var j int
|
||||
for i := 0; i < len(node.Content); i += 2 {
|
||||
var nmi JSONMapItem
|
||||
k, err := yamlStringScalarC(node.Content[i])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to decode YAML map key: %w", err)
|
||||
return nil, fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML)
|
||||
}
|
||||
nmi.Key = k
|
||||
v, err := yamlNode(node.Content[i+1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err)
|
||||
return nil, fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML)
|
||||
}
|
||||
nmi.Value = v
|
||||
m[j] = nmi
|
||||
|
|
@ -109,7 +109,7 @@ func yamlSequence(node *yaml.Node) (interface{}, error) {
|
|||
|
||||
v, err := yamlNode(node.Content[i])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err)
|
||||
return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML)
|
||||
}
|
||||
s = append(s, v)
|
||||
}
|
||||
|
|
@ -132,19 +132,19 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
|
|||
case yamlBoolScalar:
|
||||
b, err := strconv.ParseBool(node.Value)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err)
|
||||
return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w: %w", node.Value, err, ErrYAML)
|
||||
}
|
||||
return b, nil
|
||||
case yamlIntScalar:
|
||||
i, err := strconv.ParseInt(node.Value, 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err)
|
||||
return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w: %w", node.Value, err, ErrYAML)
|
||||
}
|
||||
return i, nil
|
||||
case yamlFloatScalar:
|
||||
f, err := strconv.ParseFloat(node.Value, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err)
|
||||
return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w: %w", node.Value, err, ErrYAML)
|
||||
}
|
||||
return f, nil
|
||||
case yamlTimestamp:
|
||||
|
|
@ -152,19 +152,19 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
|
|||
case yamlNull:
|
||||
return nil, nil //nolint:nilnil
|
||||
default:
|
||||
return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
|
||||
return nil, fmt.Errorf("YAML tag %q is not supported: %w", node.LongTag(), ErrYAML)
|
||||
}
|
||||
}
|
||||
|
||||
func yamlStringScalarC(node *yaml.Node) (string, error) {
|
||||
if node.Kind != yaml.ScalarNode {
|
||||
return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind)
|
||||
return "", fmt.Errorf("expecting a string scalar but got %q: %w", node.Kind, ErrYAML)
|
||||
}
|
||||
switch node.LongTag() {
|
||||
case yamlStringScalar, yamlIntScalar, yamlFloatScalar:
|
||||
return node.Value, nil
|
||||
default:
|
||||
return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag())
|
||||
return "", fmt.Errorf("YAML tag %q is not supported as map key: %w", node.LongTag(), ErrYAML)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -349,7 +349,7 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
|
|||
Value: strconv.FormatBool(val),
|
||||
}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unhandled type: %T", val)
|
||||
return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -416,7 +416,7 @@ func transformData(input interface{}) (out interface{}, err error) {
|
|||
case int64:
|
||||
return strconv.FormatInt(k, 10), nil
|
||||
default:
|
||||
return "", fmt.Errorf("unexpected map key type, got: %T", k)
|
||||
return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
10
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
10
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
|
|
@ -20,7 +20,11 @@ Andrew Reid <andrew.reid at tixtrack.com>
|
|||
Animesh Ray <mail.rayanimesh at gmail.com>
|
||||
Arne Hormann <arnehormann at gmail.com>
|
||||
Ariel Mashraki <ariel at mashraki.co.il>
|
||||
Artur Melanchyk <artur.melanchyk@gmail.com>
|
||||
Asta Xie <xiemengjun at gmail.com>
|
||||
B Lamarche <blam413 at gmail.com>
|
||||
Bes Dollma <bdollma@thousandeyes.com>
|
||||
Bogdan Constantinescu <bog.con.bc at gmail.com>
|
||||
Brian Hendriks <brian at dolthub.com>
|
||||
Bulat Gaifullin <gaifullinbf at gmail.com>
|
||||
Caine Jette <jette at alum.mit.edu>
|
||||
|
|
@ -33,6 +37,7 @@ Daniel Montoya <dsmontoyam at gmail.com>
|
|||
Daniel Nichter <nil at codenode.com>
|
||||
Daniël van Eeden <git at myname.nl>
|
||||
Dave Protasowski <dprotaso at gmail.com>
|
||||
Dirkjan Bussink <d.bussink at gmail.com>
|
||||
DisposaBoy <disposaboy at dby.me>
|
||||
Egor Smolyakov <egorsmkv at gmail.com>
|
||||
Erwan Martin <hello at erwan.io>
|
||||
|
|
@ -50,6 +55,7 @@ ICHINOSE Shogo <shogo82148 at gmail.com>
|
|||
Ilia Cimpoes <ichimpoesh at gmail.com>
|
||||
INADA Naoki <songofacandy at gmail.com>
|
||||
Jacek Szwec <szwec.jacek at gmail.com>
|
||||
Jakub Adamus <kratky at zobak.cz>
|
||||
James Harr <james.harr at gmail.com>
|
||||
Janek Vedock <janekvedock at comcast.net>
|
||||
Jason Ng <oblitorum at gmail.com>
|
||||
|
|
@ -60,6 +66,7 @@ Jennifer Purevsuren <jennifer at dolthub.com>
|
|||
Jerome Meyer <jxmeyer at gmail.com>
|
||||
Jiajia Zhong <zhong2plus at gmail.com>
|
||||
Jian Zhen <zhenjl at gmail.com>
|
||||
Joe Mann <contact at joemann.co.uk>
|
||||
Joshua Prunier <joshua.prunier at gmail.com>
|
||||
Julien Lefevre <julien.lefevr at gmail.com>
|
||||
Julien Schmidt <go-sql-driver at julienschmidt.com>
|
||||
|
|
@ -80,6 +87,7 @@ Lunny Xiao <xiaolunwen at gmail.com>
|
|||
Luke Scott <luke at webconnex.com>
|
||||
Maciej Zimnoch <maciej.zimnoch at codilime.com>
|
||||
Michael Woolnough <michael.woolnough at gmail.com>
|
||||
Nao Yokotsuka <yokotukanao at gmail.com>
|
||||
Nathanial Murphy <nathanial.murphy at gmail.com>
|
||||
Nicola Peduzzi <thenikso at gmail.com>
|
||||
Oliver Bone <owbone at github.com>
|
||||
|
|
@ -89,6 +97,7 @@ Paul Bonser <misterpib at gmail.com>
|
|||
Paulius Lozys <pauliuslozys at gmail.com>
|
||||
Peter Schultz <peter.schultz at classmarkets.com>
|
||||
Phil Porada <philporada at gmail.com>
|
||||
Minh Quang <minhquang4334 at gmail.com>
|
||||
Rebecca Chin <rchin at pivotal.io>
|
||||
Reed Allman <rdallman10 at gmail.com>
|
||||
Richard Wilkes <wilkes at me.com>
|
||||
|
|
@ -139,4 +148,5 @@ PingCAP Inc.
|
|||
Pivotal Inc.
|
||||
Shattered Silicon Ltd.
|
||||
Stripe Inc.
|
||||
ThousandEyes
|
||||
Zendesk Inc.
|
||||
|
|
|
|||
41
vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
generated
vendored
41
vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
generated
vendored
|
|
@ -1,3 +1,44 @@
|
|||
# Changelog
|
||||
|
||||
## v1.9.2 (2025-04-07)
|
||||
|
||||
v1.9.2 is a re-release of v1.9.1 due to a release process issue; no changes were made to the content.
|
||||
|
||||
|
||||
## v1.9.1 (2025-03-21)
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add Charset() option. (#1679)
|
||||
|
||||
### Bugfixes
|
||||
|
||||
* go.mod: fix go version format (#1682)
|
||||
* Fix FormatDSN missing ConnectionAttributes (#1619)
|
||||
|
||||
## v1.9.0 (2025-02-18)
|
||||
|
||||
### Major Changes
|
||||
|
||||
- Implement zlib compression. (#1487)
|
||||
- Supported Go version is updated to Go 1.21+. (#1639)
|
||||
- Add support for VECTOR type introduced in MySQL 9.0. (#1609)
|
||||
- Config object can have custom dial function. (#1527)
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- Fix auth errors when username/password are too long. (#1625)
|
||||
- Check if MySQL supports CLIENT_CONNECT_ATTRS before sending client attributes. (#1640)
|
||||
- Fix auth switch request handling. (#1666)
|
||||
|
||||
### Other changes
|
||||
|
||||
- Add "filename:line" prefix to log in go-mysql. Custom loggers now show it. (#1589)
|
||||
- Improve error handling. It reduces the "busy buffer" errors. (#1595, #1601, #1641)
|
||||
- Use `strconv.Atoi` to parse max_allowed_packet. (#1661)
|
||||
- `rejectReadOnly` option now handles ER_READ_ONLY_MODE (1290) error too. (#1660)
|
||||
|
||||
|
||||
## Version 1.8.1 (2024-03-26)
|
||||
|
||||
Bugfixes:
|
||||
|
|
|
|||
18
vendor/github.com/go-sql-driver/mysql/README.md
generated
vendored
18
vendor/github.com/go-sql-driver/mysql/README.md
generated
vendored
|
|
@ -38,11 +38,12 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
|
|||
* Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
|
||||
* Optional `time.Time` parsing
|
||||
* Optional placeholder interpolation
|
||||
* Supports zlib compression.
|
||||
|
||||
## Requirements
|
||||
|
||||
* Go 1.19 or higher. We aim to support the 3 latest versions of Go.
|
||||
* MySQL (5.7+) and MariaDB (10.3+) are supported.
|
||||
* Go 1.21 or higher. We aim to support the 3 latest versions of Go.
|
||||
* MySQL (5.7+) and MariaDB (10.5+) are supported.
|
||||
* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
|
||||
* Do not ask questions about TiDB in our issue tracker or forum.
|
||||
* [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
|
||||
|
|
@ -267,6 +268,16 @@ SELECT u.id FROM users as u
|
|||
|
||||
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
|
||||
|
||||
##### `compress`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
Toggles zlib compression. false by default.
|
||||
|
||||
##### `interpolateParams`
|
||||
|
||||
```
|
||||
|
|
@ -519,6 +530,9 @@ This driver supports the [`ColumnType` interface](https://golang.org/pkg/databas
|
|||
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
|
||||
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> The `QueryContext`, `ExecContext`, etc. variants provided by `database/sql` will cause the connection to be closed if the provided context is cancelled or timed out before the result is received by the driver.
|
||||
|
||||
|
||||
### `LOAD DATA LOCAL INFILE` support
|
||||
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
|
||||
|
|
|
|||
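As a side note to the README change above documenting the driver's new `compress` DSN flag, here is a hedged sketch of enabling it through database/sql; the credentials, host and schema are placeholders rather than values taken from this repository:

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder credentials and host; compress=true toggles the zlib
	// compression added in go-sql-driver/mysql v1.9, parseTime=true is
	// the usual option for scanning DATETIME columns into time.Time.
	dsn := "garm:secret@tcp(127.0.0.1:3306)/garm?parseTime=true&compress=true"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("connected with zlib compression enabled")
}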
19
vendor/github.com/go-sql-driver/mysql/atomic_bool.go
generated
vendored
19
vendor/github.com/go-sql-driver/mysql/atomic_bool.go
generated
vendored
|
|
@ -1,19 +0,0 @@
|
|||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
|
||||
//
|
||||
// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
//go:build go1.19
|
||||
// +build go1.19
|
||||
|
||||
package mysql
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
/******************************************************************************
|
||||
* Sync utils *
|
||||
******************************************************************************/
|
||||
|
||||
type atomicBool = atomic.Bool
|
||||
Some files were not shown because too many files have changed in this diff.