Compare commits
101 commits
| Author | SHA1 | Date |
|---|---|---|
| | d97839cd90 | |
| | 1f8217d99b | |
| | cd83c525c8 | |
| | 88e6654a6c | |
| | 10a9ab9001 | |
| | ea961a70c3 | |
| | 8a98a8a512 | |
| | 2301db6f85 | |
| | aab9e22819 | |
| | 403489591e | |
| | 8034eaaabb | |
| | 44f4557005 | |
| | d92a892ece | |
| | a7487dadd7 | |
| | aa6eef94fd | |
| | f31a1b031e | |
| | 2de7e57e08 | |
| | 05795ee286 | |
| | f48e9b3ba6 | |
| | 4d685c129c | |
| | 2d359067f6 | |
| | e25d6b3c91 | |
| | 08723cf623 | |
| | c80a40023c | |
| | 21d451085f | |
| | 02247b852a | |
| | 66a7e82c43 | |
| | 44b8b91540 | |
| | bda2295f71 | |
| | b17b1f00b3 | |
| | 3f52c56d1e | |
| | 041524d663 | |
| | a3d46d7597 | |
| | 0a0b25d886 | |
| | a22c5a2e65 | |
| | e68940619c | |
| | ba1beb08f4 | |
| | 4010e61165 | |
| | 5f3ff3d2e2 | |
| | 2bccbec3ae | |
| | 996ac343ee | |
| | 413a52605d | |
| | a980acd936 | |
| | 1fa156c7e4 | |
| | b772be7131 | |
| | 63351343ba | |
| | 01766ff4e2 | |
| | d79d043696 | |
| | 7f90c8acb2 | |
| | ea824bde1c | |
| | 46a955d1ff | |
| | 56ef60060b | |
| | 014b4ba5f6 | |
| | 9c09ca3f56 | |
| | 6bdb7ed9c7 | |
| | 8debbe699e | |
| | fb1a38b840 | |
| | 225a05e1a5 | |
| | 8512d76ce7 | |
| | ed7dcb0081 | |
| | 71bd44f9a0 | |
| | aed4dd8766 | |
| | 331979b887 | |
| | c25bd51857 | |
| | ae283ef0f5 | |
| | f4eb8e57fb | |
| | b8ab05e367 | |
| | 797e0cd250 | |
| | 8c6c089a65 | |
| | bcf2bfbf20 | |
| | 89f37985bd | |
| | aaf9dea44a | |
| | e1e7d0e85a | |
| | d98522a2b6 | |
| | 6576754256 | |
| | 69df253e41 | |
| | 6877142ba4 | |
| | b3c603644a | |
| | 5c93da6fc7 | |
| | 543f3fd495 | |
| | 2573ccaf19 | |
| | 3585d67743 | |
| | 458dae9827 | |
| | 19f4dbeb4b | |
| | b6c9da470e | |
| | f728f8a923 | |
| | 6f212fbb5a | |
| | 02a51c0a21 | |
| | 933667bdf1 | |
| | 3dd167d770 | |
| | 16276cc0d6 | |
| | 4d98bda282 | |
| | 7f75f52aaf | |
| | 106c50a51d | |
| | 86653c237d | |
| | 9a62046607 | |
| | 6a6a41bf05 | |
| | 151c90b2a9 | |
| | 02df78a9ec | |
| | 864fce89d2 | |
| | 149a9e3cf5 | |
84 changed files with 2847 additions and 711 deletions
.forgejo/workflows/build-ipcei.yml (new file, 27 lines)

@@ -0,0 +1,27 @@
+name: ci
+
+on:
+  push:
+    tags:
+      - v*
+
+jobs:
+  goreleaser:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version: ">=1.25.1"
+      - name: Test code
+        run: make test
+      - name: Run GoReleaser
+        uses: https://github.com/goreleaser/goreleaser-action@v6
+        env:
+          GITEA_TOKEN: ${{ secrets.PACKAGES_TOKEN }}
+        with:
+          args: release --clean
@@ -18,19 +18,22 @@ on:
   enable-email-notifications: true
 
+env:
+  FORGEJO_VERSION: 11.0.7 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
+
 jobs:
   release-simulation:
     runs-on: lxc-bookworm
     if: vars.ROLE == 'forgejo-coding'
     steps:
-      - uses: actions/checkout@v4
+      - uses: https://data.forgejo.org/actions/checkout@v4
 
       - id: forgejo
-        uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.2
+        uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.4
        with:
          user: root
          password: admin1234
-          image-version: 1.20
+          image-version: ${{ env.FORGEJO_VERSION }}
          lxc-ip-prefix: 10.0.9
 
      - name: publish
@@ -23,7 +23,7 @@ jobs:
     # root is used for testing, allow it
     if: vars.ROLE == 'forgejo-integration' || forge.repository_owner == 'root'
     steps:
-      - uses: actions/checkout@v4
+      - uses: https://data.forgejo.org/actions/checkout@v4
 
      - name: Increase the verbosity when there are no secrets
        id: verbose
@@ -87,10 +87,10 @@ jobs:
         with:
           fetch-depth: '0'
           show-progress: 'false'
-      - uses: actions/setup-go@v5
+      - uses: https://data.forgejo.org/actions/setup-go@v5
        with:
          go-version-file: go.mod
-      - uses: https://data.forgejo.org/actions/cascading-pr@v2.2.1
+      - uses: https://data.forgejo.org/actions/cascading-pr@v2.3.0
        with:
          origin-url: ${{ forge.server_url }}
          origin-repo: ${{ forge.repository }}
@@ -106,4 +106,5 @@ jobs:
           close: true
           verbose: ${{ vars.VERBOSE == 'yes' }}
           debug: ${{ vars.DEBUG == 'yes' }}
+          wait-iteration: 120
          update: .forgejo/cascading-forgejo
@@ -81,7 +81,7 @@ jobs:
     container:
       image: 'code.forgejo.org/oci/node:22-bookworm'
     steps:
-      - uses: https://code.forgejo.org/actions/cascading-pr@v2.2.1
+      - uses: https://data.forgejo.org/actions/cascading-pr@v2.3.0
        with:
          origin-url: ${{ forge.server_url }}
          origin-repo: ${{ forge.repository }}
@@ -21,7 +21,7 @@ on:
   enable-email-notifications: true
 
 env:
-  FORGEJO_VERSION: 11.0.5 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
+  FORGEJO_VERSION: 11.0.7 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
  FORGEJO_USER: root
  FORGEJO_PASSWORD: admin1234
@@ -34,7 +34,7 @@ jobs:
 
       - name: install Forgejo so it can be used as a container registry
         id: registry
-        uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.2
+        uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.4
        with:
          user: ${{ env.FORGEJO_USER }}
          password: ${{ env.FORGEJO_PASSWORD }}
@@ -15,7 +15,7 @@ jobs:
     if: vars.ROLE == 'forgejo-coding'
     runs-on: lxc-bookworm
     steps:
-      - uses: actions/checkout@v4
+      - uses: https://data.forgejo.org/actions/checkout@v4
 
      - name: Install docker
        run: |
@@ -14,11 +14,12 @@ env:
   SERIAL: "30"
   LIFETIME: "60"
   SYSTEMD_OPTIONS: "--no-pager --full"
+  USE_VERSION: 11.0.7 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
 
 jobs:
   example-lxc-systemd:
     if: vars.ROLE == 'forgejo-coding'
-    runs-on: lxc-bookworm
+    runs-on: lxc-trixie
    steps:
      - uses: https://data.forgejo.org/actions/checkout@v4
@@ -53,11 +54,11 @@ jobs:
           done
 
       - id: forgejo
-        uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.2
+        uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.4
        with:
          user: root
          password: admin1234
-          binary: https://code.forgejo.org/forgejo/forgejo/releases/download/v7.0.12/forgejo-7.0.12-linux-amd64
+          binary: https://code.forgejo.org/forgejo/forgejo/releases/download/v${{ env.USE_VERSION }}/forgejo-${{ env.USE_VERSION }}-linux-amd64
          # must be the same as LXC_IPV4_PREFIX in examples/lxc-systemd/forgejo-runner-service.sh
          lxc-ip-prefix: 10.105.7
@@ -123,8 +124,8 @@ jobs:
           started_running=/etc/forgejo-runner/$serial/started-running
           killed_gracefully=/etc/forgejo-runner/$serial/killed-gracefully
           stopped_gracefully=/etc/forgejo-runner/$serial/stopped-gracefully
-          retry --delay 5 --times 20 cp -a $started_running /tmp/first-run
-          retry --delay 1 --times 30 grep --quiet 'Starting runner daemon' /var/log/forgejo-runner/$serial.log
+          retry --delay 10 --times 20 cp -a $started_running /tmp/first-run
+          retry --delay 2 --times 30 grep --quiet 'Starting runner daemon' /var/log/forgejo-runner/$serial.log
          systemctl stop forgejo-runner@$serial
          ! systemctl $all status forgejo-runner@$serial
          ls -l /etc/forgejo-runner/$serial
@@ -136,7 +137,7 @@ jobs:
           ! test -f $killed_gracefully
           ! test -f $stopped_gracefully
           lifetime=${{ env.LIFETIME }}
-          # give it time to restart at least once
+          : give it time to restart at least once
          ls -l /etc/forgejo-runner/$serial
          sleep $lifetime ; sleep $lifetime
          ls -l /etc/forgejo-runner/$serial
@@ -6,16 +6,17 @@ name: issue-labels
 on:
   pull_request_target:
     types:
+      - opened
       - edited
       - synchronize
       - labeled
 
 env:
-  RNA_VERSION: v1.4.0 # renovate: datasource=forgejo-releases depName=forgejo/release-notes-assistant registryUrl=https://code.forgejo.org
+  RNA_VERSION: v1.4.1 # renovate: datasource=forgejo-releases depName=forgejo/release-notes-assistant
 
 jobs:
   release-notes:
-    if: vars.ROLE == 'forgejo-coding'
+    if: vars.ROLE == 'forgejo-coding' && !contains(forge.event.pull_request.labels.*.name, 'Kind/DependencyUpdate')
    runs-on: docker
    container:
      image: 'data.forgejo.org/oci/ci:1'
@@ -59,7 +59,7 @@ jobs:
 
       - run: make build
 
-      - uses: https://code.forgejo.org/actions/upload-artifact@v3
+      - uses: https://data.forgejo.org/actions/upload-artifact@v3
        with:
          name: forgejo-runner
          path: forgejo-runner
@@ -79,9 +79,9 @@ jobs:
     needs: [build-and-tests]
     steps:
 
-      - uses: actions/checkout@v4
+      - uses: https://data.forgejo.org/actions/checkout@v4
 
-      - uses: https://code.forgejo.org/actions/download-artifact@v3
+      - uses: https://data.forgejo.org/actions/download-artifact@v3
        with:
          name: forgejo-runner
@@ -156,9 +156,9 @@ jobs:
     needs: [build-and-tests]
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: https://data.forgejo.org/actions/checkout@v4
 
-      - uses: actions/setup-go@v5
+      - uses: https://data.forgejo.org/actions/setup-go@v5
        with:
          go-version-file: go.mod
@@ -185,6 +185,11 @@ jobs:
           EOF
           apt --quiet install --yes -qq docker.io make
 
+      - name: install LXC
+        run: |
+          act/runner/lxc-helpers.sh lxc_prepare_environment
+          act/runner/lxc-helpers.sh lxc_install_lxc_inside 10.39.28 fdb1
+
      - run: apt-get -q install -qq -y gcc # required for `-race`
 
      - run: make integration-test
@@ -217,3 +222,53 @@ jobs:
           git diff --color=always
           exit 1
         }
+
+  validate-pre-commit:
+    name: validate pre-commit-hooks file
+    if: vars.ROLE == 'forgejo-coding'
+    runs-on: docker
+    container:
+      image: 'code.forgejo.org/oci/ci:1'
+
+    steps:
+      - uses: https://data.forgejo.org/actions/checkout@v4
+
+      - name: install pre-commit
+        env:
+          DEBIAN_FRONTEND: noninteractive
+          PIP_ROOT_USER_ACTION: ignore
+          PIP_BREAK_SYSTEM_PACKAGES: 1
+          PIP_PROGRESS_BAR: off
+        run: |
+          apt-get update -qq
+          apt-get -q install -qq -y python3-pip
+          python3 -m pip install 'pre-commit>=3.2.0'
+
+      - name: validate .pre-commit-hooks.yaml
+        run: pre-commit validate-manifest .pre-commit-hooks.yaml
+
+      # Will fail due to `act/runner/testdata/local-action-fails-schema-validation/action/action.yml`
+      - name: check pre-commit hook against local action files (should fail)
+        continue-on-error: true
+        run: |
+          pre-commit try-repo --all-files --verbose . forgejo-runner-validate
+
+      - name: check that a bad workflow file doesn’t validate (should fail)
+        continue-on-error: true
+        run: |
+          mkdir -p test-repo
+          cd test-repo
+          git config set advice.defaultBranchName false
+          git init --quiet
+          mkdir -p .forgejo/workflows
+          cp ../act/runner/testdata/local-action-fails-schema-validation/action/action.yml ./
+          touch .forgejo/workflows/bad-workflow.yml
+          cat > .pre-commit-config.yaml <<EOF
+          repos:
+            - repo: ..
+              rev: ${{ forge.sha }}
+              hooks:
+                - id: forgejo-runner-validate
+          EOF
+          git add .
+          pre-commit run --all-files --verbose forgejo-runner-validate
.github/workflows/build-release.yml (vendored, 151 lines deleted)

@@ -1,151 +0,0 @@
-# This workflow:
-# - builds and uploads a binary artifact for each Windows architecture
-# - tests the runner on Windows with a Forgejo server container running on Windows Subsystem for Linux (WSL)
-# - releases the binary artifact (if triggered on a pushed tag)
-#
-# This build is currently supported on https://github.com/Crown0815/forgejo-runner-windows
-
-name: Build Release
-
-on:
-  push:
-    tags: ['v*']
-    branches: [ main ]
-
-jobs:
-  build:
-    name: Build ${{matrix.architecture}}
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        architecture: ['386', amd64, arm, arm64]
-    steps:
-      - uses: actions/checkout@v4
-      - name: Build for ${{matrix.architecture}}
-        run: |
-          env GOOS=windows GOARCH=${{matrix.architecture}} \
-          go build \
-          -ldflags "-s -w -X code.forgejo.org/forgejo/runner/internal/pkg/ver.version=${{ github.ref_name }}" \
-          -o forgejo-runner-windows-${{matrix.architecture}}.exe
-
-      - uses: actions/upload-artifact@v4
-        with:
-          name: forgejo-runner-windows-${{matrix.architecture}}
-          path: forgejo-runner-windows-${{matrix.architecture}}.exe
-
-
-  test:
-    name: Run Tests on Windows with Linux Forgejo Server
-    runs-on: windows-latest
-    env:
-      FORGEJO_ROOT_URL: 'http://localhost:3000/'
-      FORGEJO_ADMIN_USER: 'admin_user'
-      FORGEJO_ADMIN_PASSWORD: 'admin_password'
-      FORGEJO_RUNNER_SECRET: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
-      MAX_WAIT_ITERATIONS: 30
-
-    steps:
-      - name: Windows - Checkout code
-        uses: actions/checkout@v4
-
-      - name: Windows - Setup Windows Subsystem for Linux (WSL)
-        uses: Vampire/setup-wsl@v5
-        with:
-          distribution: Alpine
-          wsl-shell-user: root
-          additional-packages: bash
-
-      - name: WSL - Install Docker
-        shell: wsl-bash {0}
-        run: |
-          apk --update add --no-cache docker curl
-
-          rc-update add docker default
-          openrc default
-
-          # Wait for Docker to be ready
-          i=0
-          until docker info > /dev/null 2>&1 || (( i == ${{ env.MAX_WAIT_ITERATIONS }} )); do
-            echo "Waiting for Docker to be ready... ($(( ++i ))/${{ env.MAX_WAIT_ITERATIONS }})"
-            sleep 1
-          done
-          [ $i -lt ${{ env.MAX_WAIT_ITERATIONS }} ] && echo "Docker is ready!" || { echo "Timed out waiting for Docker" ; exit 1; }
-
-      - name: WSL - Start Forgejo Server
-        shell: wsl-bash {0}
-        run: |
-          docker run -d --name forgejo \
-            -p 3000:3000 \
-            -e USER_UID=1000 \
-            -e USER_GID=1000 \
-            -e FORGEJO__security__INSTALL_LOCK=true \
-            -e FORGEJO__server__DOMAIN=localhost \
-            -e FORGEJO__server__ROOT_URL=${{ env.FORGEJO_ROOT_URL }} \
-            codeberg.org/forgejo/forgejo:11.0-rootless
-
-      - name: Windows - Set up Go
-        uses: actions/setup-go@v5
-        with:
-          go-version-file: go.mod
-
-      - name: Windows - Install dependencies
-        run: go mod download
-
-      - name: WSL - Register Runner on Forgejo Server
-        # Starting the Forgejo server takes some time.
-        # That time used to install go.
-        shell: wsl-bash {0}
-        run: |
-          i=0
-          until curl -s ${{ env.FORGEJO_ROOT_URL }}/api/v1/version > /dev/null || (( i == ${{ env.MAX_WAIT_ITERATIONS }} )); do
-            echo "Waiting for Forgejo to be ready... ($(( ++i ))/${{ env.MAX_WAIT_ITERATIONS }})"
-            sleep 1
-          done
-          [ $i -lt ${{ env.MAX_WAIT_ITERATIONS }} ] && echo "Forgejo is ready!" || { echo "Timed out waiting for Forgejo" ; exit 1; }
-
-          # Create admin user and generate runner token
-          docker exec forgejo forgejo admin user create --admin --username ${{ env.FORGEJO_ADMIN_USER }} --password ${{ env.FORGEJO_ADMIN_PASSWORD }} --email root@example.com
-          docker exec forgejo forgejo forgejo-cli actions register --labels docker --name therunner --secret ${{ env.FORGEJO_RUNNER_SECRET }}
-
-      - name: Windows - Connect to Forgejo server
-        run: |
-          $configFileContent = @"
-          log:
-            level: debug
-          runner:
-            labels:
-              - windows:host
-              - docker:docker://node:20
-          "@
-          Set-Content -Path temporaryConfig.yml -Value $configFileContent
-
-          # Register the runner
-          go run main.go create-runner-file --config temporaryConfig.yml --instance ${{ env.FORGEJO_ROOT_URL }} --secret ${{ env.FORGEJO_RUNNER_SECRET }} --name "windows-test-runner"
-
-      - name: Windows - Run tests
-        run: go test -v ./...
-        env:
-          FORGEJO_URL: ${{ env.FORGEJO_ROOT_URL }}
-          FORGEJO_RUNNER_SECRET: ${{ env.FORGEJO_RUNNER_SECRET }}
-          FORGEJO_RUNNER_HEX_SECRET: ${{ env.FORGEJO_RUNNER_SECRET }}
-
-
-  release:
-    runs-on: ubuntu-latest
-    needs: [build, test]
-    if: github.event_name == 'push' && github.ref_type == 'tag'
-    steps:
-      - uses: actions/download-artifact@v4
-        with:
-          path: .
-
-      - name: Create Release
-        uses: softprops/action-gh-release@v2
-        with:
-          tag_name: ${{ github.ref_name }}
-          files: forgejo-runner-windows-*/forgejo-runner-windows-*.exe
-          draft: false
-          prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
-          token: ${{ secrets.RELEASE_TOKEN }}
-          fail_on_unmatched_files: true
-          body: See [original release notes](https://code.forgejo.org/forgejo/runner/releases/tag/${{ github.ref_name }}).
.goreleaser.yaml (new file, 60 lines)

@@ -0,0 +1,60 @@
+version: 2
+
+before:
+  hooks:
+    - go mod download
+
+builds:
+  - env:
+      - CGO_ENABLED=0
+    goos:
+      - linux
+      - darwin
+      - windows
+    goarch:
+      - amd64
+      - arm64
+
+archives:
+  - formats: [binary]
+    # this name template makes the OS and Arch compatible with the results of `uname`.
+    name_template: >-
+      {{ .ProjectName }}_
+      {{- title .Os }}_
+      {{- if eq .Arch "amd64" }}x86_64
+      {{- else if eq .Arch "386" }}i386
+      {{- else }}{{ .Arch }}{{ end }}
+      {{- if .Arm }}v{{ .Arm }}{{ end }}
+
+changelog:
+  abbrev: 10
+  filters:
+    exclude:
+      - "^docs:"
+      - "^test:"
+  format: "{{.SHA}}: {{.Message}}"
+  groups:
+    - title: Features
+      regexp: '^.*?feat(\([[:word:]]+\))??!?:.+$'
+      order: 0
+    - title: "Bug fixes"
+      regexp: '^.*?fix(\([[:word:]]+\))??!?:.+$'
+      order: 1
+    - title: "Chores"
+      regexp: '^.*?chore(\([[:word:]]+\))??!?:.+$'
+      order: 2
+    - title: Others
+      order: 999
+  sort: asc
+
+release:
+  gitea:
+    owner: DevFW-CICD
+    name: runner
+
+  force_token: gitea
+  gitea_urls:
+    api: https://edp.buildth.ing/api/v1
+    download: https://edp.buildth.ing
+    # set to true if you use a self-signed certificate
+    skip_tls_verify: false
.pre-commit-hooks.yaml (new file, 13 lines)

@@ -0,0 +1,13 @@
+- id: forgejo-runner-validate
+  name: Validate Forgejo Actions files
+  description: This hook validates Forgejo Actions action and workflow files.
+  language: golang
+  entry: runner validate
+  args: ['--directory', '.']
+  pass_filenames: false
+  files: (?:(?:^|/)action|^\.(?:forgejo|github|gitea)/workflows/[^/\n]+)\.ya?ml$
+  types: [yaml]
+  # 3.2.0 is when the pre-* `stages` used here were added.
+  # Old names (without the pre- prefix) are deprecated since 4.0.0.
+  minimum_pre_commit_version: '3.2.0'
+  stages: [pre-commit, pre-merge-commit, pre-push, manual]
@@ -1,6 +1,6 @@
 FROM --platform=$BUILDPLATFORM data.forgejo.org/oci/xx AS xx
 
-FROM --platform=$BUILDPLATFORM data.forgejo.org/oci/golang:1.24-alpine3.22 AS build-env
+FROM --platform=$BUILDPLATFORM data.forgejo.org/oci/golang:1.25-alpine3.22 AS build-env
 
 #
 # Transparently cross compile for the target platform
Makefile (2 lines changed)

@@ -14,7 +14,7 @@ GO_FMT_FILES := $(shell find . -type f -name "*.go" ! -name "generated.*")
 GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "generated.*")
 
 MOCKERY_PACKAGE ?= github.com/vektra/mockery/v2@v2.53.5 # renovate: datasource=go
-GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.4.0 # renovate: datasource=go
+GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.5.0 # renovate: datasource=go
 
 DOCKER_IMAGE ?= gitea/act_runner
 DOCKER_TAG ?= nightly
@@ -19,12 +19,13 @@ import (
 
 //go:generate mockery --inpackage --name caches
 type caches interface {
-    openDB() (*bolthold.Store, error)
+    getDB() *bolthold.Store
     validateMac(rundata RunData) (string, error)
     readCache(id uint64, repo string) (*Cache, error)
     useCache(id uint64) error
     setgcAt(at time.Time)
     gcCache()
+    close()
 
     serve(w http.ResponseWriter, r *http.Request, id uint64)
     commit(id uint64, size int64) (int64, error)
@@ -38,6 +39,8 @@ type cachesImpl struct {
     logger logrus.FieldLogger
     secret string
 
+    db *bolthold.Store
+
     gcing atomic.Bool
     gcAt  time.Time
 }
@@ -68,12 +71,6 @@ func newCaches(dir, secret string, logger logrus.FieldLogger) (caches, error) {
     }
     c.storage = storage
 
-    c.gcCache()
-
-    return c, nil
-}
-
-func (c *cachesImpl) openDB() (*bolthold.Store, error) {
     file := filepath.Join(c.dir, "bolt.db")
     db, err := bolthold.Open(file, 0o644, &bolthold.Options{
         Encoder: json.Marshal,
@@ -87,7 +84,22 @@ func (c *cachesImpl) openDB() (*bolthold.Store, error) {
     if err != nil {
         return nil, fmt.Errorf("Open(%s): %w", file, err)
     }
-    return db, nil
+    c.db = db
+
+    c.gcCache()
+
+    return c, nil
+}
+
+func (c *cachesImpl) close() {
+    if c.db != nil {
+        c.db.Close()
+        c.db = nil
+    }
+}
+
+func (c *cachesImpl) getDB() *bolthold.Store {
+    return c.db
 }
 
 var findCacheWithIsolationKeyFallback = func(db *bolthold.Store, repo string, keys []string, version, writeIsolationKey string) (*Cache, error) {
@@ -156,11 +168,7 @@ func insertCache(db *bolthold.Store, cache *Cache) error {
 }
 
 func (c *cachesImpl) readCache(id uint64, repo string) (*Cache, error) {
-    db, err := c.openDB()
-    if err != nil {
-        return nil, err
-    }
-    defer db.Close()
+    db := c.getDB()
     cache := &Cache{}
     if err := db.Get(id, cache); err != nil {
         return nil, fmt.Errorf("readCache: Get(%v): %w", id, err)
@@ -173,11 +181,7 @@ func (c *cachesImpl) readCache(id uint64, repo string) (*Cache, error) {
 }
 
 func (c *cachesImpl) useCache(id uint64) error {
-    db, err := c.openDB()
-    if err != nil {
-        return err
-    }
-    defer db.Close()
+    db := c.getDB()
     cache := &Cache{}
     if err := db.Get(id, cache); err != nil {
         return fmt.Errorf("useCache: Get(%v): %w", id, err)
@@ -232,12 +236,7 @@ func (c *cachesImpl) gcCache() {
     c.gcAt = time.Now()
     c.logger.Debugf("gc: %v", c.gcAt.String())
 
-    db, err := c.openDB()
-    if err != nil {
-        fatal(c.logger, err)
-        return
-    }
-    defer db.Close()
+    db := c.getDB()
 
     // Remove the caches which are not completed for a while, they are most likely to be broken.
     var caches []*Cache
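Taken together, the caches.go hunks above replace the open-a-bolt-store-per-call pattern with a single store held for the handler's lifetime. A minimal sketch of the new lifecycle, reusing the package-internal names shown in the diff (the wrapper function itself is hypothetical):

```go
package artifactcache

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// readOnce is a hypothetical wrapper illustrating the new lifecycle:
// newCaches opens the bolthold store once, getDB hands out the shared
// handle, and close releases it at shutdown -- replacing the removed
// per-call openDB()/defer db.Close() pairs.
func readOnce(dir string, id uint64, repo string) (*Cache, error) {
	c, err := newCaches(dir, "secret", logrus.New())
	if err != nil {
		return nil, fmt.Errorf("newCaches: %w", err)
	}
	defer c.close()

	return c.readCache(id, repo)
}
```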
@@ -14,6 +14,7 @@ import (
 func TestCacheReadWrite(t *testing.T) {
     caches, err := newCaches(t.TempDir(), "secret", logrus.New())
     require.NoError(t, err)
+    defer caches.close()
     t.Run("NotFound", func(t *testing.T) {
         found, err := caches.readCache(456, "repo")
         assert.Nil(t, found)
@@ -33,9 +34,7 @@ func TestCacheReadWrite(t *testing.T) {
     cache.Repo = repo
 
     t.Run("Insert", func(t *testing.T) {
-        db, err := caches.openDB()
-        require.NoError(t, err)
-        defer db.Close()
+        db := caches.getDB()
         assert.NoError(t, insertCache(db, cache))
     })
@@ -9,7 +9,6 @@ import (
     "net/http"
     "strconv"
     "strings"
-    "syscall"
     "time"
 
     "github.com/julienschmidt/httprouter"
@@ -25,7 +24,7 @@ const (
 
 var fatal = func(logger logrus.FieldLogger, err error) {
     logger.Errorf("unrecoverable error in the cache: %v", err)
-    if err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM); err != nil {
+    if err := suicide(); err != nil {
         logger.Errorf("unrecoverable error in the cache: failed to send the TERM signal to shutdown the daemon %v", err)
     }
 }
@@ -122,6 +121,10 @@ func (h *handler) Close() error {
         return nil
     }
     var retErr error
+    if h.caches != nil {
+        h.caches.close()
+        h.caches = nil
+    }
     if h.server != nil {
         err := h.server.Close()
         if err != nil {
@@ -151,6 +154,9 @@ func (h *handler) getCaches() caches {
 }
 
 func (h *handler) setCaches(caches caches) {
+    if h.caches != nil {
+        h.caches.close()
+    }
     h.caches = caches
 }
 
@@ -170,12 +176,7 @@ func (h *handler) find(w http.ResponseWriter, r *http.Request, params httprouter
     }
     version := r.URL.Query().Get("version")
 
-    db, err := h.caches.openDB()
-    if err != nil {
-        h.responseFatalJSON(w, r, err)
-        return
-    }
-    defer db.Close()
+    db := h.caches.getDB()
 
     cache, err := findCacheWithIsolationKeyFallback(db, repo, keys, version, rundata.WriteIsolationKey)
     if err != nil {
@@ -221,12 +222,7 @@ func (h *handler) reserve(w http.ResponseWriter, r *http.Request, params httprou
     api.Key = strings.ToLower(api.Key)
 
     cache := api.ToCache()
-    db, err := h.caches.openDB()
-    if err != nil {
-        h.responseFatalJSON(w, r, err)
-        return
-    }
-    defer db.Close()
+    db := h.caches.getDB()
 
     now := time.Now().Unix()
     cache.CreatedAt = now
@@ -335,12 +331,7 @@ func (h *handler) commit(w http.ResponseWriter, r *http.Request, params httprout
     // write real size back to cache, it may be different from the current value when the request doesn't specify it.
     cache.Size = size
 
-    db, err := h.caches.openDB()
-    if err != nil {
-        h.responseFatalJSON(w, r, err)
-        return
-    }
-    defer db.Close()
+    db := h.caches.getDB()
 
     cache.Complete = true
     if err := db.Update(cache.ID, cache); err != nil {
@@ -78,9 +78,7 @@ func TestHandler(t *testing.T) {
 
     defer func() {
         t.Run("inspect db", func(t *testing.T) {
-            db, err := handler.getCaches().openDB()
-            require.NoError(t, err)
-            defer db.Close()
+            db := handler.getCaches().getDB()
             require.NoError(t, db.Bolt().View(func(tx *bbolt.Tx) error {
                 return tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error {
                     t.Logf("%s: %s", k, v)
@@ -937,40 +935,11 @@ func TestHandlerAPIFatalErrors(t *testing.T) {
                 handler.find(w, req, nil)
             },
         },
-        {
-            name: "find open",
-            caches: func(t *testing.T, message string) caches {
-                caches := newMockCaches(t)
-                caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
-                caches.On("openDB", mock.Anything, mock.Anything).Return(nil, errors.New(message))
-                return caches
-            },
-            call: func(t *testing.T, handler Handler, w http.ResponseWriter) {
-                req, err := http.NewRequest("GET", "example.com/cache", nil)
-                require.NoError(t, err)
-                handler.find(w, req, nil)
-            },
-        },
-        {
-            name: "reserve",
-            caches: func(t *testing.T, message string) caches {
-                caches := newMockCaches(t)
-                caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
-                caches.On("openDB", mock.Anything, mock.Anything).Return(nil, errors.New(message))
-                return caches
-            },
-            call: func(t *testing.T, handler Handler, w http.ResponseWriter) {
-                body, err := json.Marshal(&Request{})
-                require.NoError(t, err)
-                req, err := http.NewRequest("POST", "example.com/caches", bytes.NewReader(body))
-                require.NoError(t, err)
-                handler.reserve(w, req, nil)
-            },
-        },
         {
             name: "upload",
             caches: func(t *testing.T, message string) caches {
                 caches := newMockCaches(t)
+                caches.On("close").Return()
                 caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
                 caches.On("readCache", mock.Anything, mock.Anything).Return(nil, errors.New(message))
                 return caches
@@ -988,6 +957,7 @@ func TestHandlerAPIFatalErrors(t *testing.T) {
             name: "commit",
             caches: func(t *testing.T, message string) caches {
                 caches := newMockCaches(t)
+                caches.On("close").Return()
                 caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
                 caches.On("readCache", mock.Anything, mock.Anything).Return(nil, errors.New(message))
                 return caches
@@ -1005,6 +975,7 @@ func TestHandlerAPIFatalErrors(t *testing.T) {
             name: "get",
             caches: func(t *testing.T, message string) caches {
                 caches := newMockCaches(t)
+                caches.On("close").Return()
                 caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
                 caches.On("readCache", mock.Anything, mock.Anything).Return(nil, errors.New(message))
                 return caches
@@ -1042,10 +1013,12 @@ func TestHandlerAPIFatalErrors(t *testing.T) {
             dir := filepath.Join(t.TempDir(), "artifactcache")
             handler, err := StartHandler(dir, "", 0, "secret", nil)
             require.NoError(t, err)
+            defer handler.Close()
 
             fatalMessage = "<unset>"
 
-            handler.setCaches(testCase.caches(t, message))
+            caches := testCase.caches(t, message) // doesn't need to be closed because it will be given to handler
+            handler.setCaches(caches)
 
             w := httptest.NewRecorder()
             testCase.call(t, handler, w)
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
db, err := handler.getCaches().openDB()
|
db := handler.getCaches().getDB()
|
||||||
require.NoError(t, err)
|
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
require.NoError(t, insertCache(db, c.Cache))
|
require.NoError(t, insertCache(db, c.Cache))
|
||||||
}
|
}
|
||||||
require.NoError(t, db.Close())
|
|
||||||
|
|
||||||
handler.getCaches().setgcAt(time.Time{}) // ensure gcCache will not skip
|
handler.getCaches().setgcAt(time.Time{}) // ensure gcCache will not skip
|
||||||
handler.getCaches().gcCache()
|
handler.getCaches().gcCache()
|
||||||
|
|
||||||
db, err = handler.getCaches().openDB()
|
db = handler.getCaches().getDB()
|
||||||
require.NoError(t, err)
|
|
||||||
for i, v := range cases {
|
for i, v := range cases {
|
||||||
t.Run(fmt.Sprintf("%d_%s", i, v.Cache.Key), func(t *testing.T) {
|
t.Run(fmt.Sprintf("%d_%s", i, v.Cache.Key), func(t *testing.T) {
|
||||||
cache := &Cache{}
|
cache := &Cache{}
|
||||||
|
|
@@ -1161,7 +1131,6 @@ func TestHandler_gcCache(t *testing.T) {
             }
         })
     }
-    require.NoError(t, db.Close())
 }
 
 func TestHandler_ExternalURL(t *testing.T) {
@@ -19,6 +19,11 @@ type mockCaches struct {
     mock.Mock
 }
 
+// close provides a mock function with no fields
+func (_m *mockCaches) close() {
+    _m.Called()
+}
+
 // commit provides a mock function with given fields: id, size
 func (_m *mockCaches) commit(id uint64, size int64) (int64, error) {
     ret := _m.Called(id, size)
@@ -80,19 +85,15 @@ func (_m *mockCaches) gcCache() {
     _m.Called()
 }
 
-// openDB provides a mock function with no fields
-func (_m *mockCaches) openDB() (*bolthold.Store, error) {
+// getDB provides a mock function with no fields
+func (_m *mockCaches) getDB() *bolthold.Store {
     ret := _m.Called()
 
     if len(ret) == 0 {
-        panic("no return value specified for openDB")
+        panic("no return value specified for getDB")
     }
 
     var r0 *bolthold.Store
-    var r1 error
-    if rf, ok := ret.Get(0).(func() (*bolthold.Store, error)); ok {
-        return rf()
-    }
     if rf, ok := ret.Get(0).(func() *bolthold.Store); ok {
         r0 = rf()
     } else {
@@ -101,13 +102,7 @@ func (_m *mockCaches) openDB() (*bolthold.Store, error) {
         }
     }
 
-    if rf, ok := ret.Get(1).(func() error); ok {
-        r1 = rf()
-    } else {
-        r1 = ret.Error(1)
-    }
-
-    return r0, r1
+    return r0
 }
 
 // readCache provides a mock function with given fields: id, repo
act/artifactcache/utils.go (new file, 9 lines)

@@ -0,0 +1,9 @@
+//go:build !windows
+
+package artifactcache
+
+import "syscall"
+
+func suicide() error {
+    return syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
+}
act/artifactcache/utils_windows.go (new file, 14 lines)

@@ -0,0 +1,14 @@
+//go:build windows
+
+package artifactcache
+
+import "syscall"
+
+func suicide() error {
+    handle, err := syscall.GetCurrentProcess()
+    if err != nil {
+        return err
+    }
+
+    return syscall.TerminateProcess(handle, uint32(syscall.SIGTERM))
+}
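These two new files give `fatal` (reworked in the handler.go hunk above) a portable way to shut the daemon down: SIGTERM to the current pid on Unix, TerminateProcess on Windows. A hedged sketch of a test for the non-Windows variant — hypothetical, not part of the diff — that intercepts the signal so the test binary survives:

```go
//go:build !windows

package artifactcache

import (
	"os"
	"os/signal"
	"syscall"
	"testing"
	"time"
)

// TestSuicideSendsSIGTERM is a hypothetical test: it registers a handler
// for SIGTERM, calls suicide(), and checks the signal actually arrives.
func TestSuicideSendsSIGTERM(t *testing.T) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGTERM) // intercept so the test process is not killed
	defer signal.Stop(ch)

	if err := suicide(); err != nil {
		t.Fatal(err)
	}
	select {
	case <-ch:
		// SIGTERM delivered as expected
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for SIGTERM")
	}
}
```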
@@ -37,7 +37,8 @@ type Handler struct {
 
     outboundIP string
 
     cacheServerHost string
+    cacheProxyHostOverride string
 
     cacheSecret string
@@ -55,7 +56,7 @@ func (h *Handler) CreateRunData(fullName, runNumber, timestamp, writeIsolationKe
     }
 }
 
-func StartHandler(targetHost, outboundIP string, port uint16, cacheSecret string, logger logrus.FieldLogger) (*Handler, error) {
+func StartHandler(targetHost, outboundIP string, port uint16, cacheProxyHostOverride, cacheSecret string, logger logrus.FieldLogger) (*Handler, error) {
     h := &Handler{}
 
     if logger == nil {
@@ -63,7 +64,7 @@ func StartHandler(targetHost, outboundIP string, port uint16, cacheSecret string
         discard.Out = io.Discard
         logger = discard
     }
-    logger = logger.WithField("module", "artifactcache")
+    logger = logger.WithField("module", "cacheproxy")
     h.logger = logger
 
     h.cacheSecret = cacheSecret
@@ -77,10 +78,11 @@ func StartHandler(targetHost, outboundIP string, port uint16, cacheSecret string
     }
 
     h.cacheServerHost = targetHost
+    h.cacheProxyHostOverride = cacheProxyHostOverride
 
     proxy, err := h.newReverseProxy(targetHost)
     if err != nil {
-        return nil, fmt.Errorf("unable to set up proxy to target host")
+        return nil, fmt.Errorf("unable to set up proxy to target host: %v", err)
     }
 
     router := httprouter.New()
@@ -137,6 +139,7 @@ func (h *Handler) newReverseProxy(targetHost string) (*httputil.ReverseProxy, er
 
             r.SetURL(targetURL)
             r.Out.URL.Path = uri
+            h.logger.Debugf("proxy req %s %q to %q", r.In.Method, r.In.URL, r.Out.URL)
 
             r.Out.Header.Set("Forgejo-Cache-Repo", runData.RepositoryFullName)
             r.Out.Header.Set("Forgejo-Cache-RunNumber", runData.RunNumber)
@@ -148,12 +151,18 @@ func (h *Handler) newReverseProxy(targetHost string) (*httputil.ReverseProxy, er
                 r.Out.Header.Set("Forgejo-Cache-WriteIsolationKey", runData.WriteIsolationKey)
             }
         },
+        ModifyResponse: func(r *http.Response) error {
+            h.logger.Debugf("proxy resp %s w/ %d bytes", r.Status, r.ContentLength)
+            return nil
+        },
     }
     return proxy, nil
 }
 
 func (h *Handler) ExternalURL() string {
-    // TODO: make the external url configurable if necessary
+    if h.cacheProxyHostOverride != "" {
+        return h.cacheProxyHostOverride
+    }
     return fmt.Sprintf("http://%s", net.JoinHostPort(h.outboundIP, strconv.Itoa(h.listener.Addr().(*net.TCPAddr).Port)))
 }
@@ -305,7 +305,7 @@ func gitOptions(token string) (fetchOptions git.FetchOptions, pullOptions git.Pu
 func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
     return func(ctx context.Context) error {
         logger := common.Logger(ctx)
-        logger.Infof(" \u2601 git clone '%s' # ref=%s", input.URL, input.Ref)
+        logger.Infof(" \u2601\ufe0f git clone '%s' # ref=%s", input.URL, input.Ref)
         logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
 
         cloneLock.Lock()
@@ -483,6 +483,14 @@ func (cr *containerReference) mergeJobOptions(ctx context.Context, config *conta
         }
     }
 
+    if jobConfig.HostConfig.Memory > 0 {
+        logger.Debugf("--memory %v", jobConfig.HostConfig.Memory)
+        if hostConfig.Memory > 0 && jobConfig.HostConfig.Memory > hostConfig.Memory {
+            return nil, nil, fmt.Errorf("the --memory %v option found in the workflow cannot be greater than the --memory %v option from the runner configuration file", jobConfig.HostConfig.Memory, hostConfig.Memory)
+        }
+        hostConfig.Memory = jobConfig.HostConfig.Memory
+    }
+
     if len(jobConfig.Config.Hostname) > 0 {
         logger.Debugf("--hostname %v", jobConfig.Config.Hostname)
         config.Hostname = jobConfig.Config.Hostname
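The block added to mergeJobOptions lets a job request a container memory limit while refusing anything above the runner's configured cap. The same rule restated in isolation, as a hedged standalone sketch with hypothetical names:

```go
package main

import "fmt"

// mergeMemory mirrors the rule added above: a zero job limit keeps the
// runner's limit, and a positive job limit must not exceed a positive
// runner limit (a zero runner limit means "unrestricted").
func mergeMemory(runnerLimit, jobLimit int64) (int64, error) {
	if jobLimit <= 0 {
		return runnerLimit, nil // the workflow did not set --memory
	}
	if runnerLimit > 0 && jobLimit > runnerLimit {
		return 0, fmt.Errorf("workflow --memory %v cannot be greater than runner --memory %v", jobLimit, runnerLimit)
	}
	return jobLimit, nil
}

func main() {
	limit, err := mergeMemory(2<<30, 1<<30) // runner allows 2 GiB, job asks for 1 GiB
	fmt.Println(limit, err)                 // 1073741824 <nil>
}
```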
@@ -13,6 +13,7 @@ import (
     "path/filepath"
     "runtime"
     "strings"
+    "sync/atomic"
     "time"
 
     "github.com/go-git/go-billy/v5/helper/polyfill"
@@ -33,7 +34,6 @@ type HostEnvironment struct {
     Workdir string
     ActPath string
     Root    string
-    CleanUp func()
     StdOut  io.Writer
     LXC     bool
 }
@@ -191,12 +191,12 @@ func (e *HostEnvironment) Start(_ bool) common.Executor {
 
 type ptyWriter struct {
     Out       io.Writer
-    AutoStop  bool
+    AutoStop  atomic.Bool
     dirtyLine bool
 }
 
 func (w *ptyWriter) Write(buf []byte) (int, error) {
-    if w.AutoStop && len(buf) > 0 && buf[len(buf)-1] == 4 {
+    if w.AutoStop.Load() && len(buf) > 0 && buf[len(buf)-1] == 4 {
         n, err := w.Out.Write(buf[:len(buf)-1])
         if err != nil {
             return n, err
@@ -365,7 +365,7 @@ func (e *HostEnvironment) exec(ctx context.Context, commandparam []string, cmdli
         return fmt.Errorf("RUN %w", err)
     }
     if tty != nil {
-        writer.AutoStop = true
+        writer.AutoStop.Store(true)
         if _, err := tty.Write([]byte("\x04")); err != nil {
             common.Logger(ctx).Debug("Failed to write EOT")
         }
@@ -388,7 +388,7 @@ func (e *HostEnvironment) ExecWithCmdLine(command []string, cmdline string, env
     if err := e.exec(ctx, command, cmdline, env, user, workdir); err != nil {
         select {
         case <-ctx.Done():
-            return fmt.Errorf("this step has been cancelled: %w", err)
+            return fmt.Errorf("this step has been cancelled: ctx: %w, exec: %w", ctx.Err(), err)
         default:
             return err
         }
@@ -403,10 +403,12 @@ func (e *HostEnvironment) UpdateFromEnv(srcPath string, env *map[string]string)
 
 func (e *HostEnvironment) Remove() common.Executor {
     return func(ctx context.Context) error {
-        if e.CleanUp != nil {
-            e.CleanUp()
+        if e.GetLXC() {
+            // there may be files owned by root: removal
+            // is the responsibility of the LXC backend
+            return nil
         }
-        return os.RemoveAll(e.Path)
+        return os.RemoveAll(e.Root)
     }
 }
 
@@ -158,6 +158,8 @@ func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableN
         return impl.env.Github, nil
     case "forge":
         return impl.env.Github, nil
+    case "forgejo":
+        return impl.env.Github, nil
     case "env":
         return impl.env.Env, nil
     case "job":
@@ -76,6 +76,36 @@ func NewInterpeter(
     return exprparser.NewInterpeter(ee, config)
 }
 
+// Returns an interpeter used in the server in the context of workflow-level templates. Needs github, inputs, and vars
+// context only.
+func NewWorkflowInterpeter(
+    gitCtx *model.GithubContext,
+    vars map[string]string,
+    inputs map[string]any,
+) exprparser.Interpreter {
+    ee := &exprparser.EvaluationEnvironment{
+        Github:   gitCtx,
+        Env:      nil, // no need
+        Job:      nil, // no need
+        Steps:    nil, // no need
+        Runner:   nil, // no need
+        Secrets:  nil, // no need
+        Strategy: nil, // no need
+        Matrix:   nil, // no need
+        Needs:    nil, // no need
+        Inputs:   inputs,
+        Vars:     vars,
+    }
+
+    config := exprparser.Config{
+        Run:        nil,
+        WorkingDir: "", // WorkingDir is used for the function hashFiles, but it's not needed in the server
+        Context:    "workflow",
+    }
+
+    return exprparser.NewInterpeter(ee, config)
+}
+
 // JobResult is the minimum requirement of job results for Interpeter
 type JobResult struct {
     Needs []string
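With NewWorkflowInterpeter in place, the reworked EvaluateWorkflowConcurrency shown below returns a *bool so callers can tell "no explicit cancel-in-progress" apart from an explicit false. A hedged sketch of a server-side caller under the signatures in this diff (the wrapper function itself is hypothetical):

```go
package jobparser

import "code.forgejo.org/forgejo/runner/v11/act/model"

// resolveConcurrency is a hypothetical caller: it evaluates a workflow-level
// concurrency block and applies a default when cancel-in-progress is unset.
func resolveConcurrency(rc *model.RawConcurrency, gitCtx *model.GithubContext, vars map[string]string, inputs map[string]any) (string, bool, error) {
	group, cancelInProgress, err := EvaluateWorkflowConcurrency(rc, gitCtx, vars, inputs)
	if err != nil {
		return "", false, err
	}
	if cancelInProgress == nil {
		// the workflow made no explicit choice; keep the server default
		return group, false, nil
	}
	return group, *cancelInProgress, nil
}
```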
@@ -1,8 +1,8 @@
 package jobparser
 
 import (
-    "bytes"
     "fmt"
+    "strings"
 
     "code.forgejo.org/forgejo/runner/v11/act/model"
     "go.yaml.in/yaml/v3"
@@ -193,83 +193,32 @@ func (evt *Event) Schedules() []map[string]string {
 	return evt.schedules
 }
 
-func ReadWorkflowRawConcurrency(content []byte) (*model.RawConcurrency, error) {
-	w := new(model.Workflow)
-	err := yaml.NewDecoder(bytes.NewReader(content)).Decode(w)
-	return w.RawConcurrency, err
-}
-
-func EvaluateConcurrency(rc *model.RawConcurrency, jobID string, job *Job, gitCtx map[string]any, results map[string]*JobResult, vars map[string]string, inputs map[string]any) (string, bool, error) {
-	actJob := &model.Job{}
-	if job != nil {
-		actJob.Strategy = &model.Strategy{
-			FailFastString:    job.Strategy.FailFastString,
-			MaxParallelString: job.Strategy.MaxParallelString,
-			RawMatrix:         job.Strategy.RawMatrix,
-		}
-		actJob.Strategy.FailFast = actJob.Strategy.GetFailFast()
-		actJob.Strategy.MaxParallel = actJob.Strategy.GetMaxParallel()
-	}
-
-	matrix := make(map[string]any)
-	matrixes, err := actJob.GetMatrixes()
-	if err != nil {
-		return "", false, err
-	}
-	if len(matrixes) > 0 {
-		matrix = matrixes[0]
-	}
-
-	evaluator := NewExpressionEvaluator(NewInterpeter(jobID, actJob, matrix, toGitContext(gitCtx), results, vars, inputs))
+// Convert the raw YAML from the `concurrency` block on a workflow into the evaluated concurrency group and
+// cancel-in-progress value. This implementation only supports workflow-level concurrency definition, where we expect
+// expressions to be able to access only the github, inputs and vars contexts. If RawConcurrency is empty, then the
+// returned concurrency group will be "" and cancel-in-progress will be nil -- this can be used to distinguish from an
+// explicit cancel-in-progress choice even if a group isn't specified.
+func EvaluateWorkflowConcurrency(rc *model.RawConcurrency, gitCtx *model.GithubContext, vars map[string]string, inputs map[string]any) (string, *bool, error) {
+	evaluator := NewExpressionEvaluator(NewWorkflowInterpeter(gitCtx, vars, inputs))
 	var node yaml.Node
 	if err := node.Encode(rc); err != nil {
-		return "", false, fmt.Errorf("failed to encode concurrency: %w", err)
+		return "", nil, fmt.Errorf("failed to encode concurrency: %w", err)
 	}
 	if err := evaluator.EvaluateYamlNode(&node); err != nil {
-		return "", false, fmt.Errorf("failed to evaluate concurrency: %w", err)
+		return "", nil, fmt.Errorf("failed to evaluate concurrency: %w", err)
 	}
 	var evaluated model.RawConcurrency
 	if err := node.Decode(&evaluated); err != nil {
-		return "", false, fmt.Errorf("failed to unmarshal evaluated concurrency: %w", err)
+		return "", nil, fmt.Errorf("failed to unmarshal evaluated concurrency: %w", err)
 	}
 	if evaluated.RawExpression != "" {
-		return evaluated.RawExpression, false, nil
+		return evaluated.RawExpression, nil, nil
 	}
-	return evaluated.Group, evaluated.CancelInProgress == "true", nil
-}
-
-func toGitContext(input map[string]any) *model.GithubContext {
-	gitContext := &model.GithubContext{
-		EventPath:        asString(input["event_path"]),
-		Workflow:         asString(input["workflow"]),
-		RunID:            asString(input["run_id"]),
-		RunNumber:        asString(input["run_number"]),
-		Actor:            asString(input["actor"]),
-		Repository:       asString(input["repository"]),
-		EventName:        asString(input["event_name"]),
-		Sha:              asString(input["sha"]),
-		Ref:              asString(input["ref"]),
-		RefName:          asString(input["ref_name"]),
-		RefType:          asString(input["ref_type"]),
-		HeadRef:          asString(input["head_ref"]),
-		BaseRef:          asString(input["base_ref"]),
-		Token:            asString(input["token"]),
-		Workspace:        asString(input["workspace"]),
-		Action:           asString(input["action"]),
-		ActionPath:       asString(input["action_path"]),
-		ActionRef:        asString(input["action_ref"]),
-		ActionRepository: asString(input["action_repository"]),
-		Job:              asString(input["job"]),
-		RepositoryOwner:  asString(input["repository_owner"]),
-		RetentionDays:    asString(input["retention_days"]),
-	}
-
-	event, ok := input["event"].(map[string]any)
-	if ok {
-		gitContext.Event = event
-	}
-
-	return gitContext
+	if evaluated.CancelInProgress == "" {
+		return evaluated.Group, nil, nil
+	}
+
+	cancelInProgress := evaluated.CancelInProgress == "true"
+	return evaluated.Group, &cancelInProgress, nil
 }
 
 func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
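A sketch of how a server-side caller might consume the new signature; the `*bool` return distinguishes "cancel-in-progress never specified" (nil) from an explicit true or false. The function signature and the model types are taken from the diff above, while the jobparser import path is an assumption for illustration:

package main

import (
	"fmt"

	"code.forgejo.org/forgejo/runner/v11/act/jobparser" // assumed package path
	"code.forgejo.org/forgejo/runner/v11/act/model"
)

func main() {
	rc := &model.RawConcurrency{Group: "${{ github.workflow }}-${{ github.ref }}"}
	gitCtx := &model.GithubContext{Workflow: "ci", Ref: "refs/heads/main"}

	group, cancel, err := jobparser.EvaluateWorkflowConcurrency(rc, gitCtx, nil, nil)
	if err != nil {
		panic(err)
	}
	if cancel == nil {
		// cancel-in-progress was never written in the workflow file;
		// distinct from an explicit `cancel-in-progress: false`.
		fmt.Printf("group=%q, cancel-in-progress unset\n", group)
	} else {
		fmt.Printf("group=%q, cancel-in-progress=%v\n", group, *cancel)
	}
}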
@@ -278,7 +227,7 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
 		var val string
 		err := rawOn.Decode(&val)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("unable to interpret scalar value into a string: %w", err)
 		}
 		return []*Event{
 			{Name: val},
@@ -290,12 +239,12 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
 			return nil, err
 		}
 		res := make([]*Event, 0, len(val))
-		for _, v := range val {
+		for i, v := range val {
 			switch t := v.(type) {
 			case string:
 				res = append(res, &Event{Name: t})
 			default:
-				return nil, fmt.Errorf("invalid type %T", t)
+				return nil, fmt.Errorf("value at index %d was unexpected type %[2]T; must be a string but was %#[2]v", i, v)
 			}
 		}
 		return res, nil
@@ -315,16 +264,6 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
 				continue
 			}
 			switch t := v.(type) {
-			case string:
-				res = append(res, &Event{
-					Name: k,
-					acts: map[string][]string{},
-				})
-			case []string:
-				res = append(res, &Event{
-					Name: k,
-					acts: map[string][]string{},
-				})
 			case map[string]any:
 				acts := make(map[string][]string, len(t))
 				for act, branches := range t {
@@ -338,15 +277,15 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
 					for i, v := range b {
 						var ok bool
 						if acts[act][i], ok = v.(string); !ok {
-							return nil, fmt.Errorf("unknown on type: %#v", branches)
+							return nil, fmt.Errorf("key %q.%q index %d had unexpected type %[4]T; a string was expected but was %#[4]v", k, act, i, v)
 						}
 					}
 				case map[string]any:
-					if isInvalidOnType(k, act) {
-						return nil, fmt.Errorf("unknown on type: %#v", v)
+					if err := isInvalidOnType(k, act); err != nil {
+						return nil, fmt.Errorf("invalid value on key %q: %w", k, err)
 					}
 				default:
-					return nil, fmt.Errorf("unknown on type: %#v", branches)
+					return nil, fmt.Errorf("key %q.%q had unexpected type %T; was %#v", k, act, branches, branches)
 				}
 			}
 			if k == "workflow_dispatch" || k == "workflow_call" {
@@ -358,19 +297,22 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
 				})
 			case []any:
 				if k != "schedule" {
-					return nil, fmt.Errorf("unknown on type: %#v", v)
+					return nil, fmt.Errorf("key %q had an type %T; only the 'schedule' key is expected with this type", k, v)
 				}
 				schedules := make([]map[string]string, len(t))
 				for i, tt := range t {
 					vv, ok := tt.(map[string]any)
 					if !ok {
-						return nil, fmt.Errorf("unknown on type: %#v", v)
+						return nil, fmt.Errorf("key %q[%d] had unexpected type %[3]T; a map with a key \"cron\" was expected, but value was %#[3]v", k, i, tt)
 					}
 					schedules[i] = make(map[string]string, len(vv))
-					for k, vvv := range vv {
+					for kk, vvv := range vv {
+						if strings.ToLower(kk) != "cron" {
+							return nil, fmt.Errorf("key %q[%d] had unexpected key %q; \"cron\" was expected", k, i, kk)
+						}
 						var ok bool
-						if schedules[i][k], ok = vvv.(string); !ok {
-							return nil, fmt.Errorf("unknown on type: %#v", v)
+						if schedules[i][kk], ok = vvv.(string); !ok {
+							return nil, fmt.Errorf("key %q[%d].%q had unexpected type %[4]T; a string was expected by was %#[4]v", k, i, kk, vvv)
 						}
 					}
 				}
@@ -379,23 +321,29 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
 				schedules: schedules,
 			})
 		default:
-			return nil, fmt.Errorf("unknown on type: %#v", v)
+			return nil, fmt.Errorf("key %q had unexpected type %[2]T; expected a map or array but was %#[2]v", k, v)
 		}
 	}
 	return res, nil
 default:
-	return nil, fmt.Errorf("unknown on type: %v", rawOn.Kind)
+	return nil, fmt.Errorf("unexpected yaml node in `on`: %v", rawOn.Kind)
 }
 }
 
-func isInvalidOnType(onType, subKey string) bool {
-	if onType == "workflow_dispatch" && subKey == "inputs" {
-		return false
+func isInvalidOnType(onType, subKey string) error {
+	if onType == "workflow_dispatch" {
+		if subKey == "inputs" {
+			return nil
+		}
+		return fmt.Errorf("workflow_dispatch only supports key \"inputs\", but key %q was found", subKey)
 	}
-	if onType == "workflow_call" && (subKey == "inputs" || subKey == "outputs") {
-		return false
+	if onType == "workflow_call" {
+		if subKey == "inputs" || subKey == "outputs" {
+			return nil
+		}
+		return fmt.Errorf("workflow_call only supports keys \"inputs\" and \"outputs\", but key %q was found", subKey)
 	}
-	return true
+	return fmt.Errorf("unexpected key %q.%q", onType, subKey)
 }
 
 // parseMappingNode parse a mapping node and preserve order.
@@ -430,12 +378,3 @@ func parseMappingNode[T any](node *yaml.Node) ([]string, []T, error) {
 
 	return scalars, datas, nil
 }
-
-func asString(v any) string {
-	if v == nil {
-		return ""
-	} else if s, ok := v.(string); ok {
-		return s
-	}
-	return ""
-}
@@ -16,6 +16,7 @@ func TestParseRawOn(t *testing.T) {
 	kases := []struct {
 		input  string
 		result []*Event
+		err    string
 	}{
 		{
 			input: "on: issue_comment",
@@ -33,7 +34,10 @@ func TestParseRawOn(t *testing.T) {
 				},
 			},
 		},
+		{
+			input: "on: [123]",
+			err:   "value at index 0 was unexpected type int; must be a string but was 123",
+		},
 		{
 			input: "on:\n - push\n - pull_request",
 			result: []*Event{
@@ -45,6 +49,19 @@ func TestParseRawOn(t *testing.T) {
 				},
 			},
 		},
+		{
+			input: "on: { push: null }",
+			result: []*Event{
+				{
+					Name: "push",
+					acts: map[string][]string{},
+				},
+			},
+		},
+		{
+			input: "on: { push: 'abc' }",
+			err:   "key \"push\" had unexpected type string; expected a map or array but was \"abc\"",
+		},
 		{
 			input: "on:\n push:\n branches:\n - master",
 			result: []*Event{
@@ -72,6 +89,10 @@ func TestParseRawOn(t *testing.T) {
 				},
 			},
 		},
+		{
+			input: "on:\n branch_protection_rule:\n types: [123, deleted]",
+			err:   "key \"branch_protection_rule\".\"types\" index 0 had unexpected type int; a string was expected but was 123",
+		},
 		{
 			input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
 			result: []*Event{
@@ -189,6 +210,22 @@ func TestParseRawOn(t *testing.T) {
 				},
 			},
 		},
+		{
+			input: "on:\n schedule2:\n - cron: '20 6 * * *'",
+			err:   "key \"schedule2\" had an type []interface {}; only the 'schedule' key is expected with this type",
+		},
+		{
+			input: "on:\n schedule:\n - 123",
+			err:   "key \"schedule\"[0] had unexpected type int; a map with a key \"cron\" was expected, but value was 123",
+		},
+		{
+			input: "on:\n schedule:\n - corn: '20 6 * * *'",
+			err:   "key \"schedule\"[0] had unexpected key \"corn\"; \"cron\" was expected",
+		},
+		{
+			input: "on:\n schedule:\n - cron: 123",
+			err:   "key \"schedule\"[0].\"cron\" had unexpected type int; a string was expected by was 123",
+		},
 		{
 			input: `
 on:
@@ -222,15 +259,37 @@ on:
 				},
 			},
 		},
+		{
+			input: `
+on:
+  workflow_call:
+    mistake:
+      access-token:
+        description: 'A token passed from the caller workflow'
+        required: false
+`,
+			err: "invalid value on key \"workflow_call\": workflow_call only supports keys \"inputs\" and \"outputs\", but key \"mistake\" was found",
+		},
+		{
+			input: `
+on:
+  workflow_call: { map: 123 }
+`,
+			err: "key \"workflow_call\".\"map\" had unexpected type int; was 123",
+		},
 	}
 	for _, kase := range kases {
 		t.Run(kase.input, func(t *testing.T) {
 			origin, err := model.ReadWorkflow(strings.NewReader(kase.input), false)
-			assert.NoError(t, err)
+			require.NoError(t, err)
 
 			events, err := ParseRawOn(&origin.RawOn)
-			assert.NoError(t, err)
-			assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
+			if kase.err != "" {
+				assert.ErrorContains(t, err, kase.err)
+			} else {
+				assert.NoError(t, err)
+				assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
+			}
 		})
 	}
 }
@@ -342,10 +401,11 @@ func TestParseMappingNode(t *testing.T) {
 
 func TestEvaluateConcurrency(t *testing.T) {
 	tests := []struct {
 		name                string
 		input               model.RawConcurrency
 		group               string
-		cancelInProgress    bool
+		cancelInProgressNil bool
+		cancelInProgress    bool
 	}{
 		{
 			name: "basic",
@@ -357,18 +417,18 @@ func TestEvaluateConcurrency(t *testing.T) {
 			cancelInProgress: true,
 		},
 		{
 			name:                "undefined",
 			input:               model.RawConcurrency{},
 			group:               "",
-			cancelInProgress: false,
+			cancelInProgressNil: true,
 		},
 		{
 			name: "group-evaluation",
 			input: model.RawConcurrency{
 				Group: "${{ github.workflow }}-${{ github.ref }}",
 			},
 			group:               "test_workflow-main",
-			cancelInProgress: false,
+			cancelInProgressNil: true,
 		},
 		{
 			name: "cancel-evaluation-true",
@@ -393,37 +453,44 @@ func TestEvaluateConcurrency(t *testing.T) {
 			input: model.RawConcurrency{
 				Group: "user-${{ github.event.commits[0].author.username }}",
 			},
 			group:               "user-someone",
-			cancelInProgress: false,
+			cancelInProgressNil: true,
 		},
 		{
 			name: "arbitrary-var",
 			input: model.RawConcurrency{
 				Group: "${{ vars.eval_arbitrary_var }}",
 			},
 			group:               "123",
-			cancelInProgress: false,
+			cancelInProgressNil: true,
 		},
 		{
 			name: "arbitrary-input",
 			input: model.RawConcurrency{
 				Group: "${{ inputs.eval_arbitrary_input }}",
 			},
 			group:               "456",
-			cancelInProgress: false,
+			cancelInProgressNil: true,
+		},
+		{
+			name: "cancel-in-progress-only",
+			input: model.RawConcurrency{
+				CancelInProgress: "true",
+			},
+			group:            "",
+			cancelInProgress: true,
 		},
 	}
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			group, cancelInProgress, err := EvaluateConcurrency(
+			group, cancelInProgress, err := EvaluateWorkflowConcurrency(
 				&test.input,
-				"job-id",
-				nil, // job
-				map[string]any{
-					"workflow": "test_workflow",
-					"ref":      "main",
-					"event": map[string]any{
+				// gitCtx
+				&model.GithubContext{
+					Workflow: "test_workflow",
+					Ref:      "main",
+					Event: map[string]any{
 						"commits": []any{
 							map[string]any{
 								"author": map[string]any{
@@ -437,20 +504,24 @@ func TestEvaluateConcurrency(t *testing.T) {
 						},
 					},
 				},
-				}, // gitCtx
-				map[string]*JobResult{
-					"job-id": {},
-				}, // results
+				// vars
 				map[string]string{
 					"eval_arbitrary_var": "123",
-				}, // vars
+				},
+				// inputs
 				map[string]any{
 					"eval_arbitrary_input": "456",
-				}, // inputs
+				},
 			)
 			assert.NoError(t, err)
 			assert.EqualValues(t, test.group, group)
-			assert.EqualValues(t, test.cancelInProgress, cancelInProgress)
+			if test.cancelInProgressNil {
+				assert.Nil(t, cancelInProgress)
+			} else {
+				require.NotNil(t, cancelInProgress)
+				assert.EqualValues(t, test.cancelInProgress, *cancelInProgress)
+			}
 		})
 	}
 }
@@ -479,8 +479,6 @@ func rewriteSubExpression(ctx context.Context, in string, forceFormat bool) (str
 func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *model.GithubContext) map[string]any {
 	inputs := map[string]any{}
-
-	setupWorkflowInputs(ctx, &inputs, rc)
 
 	var env map[string]string
 	if step != nil {
 		env = *step.getEnv()
@@ -494,6 +492,8 @@ func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *mod
 		}
 	}
 
+	setupWorkflowInputs(ctx, &inputs, rc)
+
 	if rc.caller == nil && ghc.EventName == "workflow_dispatch" {
 		config := rc.Run.Workflow.WorkflowDispatchConfig()
 		if config != nil && config.Inputs != nil {
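Moving `setupWorkflowInputs` below the `INPUT_*` environment scan changes which writer of the shared `inputs` map wins when both produce the same key: the later write, now the workflow-level pass, presumably takes precedence. A tiny illustration of that map-ordering rule (hypothetical key, not from the runner):

package main

import "fmt"

func main() {
	inputs := map[string]any{}

	// First pass: a value derived from a step's INPUT_DEPLOY environment variable.
	inputs["deploy"] = "false"

	// Second pass: the workflow-level inputs writer runs later, so it wins.
	inputs["deploy"] = "true"

	fmt.Println(inputs["deploy"]) // true
}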
@@ -8,6 +8,7 @@ import (
 	"code.forgejo.org/forgejo/runner/v11/act/common"
 	"code.forgejo.org/forgejo/runner/v11/act/container"
 	"code.forgejo.org/forgejo/runner/v11/act/model"
+	"github.com/sirupsen/logrus"
 )
 
 type jobInfo interface {
@@ -104,37 +105,40 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
 		}
 	}
 
-	postExecutor = postExecutor.Finally(func(ctx context.Context) error {
+	setJobResults := func(ctx context.Context) error {
 		jobError := common.JobError(ctx)
 
 		// Fresh context to ensure job result output works even if prev. context was a cancelled job
 		ctx, cancel := context.WithTimeout(common.WithLogger(context.Background(), common.Logger(ctx)), time.Minute)
 		defer cancel()
 		setJobResult(ctx, info, rc, jobError == nil)
-		setJobOutputs(ctx, rc)
 
+		return nil
+	}
+
+	cleanupJob := func(_ context.Context) error {
 		var err error
-		{
-			// Separate timeout for cleanup tasks; logger is cleared so that cleanup logs go to runner, not job
-			ctx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
-			defer cancel()
 
-			logger := common.Logger(ctx)
-			logger.Debugf("Cleaning up container for job %s", rc.jobContainerName())
-			if err = info.stopContainer()(ctx); err != nil {
-				logger.Errorf("Error while stop job container %s: %v", rc.jobContainerName(), err)
-			}
+		// Separate timeout for cleanup tasks; logger is cleared so that cleanup logs go to runner, not job
+		ctx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
+		defer cancel()
 
-			if !rc.IsHostEnv(ctx) && rc.getNetworkCreated(ctx) {
-				networkName := rc.getNetworkName(ctx)
-				logger.Debugf("Cleaning up network %s for job %s", networkName, rc.jobContainerName())
-				if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
-					logger.Errorf("Error while cleaning network %s: %v", networkName, err)
-				}
-			}
+		logger := common.Logger(ctx)
+		logger.Debugf("Cleaning up container for job %s", rc.jobContainerName())
+		if err = info.stopContainer()(ctx); err != nil {
+			logger.Errorf("Error while stop job container %s: %v", rc.jobContainerName(), err)
 		}
+
+		if !rc.IsHostEnv(ctx) && rc.getNetworkCreated(ctx) {
+			networkName := rc.getNetworkName(ctx)
+			logger.Debugf("Cleaning up network %s for job %s", networkName, rc.jobContainerName())
+			if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
+				logger.Errorf("Error while cleaning network %s: %v", networkName, err)
+			}
+		}
+
 		return err
-	})
+	}
 
 	pipeline := make([]common.Executor, 0)
 	pipeline = append(pipeline, preSteps...)
@@ -152,6 +156,8 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
 			return postExecutor(ctx)
 		}).
 		Finally(info.interpolateOutputs()).
+		Finally(setJobResults).
+		Finally(cleanupJob).
 		Finally(info.closeContainer()))
 }
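The reordering above relies on `Finally` stages running in the order they are chained, after the main executor, regardless of earlier failures. A self-contained sketch of that chaining discipline (simplified signatures; the runner's `common.Executor` is context-aware and error-propagating):

package main

import "fmt"

// executor is a stripped-down stand-in for the runner's common.Executor.
type executor func() error

// finally runs next after e, preferring e's error if both fail.
func (e executor) finally(next executor) executor {
	return func() error {
		err := e()
		if nerr := next(); err == nil {
			err = nerr
		}
		return err
	}
}

func main() {
	steps := executor(func() error { fmt.Println("steps"); return nil })

	// Mirrors the chain above: outputs are interpolated, then the job
	// result is recorded, then containers/networks are cleaned up, and
	// the container handle is closed last.
	chain := steps.
		finally(func() error { fmt.Println("interpolateOutputs"); return nil }).
		finally(func() error { fmt.Println("setJobResults"); return nil }).
		finally(func() error { fmt.Println("cleanupJob"); return nil }).
		finally(func() error { fmt.Println("closeContainer"); return nil })

	_ = chain()
}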
@@ -174,33 +180,42 @@ func setJobResult(ctx context.Context, info jobInfo, rc *RunContext, success boo
 		jobResult = "failure"
 	}
 
+	// Set local result on current job (child or parent)
 	info.result(jobResult)
+
 	if rc.caller != nil {
-		// set reusable workflow job result
+		// Child reusable workflow:
+		// 1) propagate result to parent job state
 		rc.caller.runContext.result(jobResult)
+
+		// 2) copy workflow_call outputs from child to parent (as in upstream)
+		jobOutputs := make(map[string]string)
+		ee := rc.NewExpressionEvaluator(ctx)
+		if wfcc := rc.Run.Workflow.WorkflowCallConfig(); wfcc != nil {
+			for k, v := range wfcc.Outputs {
+				jobOutputs[k] = ee.Interpolate(ctx, ee.Interpolate(ctx, v.Value))
+			}
+		}
+		rc.caller.runContext.Run.Job().Outputs = jobOutputs
+
+		// 3) DO NOT print banner in child job (prevents premature token revocation)
+		logger.Debugf("Reusable job result=%s (parent will finalize, no banner)", jobResult)
+		return
 	}
 
+	// Parent job: print the final banner ONCE (job-level)
 	jobResultMessage := "succeeded"
 	if jobResult != "success" {
 		jobResultMessage = "failed"
 	}
+	jobOutputs := rc.Run.Job().Outputs
 
-	logger.WithField("jobResult", jobResult).Infof("\U0001F3C1 Job %s", jobResultMessage)
-}
-
-func setJobOutputs(ctx context.Context, rc *RunContext) {
-	if rc.caller != nil {
-		// map outputs for reusable workflows
-		callerOutputs := make(map[string]string)
-
-		ee := rc.NewExpressionEvaluator(ctx)
-
-		for k, v := range rc.Run.Workflow.WorkflowCallConfig().Outputs {
-			callerOutputs[k] = ee.Interpolate(ctx, ee.Interpolate(ctx, v.Value))
-		}
-
-		rc.caller.runContext.Run.Job().Outputs = callerOutputs
-	}
-}
+	logger.
+		WithFields(logrus.Fields{
+			"jobResult":  jobResult,
+			"jobOutputs": jobOutputs,
+		}).
+		Infof("\U0001F3C1 Job %s", jobResultMessage)
+}
 
 func useStepLogger(rc *RunContext, stepModel *model.Step, stage stepStage, executor common.Executor) common.Executor {
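The child/parent split above exists because the log entry carrying both a `jobResult` and a `jobOutputs` field acts as the end-of-job signal; emitting it from a child reusable workflow would let the reporter finalize (and revoke the job token) before the parent finishes. A hedged sketch of how a log consumer might recognize that banner entry; the field names come from the diff, the wiring is illustrative:

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// isJobBanner reports whether a log entry is the job-level banner:
// it must carry both the "jobResult" and "jobOutputs" fields.
func isJobBanner(e *logrus.Entry) bool {
	_, hasResult := e.Data["jobResult"]
	_, hasOutputs := e.Data["jobOutputs"]
	return hasResult && hasOutputs
}

func main() {
	logger := logrus.New()
	banner := logger.WithFields(logrus.Fields{
		"jobResult":  "success",
		"jobOutputs": map[string]string{"version": "1.2.3"},
	})
	progress := logger.WithField("jobResult", "success") // missing jobOutputs

	fmt.Println(isJobBanner(banner))   // true
	fmt.Println(isJobBanner(progress)) // false
}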
@@ -12,10 +12,14 @@ import (
 	"code.forgejo.org/forgejo/runner/v11/act/common"
 	"code.forgejo.org/forgejo/runner/v11/act/container"
 	"code.forgejo.org/forgejo/runner/v11/act/model"
+	"code.forgejo.org/forgejo/runner/v11/act/runner/mocks"
+	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 )
 
+//go:generate mockery --srcpkg=github.com/sirupsen/logrus --name=FieldLogger
+
 func TestJobExecutor(t *testing.T) {
 	tables := []TestJobFileInfo{
 		{workdir, "uses-and-run-in-one-step", "push", "Invalid run/uses syntax for job:test step:Test", platforms, secrets},
@@ -127,8 +131,9 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 			executedSteps: []string{
 				"startContainer",
 				"step1",
-				"stopContainer",
 				"interpolateOutputs",
+				"setJobResults",
+				"stopContainer",
 				"closeContainer",
 			},
 			result: "success",
@@ -144,8 +149,9 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 			executedSteps: []string{
 				"startContainer",
 				"step1",
-				"stopContainer",
 				"interpolateOutputs",
+				"setJobResults",
+				"stopContainer",
 				"closeContainer",
 			},
 			result: "failure",
@@ -162,8 +168,9 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 				"startContainer",
 				"pre1",
 				"step1",
-				"stopContainer",
 				"interpolateOutputs",
+				"setJobResults",
+				"stopContainer",
 				"closeContainer",
 			},
 			result: "success",
@@ -180,8 +187,9 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 				"startContainer",
 				"step1",
 				"post1",
-				"stopContainer",
 				"interpolateOutputs",
+				"setJobResults",
+				"stopContainer",
 				"closeContainer",
 			},
 			result: "success",
@@ -199,8 +207,9 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 				"pre1",
 				"step1",
 				"post1",
-				"stopContainer",
 				"interpolateOutputs",
+				"setJobResults",
+				"stopContainer",
 				"closeContainer",
 			},
 			result: "success",
@@ -229,8 +238,9 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 				"step3",
 				"post3",
 				"post2",
-				"stopContainer",
 				"interpolateOutputs",
+				"setJobResults",
+				"stopContainer",
 				"closeContainer",
 			},
 			result: "success",
@@ -246,7 +256,27 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			fmt.Printf("::group::%s\n", tt.name)
 
-			ctx := common.WithJobErrorContainer(t.Context())
+			executorOrder := make([]string, 0)
+
+			mockLogger := mocks.NewFieldLogger(t)
+			mockLogger.On("Debugf", mock.Anything, mock.Anything).Return(0).Maybe()
+			mockLogger.On("Warningf", mock.Anything, mock.Anything).Return(0).Maybe()
+			mockLogger.On("WithField", mock.Anything, mock.Anything).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Maybe()
+			// When `WithFields()` is called with jobResult & jobOutputs field, add `setJobResults` to executorOrder.
+			mockLogger.On("WithFields",
+				mock.MatchedBy(func(fields logrus.Fields) bool {
+					_, okJobResult := fields["jobResult"]
+					_, okJobOutput := fields["jobOutputs"]
+					return okJobOutput && okJobResult
+				})).
+				Run(func(args mock.Arguments) {
+					executorOrder = append(executorOrder, "setJobResults")
+				}).
+				Return(&logrus.Entry{Logger: &logrus.Logger{}}).Maybe()
+
+			mockLogger.On("WithFields", mock.Anything).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Maybe()
+
+			ctx := common.WithLogger(common.WithJobErrorContainer(t.Context()), mockLogger)
 			jim := &jobInfoMock{}
 			sfm := &stepFactoryMock{}
 			rc := &RunContext{
@@ -262,7 +292,6 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 				Config: &Config{},
 			}
 			rc.ExprEval = rc.NewExpressionEvaluator(ctx)
-			executorOrder := make([]string, 0)
 
 			jim.On("steps").Return(tt.steps)
@@ -415,3 +444,76 @@ func TestSetJobResultConcurrency(t *testing.T) {
 
 	assert.Equal(t, "failure", lastResult)
 }
+
+func TestSetJobResult_SkipsBannerInChildReusableWorkflow(t *testing.T) {
+	// Test that child reusable workflow does not print final banner
+	// to prevent premature token revocation
+
+	mockLogger := mocks.NewFieldLogger(t)
+	// Allow all variants of Debugf (git operations can call with 1-3 args)
+	mockLogger.On("Debugf", mock.Anything).Return(0).Maybe()
+	mockLogger.On("Debugf", mock.Anything, mock.Anything).Return(0).Maybe()
+	mockLogger.On("Debugf", mock.Anything, mock.Anything, mock.Anything).Return(0).Maybe()
+	// CRITICAL: In CI, git ref detection may fail and call Warningf
+	mockLogger.On("Warningf", mock.Anything, mock.Anything).Return(0).Maybe()
+	mockLogger.On("WithField", mock.Anything, mock.Anything).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Maybe()
+	mockLogger.On("WithFields", mock.Anything).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Maybe()
+
+	ctx := common.WithLogger(common.WithJobErrorContainer(t.Context()), mockLogger)
+
+	// Setup parent job
+	parentJob := &model.Job{
+		Result: "success",
+	}
+	parentRC := &RunContext{
+		Config: &Config{Env: map[string]string{}}, // Must have Config
+		Run: &model.Run{
+			JobID: "parent",
+			Workflow: &model.Workflow{
+				Jobs: map[string]*model.Job{
+					"parent": parentJob,
+				},
+			},
+		},
+	}
+
+	// Setup child job with caller reference
+	childJob := &model.Job{
+		Result: "success",
+	}
+	childRC := &RunContext{
+		Config: &Config{Env: map[string]string{}}, // Must have Config
+		Run: &model.Run{
+			JobID: "child",
+			Workflow: &model.Workflow{
+				Jobs: map[string]*model.Job{
+					"child": childJob,
+				},
+			},
+		},
+		caller: &caller{
+			runContext: parentRC,
+		},
+	}
+
+	jim := &jobInfoMock{}
+	jim.On("matrix").Return(map[string]any{}) // REQUIRED: setJobResult always calls matrix()
+	jim.On("result", "success")
+
+	// Call setJobResult for child workflow
+	setJobResult(ctx, jim, childRC, true)
+
+	// Verify:
+	// 1. Child result is set
+	jim.AssertCalled(t, "result", "success")
+
+	// 2. Parent result is propagated
+	assert.Equal(t, "success", parentJob.Result)
+
+	// 3. Final banner was NOT printed by child (critical for token security)
+	mockLogger.AssertNotCalled(t, "WithFields", mock.MatchedBy(func(fields logrus.Fields) bool {
+		_, okJobResult := fields["jobResult"]
+		_, okJobOutput := fields["jobOutputs"]
+		return okJobOutput && okJobResult
+	}))
+}
@@ -146,6 +146,26 @@ func WithCompositeStepLogger(ctx context.Context, stepID string) context.Context
 	}).WithContext(ctx))
 }
 
+func GetOuterStepResult(entry *logrus.Entry) any {
+	r, ok := entry.Data["stepResult"]
+	if !ok {
+		return nil
+	}
+
+	// composite actions steps log with a list of stepID
+	if s, ok := entry.Data["stepID"]; ok {
+		if stepIDs, ok := s.([]string); ok {
+			if len(stepIDs) > 1 {
+				return nil
+			}
+		}
+	} else {
+		return nil
+	}
+
+	return r
+}
+
 func withStepLogger(ctx context.Context, stepNumber int, stepID, stepName, stageName string) context.Context {
 	rtn := common.Logger(ctx).WithFields(logrus.Fields{
 		"stepNumber": stepNumber,
63
act/runner/logger_test.go
Normal file
@@ -0,0 +1,63 @@
+package runner
+
+import (
+	"testing"
+
+	"code.forgejo.org/forgejo/runner/v11/act/common"
+
+	"github.com/sirupsen/logrus/hooks/test"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRunner_GetOuterStepResult(t *testing.T) {
+	nullLogger, hook := test.NewNullLogger()
+	ctx := common.WithLogger(t.Context(), nullLogger)
+
+	t.Run("no stepResult", func(t *testing.T) {
+		hook.Reset()
+		common.Logger(ctx).Info("✅ Success")
+		entry := hook.LastEntry()
+		require.NotNil(t, entry)
+		assert.Nil(t, GetOuterStepResult(entry))
+	})
+
+	t.Run("stepResult and no stepID", func(t *testing.T) {
+		hook.Reset()
+		common.Logger(ctx).WithField("stepResult", "success").Info("✅ Success")
+		entry := hook.LastEntry()
+		require.NotNil(t, entry)
+		assert.Nil(t, GetOuterStepResult(entry))
+	})
+
+	stepNumber := 123
+	stepID := "step id"
+	stepName := "readable name"
+	stageName := "Main"
+	ctx = withStepLogger(ctx, stepNumber, stepID, stepName, stageName)
+
+	t.Run("stepResult and stepID", func(t *testing.T) {
+		hook.Reset()
+		common.Logger(ctx).WithField("stepResult", "success").Info("✅ Success")
+		entry := hook.LastEntry()
+		actualStepIDs, ok := entry.Data["stepID"]
+		require.True(t, ok)
+		require.Equal(t, []string{stepID}, actualStepIDs)
+		require.NotNil(t, entry)
+		assert.Equal(t, "success", GetOuterStepResult(entry))
+	})
+
+	compositeStepID := "composite step id"
+	ctx = WithCompositeStepLogger(ctx, compositeStepID)
+
+	t.Run("stepResult and composite stepID", func(t *testing.T) {
+		hook.Reset()
+		common.Logger(ctx).WithField("stepResult", "success").Info("✅ Success")
+		entry := hook.LastEntry()
+		actualStepIDs, ok := entry.Data["stepID"]
+		require.True(t, ok)
+		require.Equal(t, []string{stepID, compositeStepID}, actualStepIDs)
+		require.NotNil(t, entry)
+		assert.Nil(t, GetOuterStepResult(entry))
+	})
+}
@@ -11,6 +11,8 @@ LXC_IPV6_PREFIX_DEFAULT="fd15"
 LXC_DOCKER_PREFIX_DEFAULT="172.17"
 LXC_IPV6_DOCKER_PREFIX_DEFAULT="fd00:d0ca"
 LXC_APT_TOO_OLD='1 week ago'
+: ${LXC_TRANSACTION_TIMEOUT:=600}
+LXC_TRANSACTION_LOCK_FILE=/tmp/lxc-helper.lock
 
 : ${LXC_SUDO:=}
 : ${LXC_CONTAINER_RELEASE:=bookworm}
@@ -28,16 +30,22 @@ function lxc_template_release() {
     echo lxc-helpers-$LXC_CONTAINER_RELEASE
 }
 
+function lxc_directory() {
+    local name="$1"
+
+    echo /var/lib/lxc/$name
+}
+
 function lxc_root() {
     local name="$1"
 
-    echo /var/lib/lxc/$name/rootfs
+    echo $(lxc_directory $name)/rootfs
 }
 
 function lxc_config() {
     local name="$1"
 
-    echo /var/lib/lxc/$name/config
+    echo $(lxc_directory $name)/config
 }
 
 function lxc_container_run() {
@@ -47,6 +55,42 @@ function lxc_container_run() {
     $LXC_SUDO lxc-attach --clear-env --name $name -- "$@"
 }
 
+function lxc_transaction_lock() {
+    exec 7>$LXC_TRANSACTION_LOCK_FILE
+    flock --timeout $LXC_TRANSACTION_TIMEOUT 7
+}
+
+function lxc_transaction_unlock() {
+    exec 7>&-
+}
+
+function lxc_transaction_draft_name() {
+    echo "lxc-helper-draft"
+}
+
+function lxc_transaction_begin() {
+    local name=$1 # not actually used but it helps when reading in the caller
+    local draft=$(lxc_transaction_draft_name)
+
+    lxc_transaction_lock
+    lxc_container_destroy $draft
+}
+
+function lxc_transaction_commit() {
+    local name=$1
+    local draft=$(lxc_transaction_draft_name)
+
+    # do not use lxc-copy because it is not atomic if lxc-copy is
+    # interrupted it may leave the $name container half populated
+    $LXC_SUDO sed -i -e "s/$draft/$name/g" \
+              $(lxc_config $draft) \
+              $(lxc_root $draft)/etc/hosts \
+              $(lxc_root $draft)/etc/hostname
+    $LXC_SUDO rm -f $(lxc_root $draft)/var/lib/dhcp/dhclient.*
+    $LXC_SUDO mv $(lxc_directory $draft) $(lxc_directory $name)
+    lxc_transaction_unlock
+}
+
 function lxc_container_run_script_as() {
     local name="$1"
     local user="$2"
|
||||||
function lxc_container_install_lxc_helpers() {
|
function lxc_container_install_lxc_helpers() {
|
||||||
local name="$1"
|
local name="$1"
|
||||||
|
|
||||||
$LXC_SUDO cp -a $LXC_SELF_DIR/lxc-helpers*.sh $root/$LXC_BIN
|
$LXC_SUDO cp -a $LXC_SELF_DIR/lxc-helpers*.sh $(lxc_root $name)/$LXC_BIN
|
||||||
#
|
#
|
||||||
# Wait for the network to come up
|
# Wait for the network to come up
|
||||||
#
|
#
|
||||||
|
|
@ -304,10 +348,9 @@ function lxc_container_stop() {
|
||||||
|
|
||||||
function lxc_container_destroy() {
|
function lxc_container_destroy() {
|
||||||
local name="$1"
|
local name="$1"
|
||||||
local root="$2"
|
|
||||||
|
|
||||||
if lxc_exists "$name"; then
|
if lxc_exists "$name"; then
|
||||||
lxc_container_stop $name $root
|
lxc_container_stop $name
|
||||||
$LXC_SUDO lxc-destroy --force --name="$name"
|
$LXC_SUDO lxc-destroy --force --name="$name"
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
@ -342,36 +385,44 @@ function lxc_running() {
|
||||||
function lxc_build_template_release() {
|
function lxc_build_template_release() {
|
||||||
local name="$(lxc_template_release)"
|
local name="$(lxc_template_release)"
|
||||||
|
|
||||||
|
lxc_transaction_begin $name
|
||||||
|
|
||||||
if lxc_exists_and_apt_not_old $name; then
|
if lxc_exists_and_apt_not_old $name; then
|
||||||
|
lxc_transaction_unlock
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
local root=$(lxc_root $name)
|
local draft=$(lxc_transaction_draft_name)
|
||||||
$LXC_SUDO lxc-create --name $name --template debian -- --release=$LXC_CONTAINER_RELEASE
|
$LXC_SUDO lxc-create --name $draft --template debian -- --release=$LXC_CONTAINER_RELEASE
|
||||||
echo 'lxc.apparmor.profile = unconfined' | $LXC_SUDO tee -a $(lxc_config $name)
|
echo 'lxc.apparmor.profile = unconfined' | $LXC_SUDO tee -a $(lxc_config $draft)
|
||||||
lxc_container_install_lxc_helpers $name
|
lxc_container_install_lxc_helpers $draft
|
||||||
lxc_container_start $name
|
lxc_container_start $draft
|
||||||
lxc_container_run $name apt-get update -qq
|
lxc_container_run $draft apt-get update -qq
|
||||||
lxc_apt_install $name sudo git python3
|
lxc_apt_install $draft sudo git python3
|
||||||
lxc_container_stop $name
|
lxc_container_stop $draft
|
||||||
|
lxc_transaction_commit $name
|
||||||
}
|
}
|
||||||
|
|
||||||
function lxc_build_template() {
|
function lxc_build_template() {
|
||||||
local name="$1"
|
local name="$1"
|
||||||
local newname="$2"
|
local newname="$2"
|
||||||
|
|
||||||
if lxc_exists_and_apt_not_old $newname; then
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
if test "$name" = "$(lxc_template_release)"; then
|
if test "$name" = "$(lxc_template_release)"; then
|
||||||
lxc_build_template_release
|
lxc_build_template_release
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if ! $LXC_SUDO lxc-copy --name=$name --newname=$newname; then
|
lxc_transaction_begin $name
|
||||||
echo lxc-copy --name=$name --newname=$newname failed
|
if lxc_exists_and_apt_not_old $newname; then
|
||||||
|
lxc_transaction_unlock
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
local draft=$(lxc_transaction_draft_name)
|
||||||
|
if ! $LXC_SUDO lxc-copy --name=$name --newname=$draft; then
|
||||||
|
echo lxc-copy --name=$name --newname=$draft failed
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
|
lxc_transaction_commit $newname
|
||||||
lxc_container_configure $newname
|
lxc_container_configure $newname
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -413,7 +464,7 @@ function lxc_install_lxc_inside() {
|
||||||
local prefixv6="${2:-$LXC_IPV6_PREFIX_DEFAULT}"
|
local prefixv6="${2:-$LXC_IPV6_PREFIX_DEFAULT}"
|
||||||
|
|
||||||
local packages="make git libvirt0 libpam-cgfs bridge-utils uidmap dnsmasq-base dnsmasq dnsmasq-utils qemu-user-static lxc-templates debootstrap"
|
local packages="make git libvirt0 libpam-cgfs bridge-utils uidmap dnsmasq-base dnsmasq dnsmasq-utils qemu-user-static lxc-templates debootstrap"
|
||||||
if test "$(lxc_release)" = bookworm; then
|
if test "$(lxc_release)" != bullseye; then
|
||||||
packages="$packages distro-info"
|
packages="$packages distro-info"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -18,11 +18,11 @@ lxc-helpers.sh - LXC container management helpers
|
||||||
SYNOPSIS
|
SYNOPSIS
|
||||||
|
|
||||||
lxc-helpers.sh [-v|--verbose] [-h|--help]
|
lxc-helpers.sh [-v|--verbose] [-h|--help]
|
||||||
[-o|--os {bookworm|bullseye} (default bookworm)]
|
[-o|--os {trixie|bookworm|bullseye} (default bookworm)]
|
||||||
command [arguments]
|
command [arguments]
|
||||||
|
|
||||||
lxc-helpers.sh [-v|--verbose] [-h|--help]
|
lxc-helpers.sh [-v|--verbose] [-h|--help]
|
||||||
[-o|--os {bookworm|bullseye} (default bookworm)]
|
[-o|--os {trixie|bookworm|bullseye} (default bookworm)]
|
||||||
[-c|--config {unprivileged lxc libvirt docker k8s} (default "lxc libvirt docker")]
|
[-c|--config {unprivileged lxc libvirt docker k8s} (default "lxc libvirt docker")]
|
||||||
lxc_container_create [arguments]
|
lxc_container_create [arguments]
|
||||||
|
|
||||||
|
|
|
||||||
264
act/runner/mocks/FieldLogger.go
Normal file
264
act/runner/mocks/FieldLogger.go
Normal file
|
|
@ -0,0 +1,264 @@
|
||||||
|
// Code generated by mockery v2.53.5. DO NOT EDIT.
|
||||||
|
|
||||||
|
package mocks
|
||||||
|
|
||||||
|
import (
|
||||||
|
logrus "github.com/sirupsen/logrus"
|
||||||
|
mock "github.com/stretchr/testify/mock"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FieldLogger is an autogenerated mock type for the FieldLogger type
|
||||||
|
type FieldLogger struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debug provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Debug(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debugf provides a mock function with given fields: format, args
|
||||||
|
func (_m *FieldLogger) Debugf(format string, args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, format)
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debugln provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Debugln(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Error(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorf provides a mock function with given fields: format, args
|
||||||
|
func (_m *FieldLogger) Errorf(format string, args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, format)
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorln provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Errorln(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatal provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Fatal(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatalf provides a mock function with given fields: format, args
|
||||||
|
func (_m *FieldLogger) Fatalf(format string, args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, format)
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatalln provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Fatalln(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Info(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Infof provides a mock function with given fields: format, args
|
||||||
|
func (_m *FieldLogger) Infof(format string, args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, format)
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Infoln provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Infoln(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Panic provides a mock function with given fields: args
|
||||||
|
func (_m *FieldLogger) Panic(args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Panicf provides a mock function with given fields: format, args
|
||||||
|
func (_m *FieldLogger) Panicf(format string, args ...interface{}) {
|
||||||
|
var _ca []interface{}
|
||||||
|
_ca = append(_ca, format)
|
||||||
|
_ca = append(_ca, args...)
|
||||||
|
_m.Called(_ca...)
|
||||||
|
}

// Panicln provides a mock function with given fields: args
func (_m *FieldLogger) Panicln(args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Print provides a mock function with given fields: args
func (_m *FieldLogger) Print(args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Printf provides a mock function with given fields: format, args
func (_m *FieldLogger) Printf(format string, args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, format)
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Println provides a mock function with given fields: args
func (_m *FieldLogger) Println(args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Warn provides a mock function with given fields: args
func (_m *FieldLogger) Warn(args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Warnf provides a mock function with given fields: format, args
func (_m *FieldLogger) Warnf(format string, args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, format)
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Warning provides a mock function with given fields: args
func (_m *FieldLogger) Warning(args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Warningf provides a mock function with given fields: format, args
func (_m *FieldLogger) Warningf(format string, args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, format)
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Warningln provides a mock function with given fields: args
func (_m *FieldLogger) Warningln(args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// Warnln provides a mock function with given fields: args
func (_m *FieldLogger) Warnln(args ...interface{}) {
	var _ca []interface{}
	_ca = append(_ca, args...)
	_m.Called(_ca...)
}

// WithError provides a mock function with given fields: err
func (_m *FieldLogger) WithError(err error) *logrus.Entry {
	ret := _m.Called(err)

	if len(ret) == 0 {
		panic("no return value specified for WithError")
	}

	var r0 *logrus.Entry
	if rf, ok := ret.Get(0).(func(error) *logrus.Entry); ok {
		r0 = rf(err)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*logrus.Entry)
		}
	}

	return r0
}

// WithField provides a mock function with given fields: key, value
func (_m *FieldLogger) WithField(key string, value interface{}) *logrus.Entry {
	ret := _m.Called(key, value)

	if len(ret) == 0 {
		panic("no return value specified for WithField")
	}

	var r0 *logrus.Entry
	if rf, ok := ret.Get(0).(func(string, interface{}) *logrus.Entry); ok {
		r0 = rf(key, value)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*logrus.Entry)
		}
	}

	return r0
}

// WithFields provides a mock function with given fields: fields
func (_m *FieldLogger) WithFields(fields logrus.Fields) *logrus.Entry {
	ret := _m.Called(fields)

	if len(ret) == 0 {
		panic("no return value specified for WithFields")
	}

	var r0 *logrus.Entry
	if rf, ok := ret.Get(0).(func(logrus.Fields) *logrus.Entry); ok {
		r0 = rf(fields)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*logrus.Entry)
		}
	}

	return r0
}

// NewFieldLogger creates a new instance of FieldLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewFieldLogger(t interface {
	mock.TestingT
	Cleanup(func())
},
) *FieldLogger {
	mock := &FieldLogger{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
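A minimal usage sketch, not part of the diff: it assumes only the standard testify mock API that the generated code above is built on. The expectation is asserted automatically by the t.Cleanup registered in NewFieldLogger.

package mocks_test

import (
	"testing"

	"code.forgejo.org/forgejo/runner/v11/act/runner/mocks"
)

func TestFieldLoggerMockUsage(t *testing.T) {
	logger := mocks.NewFieldLogger(t)
	// Warnf flattens the format string and args into a single Called(...)
	// invocation, so the expectation lists them the same way.
	logger.On("Warnf", "retrying %s", "fetch").Once()

	logger.Warnf("retrying %s", "fetch")
	// NewFieldLogger's cleanup asserts the expectation when the test ends.
}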
@@ -16,6 +16,7 @@ import (
 	"code.forgejo.org/forgejo/runner/v11/act/common"
 	"code.forgejo.org/forgejo/runner/v11/act/common/git"
 	"code.forgejo.org/forgejo/runner/v11/act/model"
+	"github.com/sirupsen/logrus"
 )

 func newLocalReusableWorkflowExecutor(rc *RunContext) common.Executor {
@@ -115,7 +116,10 @@ func newActionCacheReusableWorkflowExecutor(rc *RunContext, filename string, rem
 			return err
 		}
-		return runner.NewPlanExecutor(plan)(ctx)
+		planErr := runner.NewPlanExecutor(plan)(ctx)
+
+		// Finalize from parent context: one job-level banner
+		return finalizeReusableWorkflow(ctx, rc, planErr)
 	}
 }
@@ -171,7 +175,10 @@ func newReusableWorkflowExecutor(rc *RunContext, directory, workflow string) com
 			return err
 		}
-		return runner.NewPlanExecutor(plan)(ctx)
+		planErr := runner.NewPlanExecutor(plan)(ctx)
+
+		// Finalize from parent context: one job-level banner
+		return finalizeReusableWorkflow(ctx, rc, planErr)
 	}
 }
@@ -229,3 +236,29 @@ func newRemoteReusableWorkflowWithPlat(url, uses string) *remoteReusableWorkflow
 		URL: url,
 	}
 }
+
+// finalizeReusableWorkflow prints the final job banner from the parent job context.
+//
+// The Forgejo reporter waits for this banner (log entry with "jobResult"
+// field and without stage="Main") before marking the job as complete and revoking
+// tokens. Printing this banner from the child reusable workflow would cause
+// premature token revocation, breaking subsequent steps in the parent workflow.
+func finalizeReusableWorkflow(ctx context.Context, rc *RunContext, planErr error) error {
+	jobResult := "success"
+	jobResultMessage := "succeeded"
+	if planErr != nil {
+		jobResult = "failure"
+		jobResultMessage = "failed"
+	}
+
+	// Outputs should already be present in the parent context:
+	// - copied by child's setJobResult branch (rc.caller != nil)
+	jobOutputs := rc.Run.Job().Outputs
+
+	common.Logger(ctx).WithFields(logrus.Fields{
+		"jobResult":  jobResult,
+		"jobOutputs": jobOutputs,
+	}).Infof("\U0001F3C1 Job %s", jobResultMessage)
+
+	return planErr
+}
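For orientation, here is the matching rule the doc comment above describes, pulled out as a standalone predicate. The reporter's real implementation is not part of this diff, so treat this as illustrative only (assuming github.com/sirupsen/logrus):

// isJobBanner reports whether a log entry is the job-level completion
// banner: it carries a "jobResult" field and is not tagged stage="Main".
// Hypothetical helper; field names are taken from the doc comment above.
func isJobBanner(entry *logrus.Entry) bool {
	_, hasResult := entry.Data["jobResult"]
	stage, _ := entry.Data["stage"].(string)
	return hasResult && stage != "Main"
}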

247 act/runner/reusable_workflow_test.go Normal file

@@ -0,0 +1,247 @@
package runner

import (
	"errors"
	"testing"

	"code.forgejo.org/forgejo/runner/v11/act/common"
	"code.forgejo.org/forgejo/runner/v11/act/model"
	"code.forgejo.org/forgejo/runner/v11/act/runner/mocks"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

func TestConfig_GetToken(t *testing.T) {
	t.Run("returns GITEA_TOKEN when both GITEA_TOKEN and GITHUB_TOKEN present", func(t *testing.T) {
		c := &Config{
			Secrets: map[string]string{
				"GITHUB_TOKEN": "github-token",
				"GITEA_TOKEN":  "gitea-token",
			},
		}
		assert.Equal(t, "gitea-token", c.GetToken())
	})

	t.Run("returns GITHUB_TOKEN when only GITHUB_TOKEN present", func(t *testing.T) {
		c := &Config{
			Secrets: map[string]string{
				"GITHUB_TOKEN": "github-token",
			},
		}
		assert.Equal(t, "github-token", c.GetToken())
	})

	t.Run("returns empty string when no tokens present", func(t *testing.T) {
		c := &Config{
			Secrets: map[string]string{},
		}
		assert.Equal(t, "", c.GetToken())
	})

	t.Run("returns empty string when Secrets is nil", func(t *testing.T) {
		c := &Config{}
		assert.Equal(t, "", c.GetToken())
	})
}

func TestRemoteReusableWorkflow_CloneURL(t *testing.T) {
	t.Run("adds https prefix when missing", func(t *testing.T) {
		rw := &remoteReusableWorkflow{
			URL:  "code.forgejo.org",
			Org:  "owner",
			Repo: "repo",
		}
		assert.Equal(t, "https://code.forgejo.org/owner/repo", rw.CloneURL())
	})

	t.Run("preserves https prefix", func(t *testing.T) {
		rw := &remoteReusableWorkflow{
			URL:  "https://code.forgejo.org",
			Org:  "owner",
			Repo: "repo",
		}
		assert.Equal(t, "https://code.forgejo.org/owner/repo", rw.CloneURL())
	})

	t.Run("preserves http prefix", func(t *testing.T) {
		rw := &remoteReusableWorkflow{
			URL:  "http://localhost:3000",
			Org:  "owner",
			Repo: "repo",
		}
		assert.Equal(t, "http://localhost:3000/owner/repo", rw.CloneURL())
	})
}

func TestRemoteReusableWorkflow_FilePath(t *testing.T) {
	tests := []struct {
		name         string
		gitPlatform  string
		filename     string
		expectedPath string
	}{
		{
			name:         "github platform",
			gitPlatform:  "github",
			filename:     "test.yml",
			expectedPath: "./.github/workflows/test.yml",
		},
		{
			name:         "gitea platform",
			gitPlatform:  "gitea",
			filename:     "build.yaml",
			expectedPath: "./.gitea/workflows/build.yaml",
		},
		{
			name:         "forgejo platform",
			gitPlatform:  "forgejo",
			filename:     "deploy.yml",
			expectedPath: "./.forgejo/workflows/deploy.yml",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			rw := &remoteReusableWorkflow{
				GitPlatform: tt.gitPlatform,
				Filename:    tt.filename,
			}
			assert.Equal(t, tt.expectedPath, rw.FilePath())
		})
	}
}

func TestNewRemoteReusableWorkflowWithPlat(t *testing.T) {
	tests := []struct {
		name             string
		url              string
		uses             string
		expectedOrg      string
		expectedRepo     string
		expectedPlatform string
		expectedFilename string
		expectedRef      string
		shouldFail       bool
	}{
		{
			name:             "valid github workflow",
			url:              "github.com",
			uses:             "owner/repo/.github/workflows/test.yml@main",
			expectedOrg:      "owner",
			expectedRepo:     "repo",
			expectedPlatform: "github",
			expectedFilename: "test.yml",
			expectedRef:      "main",
			shouldFail:       false,
		},
		{
			name:             "valid gitea workflow",
			url:              "code.forgejo.org",
			uses:             "forgejo/runner/.gitea/workflows/build.yaml@v1.0.0",
			expectedOrg:      "forgejo",
			expectedRepo:     "runner",
			expectedPlatform: "gitea",
			expectedFilename: "build.yaml",
			expectedRef:      "v1.0.0",
			shouldFail:       false,
		},
		{
			name:       "invalid format - missing platform",
			url:        "github.com",
			uses:       "owner/repo/workflows/test.yml@main",
			shouldFail: true,
		},
		{
			name:       "invalid format - no ref",
			url:        "github.com",
			uses:       "owner/repo/.github/workflows/test.yml",
			shouldFail: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := newRemoteReusableWorkflowWithPlat(tt.url, tt.uses)

			if tt.shouldFail {
				assert.Nil(t, result)
			} else {
				assert.NotNil(t, result)
				assert.Equal(t, tt.expectedOrg, result.Org)
				assert.Equal(t, tt.expectedRepo, result.Repo)
				assert.Equal(t, tt.expectedPlatform, result.GitPlatform)
				assert.Equal(t, tt.expectedFilename, result.Filename)
				assert.Equal(t, tt.expectedRef, result.Ref)
				assert.Equal(t, tt.url, result.URL)
			}
		})
	}
}

func TestFinalizeReusableWorkflow_PrintsBannerSuccess(t *testing.T) {
	mockLogger := mocks.NewFieldLogger(t)

	bannerCalled := false
	mockLogger.On("WithFields",
		mock.MatchedBy(func(fields logrus.Fields) bool {
			result, ok := fields["jobResult"].(string)
			if !ok || result != "success" {
				return false
			}
			outs, ok := fields["jobOutputs"].(map[string]string)
			return ok && outs["foo"] == "bar"
		}),
	).Run(func(args mock.Arguments) {
		bannerCalled = true
	}).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Once()

	ctx := common.WithLogger(t.Context(), mockLogger)
	rc := &RunContext{
		Run: &model.Run{
			JobID: "parent",
			Workflow: &model.Workflow{
				Jobs: map[string]*model.Job{
					"parent": {
						Outputs: map[string]string{"foo": "bar"},
					},
				},
			},
		},
	}

	err := finalizeReusableWorkflow(ctx, rc, nil)
	assert.NoError(t, err)
	assert.True(t, bannerCalled, "final banner should be printed from parent")
}

func TestFinalizeReusableWorkflow_PrintsBannerFailure(t *testing.T) {
	mockLogger := mocks.NewFieldLogger(t)

	bannerCalled := false
	mockLogger.On("WithFields",
		mock.MatchedBy(func(fields logrus.Fields) bool {
			result, ok := fields["jobResult"].(string)
			return ok && result == "failure"
		}),
	).Run(func(args mock.Arguments) {
		bannerCalled = true
	}).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Once()

	ctx := common.WithLogger(t.Context(), mockLogger)
	rc := &RunContext{
		Run: &model.Run{
			JobID: "parent",
			Workflow: &model.Workflow{
				Jobs: map[string]*model.Job{
					"parent": {},
				},
			},
		},
	}

	planErr := errors.New("workflow failed")
	err := finalizeReusableWorkflow(ctx, rc, planErr)
	assert.EqualError(t, err, "workflow failed")
	assert.True(t, bannerCalled, "banner should be printed even on failure")
}
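The TestConfig_GetToken cases above pin down a simple precedence rule. As a sketch only (the actual implementation may differ in detail), it reduces to:

// Sketch: GITEA_TOKEN wins over GITHUB_TOKEN, and a nil or empty Secrets
// map yields "" (indexing a nil map is safe in Go, which covers the
// nil-Secrets test case).
func (c *Config) GetToken() string {
	if token := c.Secrets["GITEA_TOKEN"]; token != "" {
		return token
	}
	return c.Secrets["GITHUB_TOKEN"]
}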
@@ -248,12 +248,18 @@ var stopTemplate = template.Must(template.New("stop").Parse(`#!/bin/bash
 source $(dirname $0)/lxc-helpers-lib.sh

 lxc_container_destroy "{{.Name}}"
+lxc_maybe_sudo
+$LXC_SUDO rm -fr "{{ .Root }}"
 `))

 func (rc *RunContext) stopHostEnvironment(ctx context.Context) error {
 	logger := common.Logger(ctx)
 	logger.Debugf("stopHostEnvironment")
+
+	if !rc.IsLXCHostEnv(ctx) {
+		return nil
+	}
+
 	var stopScript bytes.Buffer
 	if err := stopTemplate.Execute(&stopScript, struct {
 		Name string
@@ -310,13 +316,18 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
 			ToolCache: rc.getToolCache(ctx),
 			Workdir:   rc.Config.Workdir,
 			ActPath:   actPath,
-			CleanUp: func() {
-				os.RemoveAll(miscpath)
-			},
 			StdOut: logWriter,
 			LXC:    rc.IsLXCHostEnv(ctx),
 		}
-		rc.cleanUpJobContainer = rc.JobContainer.Remove()
+		rc.cleanUpJobContainer = func(ctx context.Context) error {
+			if err := rc.stopHostEnvironment(ctx); err != nil {
+				return err
+			}
+			if rc.JobContainer == nil {
+				return nil
+			}
+			return rc.JobContainer.Remove()(ctx)
+		}
 		for k, v := range rc.JobContainer.GetRunnerContext(ctx) {
 			if v, ok := v.(string); ok {
 				rc.Env[fmt.Sprintf("RUNNER_%s", strings.ToUpper(k))] = v
@@ -890,9 +901,6 @@ func (rc *RunContext) IsHostEnv(ctx context.Context) bool {

 func (rc *RunContext) stopContainer() common.Executor {
 	return func(ctx context.Context) error {
-		if rc.IsLXCHostEnv(ctx) {
-			return rc.stopHostEnvironment(ctx)
-		}
 		return rc.stopJobContainer()(ctx)
 	}
 }
@@ -900,9 +908,6 @@ func (rc *RunContext) stopContainer() common.Executor {
 func (rc *RunContext) closeContainer() common.Executor {
 	return func(ctx context.Context) error {
 		if rc.JobContainer != nil {
-			if rc.IsLXCHostEnv(ctx) {
-				return rc.stopHostEnvironment(ctx)
-			}
 			return rc.JobContainer.Close()(ctx)
 		}
 		return nil
@@ -943,7 +948,10 @@ func (rc *RunContext) Executor() (common.Executor, error) {
 			return err
 		}
 		if res {
-			return executor(ctx)
+			timeoutctx, cancelTimeOut := evaluateTimeout(ctx, "job", rc.ExprEval, rc.Run.Job().TimeoutMinutes)
+			defer cancelTimeOut()
+
+			return executor(timeoutctx)
 		}
 		return nil
 	}, nil
@@ -1202,7 +1210,7 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
 	ghc.RetentionDays = preset.RetentionDays

 	instance := rc.Config.GitHubInstance
-	if !strings.HasPrefix(instance, "http://") &&
+	if instance != "" && !strings.HasPrefix(instance, "http://") &&
 		!strings.HasPrefix(instance, "https://") {
 		instance = "https://" + instance
 	}
@@ -1245,7 +1253,7 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext

 	{ // Adapt to Gitea
 		instance := rc.Config.GitHubInstance
-		if !strings.HasPrefix(instance, "http://") &&
+		if instance != "" && !strings.HasPrefix(instance, "http://") &&
 			!strings.HasPrefix(instance, "https://") {
 			instance = "https://" + instance
 		}
@@ -1347,16 +1355,6 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
 	set("SERVER_URL", github.ServerURL)
 	set("API_URL", github.APIURL)

-	{ // Adapt to Forgejo
-		instance := rc.Config.GitHubInstance
-		if !strings.HasPrefix(instance, "http://") &&
-			!strings.HasPrefix(instance, "https://") {
-			instance = "https://" + instance
-		}
-		set("SERVER_URL", instance)
-		set("API_URL", instance+"/api/v1")
-	}
-
 	if rc.Config.ArtifactServerPath != "" {
 		setActionRuntimeVars(rc, env)
 	}
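The instance-normalization logic repeated in the hunks above, now guarded against an empty GitHubInstance, boils down to the following. normalizeInstance is not a function in the diff; it restates the inline logic (assuming the standard strings package) only to make the guard's effect explicit:

// normalizeInstance: leave "" alone, keep explicit http(s) schemes,
// and default everything else to https.
func normalizeInstance(instance string) string {
	if instance != "" && !strings.HasPrefix(instance, "http://") &&
		!strings.HasPrefix(instance, "https://") {
		return "https://" + instance
	}
	return instance
}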
@@ -280,6 +280,39 @@ func TestRunContext_GetBindsAndMounts(t *testing.T) {
 	})
 }

+func TestRunContext_GetGithubContextURL(t *testing.T) {
+	table := []struct {
+		instance  string
+		serverURL string
+		APIURL    string
+	}{
+		{instance: "", serverURL: "", APIURL: "/api/v1"},
+		{instance: "example.com", serverURL: "https://example.com", APIURL: "https://example.com/api/v1"},
+		{instance: "http://example.com", serverURL: "http://example.com", APIURL: "http://example.com/api/v1"},
+		{instance: "https://example.com", serverURL: "https://example.com", APIURL: "https://example.com/api/v1"},
+	}
+	for _, data := range table {
+		t.Run(data.instance, func(t *testing.T) {
+			rc := &RunContext{
+				EventJSON: "{}",
+				Config: &Config{
+					GitHubInstance: data.instance,
+				},
+				Run: &model.Run{
+					Workflow: &model.Workflow{
+						Name: "GitHubContextTest",
+					},
+				},
+			}
+
+			ghc := rc.getGithubContext(t.Context())
+
+			assert.Equal(t, data.serverURL, ghc.ServerURL)
+			assert.Equal(t, data.APIURL, ghc.APIURL)
+		})
+	}
+}
+
 func TestRunContext_GetGithubContextRef(t *testing.T) {
 	table := []struct {
 		event string
@@ -263,6 +263,7 @@ func TestRunner_RunEvent(t *testing.T) {
 		{workdir, "uses-workflow", "pull_request", "", platforms, map[string]string{"secret": "keep_it_private"}},
 		{workdir, "uses-docker-url", "push", "", platforms, secrets},
 		{workdir, "act-composite-env-test", "push", "", platforms, secrets},
+		{workdir, "uses-workflow-env-input", "push", "", platforms, secrets},

 		// Eval
 		{workdir, "evalmatrix", "push", "", platforms, secrets},
@@ -272,6 +273,8 @@ func TestRunner_RunEvent(t *testing.T) {
 		{workdir, "evalmatrix-merge-array", "push", "", platforms, secrets},

 		{workdir, "basic", "push", "", platforms, secrets},
+		{workdir, "timeout-minutes-step", "push", "Job 'check' failed", platforms, secrets},
+		{workdir, "timeout-minutes-job", "push", "context deadline exceeded", platforms, secrets},
 		{workdir, "fail", "push", "Job 'build' failed", platforms, secrets},
 		{workdir, "runs-on", "push", "", platforms, secrets},
 		{workdir, "checkout", "push", "", platforms, secrets},
@@ -177,7 +177,7 @@ func runStepExecutor(step step, stage stepStage, executor common.Executor) commo
 			Mode: 0o666,
 		})(ctx)

-		timeoutctx, cancelTimeOut := evaluateStepTimeout(ctx, rc.ExprEval, stepModel)
+		timeoutctx, cancelTimeOut := evaluateTimeout(ctx, "step", rc.ExprEval, stepModel.TimeoutMinutes)
 		defer cancelTimeOut()
 		err = executor(timeoutctx)

@@ -213,12 +213,15 @@ func runStepExecutor(step step, stage stepStage, executor common.Executor) commo
 	}
 }

-func evaluateStepTimeout(ctx context.Context, exprEval ExpressionEvaluator, stepModel *model.Step) (context.Context, context.CancelFunc) {
-	timeout := exprEval.Interpolate(ctx, stepModel.TimeoutMinutes)
+func evaluateTimeout(ctx context.Context, contextType string, exprEval ExpressionEvaluator, timeoutMinutes string) (context.Context, context.CancelFunc) {
+	timeout := exprEval.Interpolate(ctx, timeoutMinutes)
 	if timeout != "" {
-		if timeOutMinutes, err := strconv.ParseInt(timeout, 10, 64); err == nil {
+		timeOutMinutes, err := strconv.ParseInt(timeout, 10, 64)
+		if err == nil {
+			common.Logger(ctx).Debugf("the %s will stop in timeout-minutes %s", contextType, timeout)
 			return context.WithTimeout(ctx, time.Duration(timeOutMinutes)*time.Minute)
 		}
+		common.Logger(ctx).Errorf("timeout-minutes %s cannot be parsed and will be ignored: %v", timeout, err)
 	}
 	return ctx, func() {}
 }
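Usage sketch for the generalized helper; rc, executor and the literal "10" stand in for the surrounding code. Both the job-level call in Executor() and the step-level call above follow this shape, and an expired timeout typically surfaces as the "context deadline exceeded" error the new runner test expects:

timeoutctx, cancelTimeOut := evaluateTimeout(ctx, "job", rc.ExprEval, "10")
defer cancelTimeOut()
err := executor(timeoutctx) // timeoutctx is cancelled after 10 minutes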

21 act/runner/testdata/.github/workflows/local-reusable-env-input.yml vendored Normal file

@@ -0,0 +1,21 @@
name: "use-inputs-impl"

on:
  workflow_call:
    inputs:
      greet_target:
        type: string
        required: false
        default: "Some Default Value"

jobs:
  works:
    runs-on: ubuntu-latest
    env:
      MY_INPUT_TEST: ${{ inputs.greet_target }}
      INPUT_TEST: ${{ inputs.greet_target }}
      INPUT_GREET_TARGET: ${{ inputs.greet_target }}
    steps:
      - run: '[ "$MY_INPUT_TEST" = "Mona the Octocat" ] || exit 1'
      - run: '[ "$INPUT_TEST" = "Mona the Octocat" ] || exit 1'
      - run: '[ "$INPUT_GREET_TARGET" = "Mona the Octocat" ] || exit 1'
@@ -1,4 +1,4 @@
-FROM alpine:3
+FROM code.forgejo.org/oci/alpine:latest

 COPY entrypoint.sh /entrypoint.sh

3 act/runner/testdata/basic/push.yml vendored

@@ -18,7 +18,8 @@ jobs:
       - run: ls
       - run: echo 'hello world'
       - run: echo ${GITHUB_SHA} >> $(dirname "${GITHUB_WORKSPACE}")/sha.txt
-      - run: cat $(dirname "${GITHUB_WORKSPACE}")/sha.txt | grep ${GITHUB_SHA}
+      - timeout-minutes: 30
+        run: cat $(dirname "${GITHUB_WORKSPACE}")/sha.txt | grep ${GITHUB_SHA}
   build:
     runs-on: ubuntu-latest
     needs: [check]

12 act/runner/testdata/timeout-minutes-job/push.yml vendored Normal file

@@ -0,0 +1,12 @@
name: timeout-minutes
on: push

env:
  TIMEOUT_MINUTES: 0

jobs:
  check:
    runs-on: ubuntu-latest
    timeout-minutes: ${{ env.TIMEOUT_MINUTES }}
    steps:
      - run: sleep 10

12 act/runner/testdata/timeout-minutes-step/push.yml vendored Normal file

@@ -0,0 +1,12 @@
name: timeout-minutes
on: push

env:
  TIMEOUT_MINUTES: 0

jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - timeout-minutes: ${{ env.TIMEOUT_MINUTES }}
        run: sleep 10

8 act/runner/testdata/uses-workflow-env-input/push.yml vendored Normal file

@@ -0,0 +1,8 @@
name: local-action-env-input
on: push
jobs:
  test:
    runs-on: docker
    uses: ./testdata/.github/workflows/local-reusable-env-input.yml
    with:
      greet_target: 'Mona the Octocat'
@@ -157,6 +157,7 @@
     "output-value": {
       "context": [
         "forge",
+        "forgejo",
         "github",
         "strategy",
         "matrix",
@@ -171,6 +172,7 @@
     "input-default-context": {
       "context": [
         "forge",
+        "forgejo",
         "github",
         "inputs",
         "env",
@@ -191,6 +193,7 @@
     "string-steps-context": {
       "context": [
         "forge",
+        "forgejo",
         "github",
         "inputs",
         "strategy",
@@ -207,6 +210,7 @@
     "boolean-steps-context": {
      "context": [
         "forge",
+        "forgejo",
         "github",
         "inputs",
         "strategy",
@@ -223,6 +227,7 @@
     "step-env": {
       "context": [
         "forge",
+        "forgejo",
         "github",
         "inputs",
         "strategy",
@@ -242,6 +247,7 @@
     "step-if": {
       "context": [
         "forge",
+        "forgejo",
         "github",
         "inputs",
         "strategy",
@@ -262,6 +268,7 @@
     "step-with": {
       "context": [
         "forge",
+        "forgejo",
         "github",
         "inputs",
         "strategy",
@@ -237,6 +237,9 @@ func (s *Node) UnmarshalYAML(node *yaml.Node) error {
 	if node != nil && node.Kind == yaml.DocumentNode {
 		return s.UnmarshalYAML(node.Content[0])
 	}
+	if node != nil && node.Kind == yaml.AliasNode {
+		return s.UnmarshalYAML(node.Alias)
+	}
 	def := s.Schema.GetDefinition(s.Definition)
 	if s.Context == nil {
 		s.Context = def.Context
@@ -360,6 +363,15 @@ func (s *Node) checkMapping(node *yaml.Node, def Definition) error {
 	if node.Kind != yaml.MappingNode {
 		return fmt.Errorf("%sExpected a mapping got %v", formatLocation(node), getStringKind(node.Kind))
 	}
+	// merges cannot be conveniently validated and are skipped
+	// https://yaml.org/type/merge.html
+	for i, n := range node.Content {
+		if i%2 == 0 {
+			if n.Kind == yaml.ScalarNode && n.Value == "<<" && (n.Tag == "" || n.ShortTag() == "!!merge") {
+				return nil
+			}
+		}
+	}
 	insertDirective := regexp.MustCompile(`\${{\s*insert\s*}}`)
 	var allErrors error
 	for i, k := range node.Content {
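The skip condition added to checkMapping recognizes a YAML merge key in either spelling. Pulled out as a standalone predicate (a sketch, not a function in the diff), it reads:

// isMergeKey reports whether a mapping key node is the YAML merge
// indicator "<<", untagged or explicitly tagged !!merge (yaml.v3 API).
func isMergeKey(n *yaml.Node) bool {
	return n.Kind == yaml.ScalarNode && n.Value == "<<" &&
		(n.Tag == "" || n.ShortTag() == "!!merge")
}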
@@ -30,6 +30,84 @@ jobs:
 	assert.NoError(t, err)
 }

+func TestContextsInWorkflowMatrix(t *testing.T) {
+	t.Run("KnownContexts", func(t *testing.T) {
+		// Parse raw YAML snippet.
+		var node yaml.Node
+		err := yaml.Unmarshal([]byte(`
+on: push
+
+jobs:
+  job:
+    uses: ./.forgejo/workflow/test.yaml
+    strategy:
+      matrix:
+        input1:
+          - ${{ forge.KEY }}
+          - ${{ forgejo.KEY }}
+          - ${{ github.KEY }}
+          - ${{ inputs.KEY }}
+          - ${{ vars.KEY }}
+          - ${{ needs.KEY }}
+        include:
+          - forge: ${{ forge.KEY }}
+          - forgejo: ${{ forgejo.KEY }}
+          - github: ${{ github.KEY }}
+          - inputs: ${{ inputs.KEY }}
+          - vars: ${{ vars.KEY }}
+          - needs: ${{ needs.KEY }}
+        exclude:
+          - forge: ${{ forge.KEY }}
+          - forgejo: ${{ forgejo.KEY }}
+          - github: ${{ github.KEY }}
+          - inputs: ${{ inputs.KEY }}
+          - vars: ${{ vars.KEY }}
+          - needs: ${{ needs.KEY }}
+`), &node)
+		if !assert.NoError(t, err) {
+			return
+		}
+
+		// Parse YAML node as a validated workflow.
+		err = (&Node{
+			Definition: "workflow-root",
+			Schema:     GetWorkflowSchema(),
+		}).UnmarshalYAML(&node)
+		assert.NoError(t, err)
+	})
+
+	t.Run("UnknownContext", func(t *testing.T) {
+		for _, property := range []string{"include", "exclude", "input1"} {
+			t.Run(property, func(t *testing.T) {
+				for _, context := range []string{"secrets", "job", "steps", "runner", "matrix", "strategy"} {
+					t.Run(context, func(t *testing.T) {
+						var node yaml.Node
+						err := yaml.Unmarshal([]byte(fmt.Sprintf(`
+on: push
+
+jobs:
+  job:
+    uses: ./.forgejo/workflow/test.yaml
+    strategy:
+      matrix:
+        %[1]s:
+          - input1: ${{ %[2]s.KEY }}
+`, property, context)), &node)
+						if !assert.NoError(t, err) {
+							return
+						}
+						err = (&Node{
+							Definition: "workflow-root",
+							Schema:     GetWorkflowSchema(),
+						}).UnmarshalYAML(&node)
+						assert.ErrorContains(t, err, "Unknown Variable Access "+context)
+					})
+				}
+			})
+		}
+	})
+}
+
 func TestReusableWorkflow(t *testing.T) {
 	t.Run("KnownContexts", func(t *testing.T) {
 		var node yaml.Node
@@ -164,7 +242,7 @@ jobs:
     name: Build Silo Frontend DEV
     runs-on: ubuntu-latest
     container:
-      image: code.forgejo.org/oci/node:22-bookworm
+      image: code.forgejo.org/oci/${{ env.IMAGE }}
     uses: ./.forgejo/workflows/${{ vars.PATHNAME }}
     with:
       STAGE: dev
@@ -243,3 +321,39 @@ runs:
 		})
 	}
 }
+
+// https://yaml.org/spec/1.2.1/#id2785586
+// An anchor is denoted by the “&” indicator. It marks a node for future reference.
+// https://yaml.org/type/merge.html
+// Specify one or more mappings to be merged with the current one.
+func TestSchema_AnchorAndReference(t *testing.T) {
+	var node yaml.Node
+	err := yaml.Unmarshal([]byte(`
+on: [push]
+jobs:
+  test1:
+    runs-on: docker
+    steps:
+      - &step
+        run: echo All good!
+      - *step
+  test2:
+    runs-on: docker
+    steps:
+      - << : *step
+        name: other name
+  test3:
+    runs-on: docker
+    steps:
+      - !!merge << : *step
+        name: other name
+`), &node)
+	if !assert.NoError(t, err) {
+		return
+	}
+	err = (&Node{
+		Definition: "workflow-root",
+		Schema:     GetWorkflowSchema(),
+	}).UnmarshalYAML(&node)
+	assert.NoError(t, err)
+}
@@ -52,7 +52,7 @@
     "boolean": {}
   },
   "run-name": {
-    "context": ["forge", "github", "inputs", "vars"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars"],
     "string": {},
     "description": "The name for workflow runs generated from the workflow. GitHub displays the workflow run name in the list of workflow runs on your repository's 'Actions' tab.\n\n[Documentation](https://docs.github.com/actions/using-workflows/workflow-syntax-for-github-actions#run-name)"
   },
@@ -1139,7 +1139,7 @@
   },
   "workflow-call-input-default": {
     "description": "If a `default` parameter is not set, the default value of the input is `false` for boolean, `0` for a number, and `\"\"` for a string.",
-    "context": ["forge", "github", "inputs", "vars"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars"],
     "one-of": ["string", "boolean", "number"]
   },
   "workflow-call-secrets": {
@@ -1201,7 +1201,7 @@
   },
   "workflow-output-context": {
     "description": "The value to assign to the output parameter.",
-    "context": ["forge", "github", "inputs", "vars", "jobs"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "jobs"],
     "string": {}
   },
   "workflow-dispatch-string": {
@@ -1402,7 +1402,7 @@
   },
   "workflow-env": {
     "description": "A map of environment variables that are available to the steps of all jobs in the workflow. You can also set environment variables that are only available to the steps of a single job or to a single step.\n\n[Documentation](https://docs.github.com/actions/using-workflows/workflow-syntax-for-github-actions#env)",
-    "context": ["forge", "github", "inputs", "vars", "secrets", "env"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "secrets", "env"],
     "mapping": {
       "loose-key-type": "non-empty-string",
       "loose-value-type": "string"
@@ -1517,6 +1517,7 @@
     "description": "You can use the `if` conditional to prevent a job from running unless a condition is met. You can use any supported context and expression to create a conditional.",
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1534,6 +1535,7 @@
   "job-if-result": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1548,7 +1550,7 @@
   },
   "strategy": {
     "description": "Use `strategy` to use a matrix strategy for your jobs. A matrix strategy lets you use variables in a single job definition to automatically create multiple job runs that are based on the combinations of the variables. ",
-    "context": ["forge", "github", "inputs", "vars", "needs"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs"],
     "mapping": {
       "properties": {
         "fail-fast": {
@@ -1566,6 +1568,7 @@
   "fail-fast": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1584,6 +1587,7 @@
   "max-parallel": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1630,7 +1634,7 @@
   "runs-on": {
     "description": "Use `runs-on` to define the type of machine to run the job on.\n* The destination machine can be either a GitHub-hosted runner, larger runner, or a self-hosted runner.\n* You can target runners based on the labels assigned to them, or their group membership, or a combination of these.\n* You can provide `runs-on` as a single string or as an array of strings.\n* If you specify an array of strings, your workflow will execute on any runner that matches all of the specified `runs-on` values.\n* If you would like to run your workflow on multiple machines, use `jobs.<job_id>.strategy`.",
     "required": true,
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "one-of": [
       "non-empty-string",
       "sequence-of-non-empty-string",
@@ -1656,6 +1660,7 @@
     "description": "A map of variables that are available to all steps in the job.",
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1671,12 +1676,12 @@
   },
   "workflow-concurrency": {
     "description": "Concurrency ensures that only a single job or workflow using the same concurrency group will run at a time. A concurrency group can be any string or expression.\n\nYou can also specify `concurrency` at the job level.\n\n[Documentation](https://docs.github.com/actions/using-workflows/workflow-syntax-for-github-actions#concurrency)",
-    "context": ["forge", "github", "inputs", "vars"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars"],
     "one-of": ["string", "concurrency-mapping"]
   },
   "job-concurrency": {
     "description": "Concurrency ensures that only a single job using the same concurrency group will run at a time. A concurrency group can be any string or expression. The expression can use any context except for the `secrets` context.\n\nYou can also specify `concurrency` at the workflow level.",
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "one-of": ["non-empty-string", "concurrency-mapping"]
   },
   "concurrency-mapping": {
@@ -1697,7 +1702,7 @@
   },
   "job-environment": {
     "description": "The environment that the job references. All environment protection rules must pass before a job referencing the environment is sent to a runner.",
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "one-of": ["string", "job-environment-mapping"]
   },
   "job-environment-mapping": {
@@ -1716,7 +1721,7 @@
   },
   "job-environment-name": {
     "description": "The name of the environment used by the job.",
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "string": {}
   },
   "job-defaults": {
@@ -1730,6 +1735,7 @@
   "job-defaults-run": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1800,6 +1806,7 @@
   "step-uses": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1821,6 +1828,7 @@
   "job-uses": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1841,6 +1849,7 @@
   "step-continue-on-error": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1866,6 +1875,7 @@
   "step-if": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1891,6 +1901,7 @@
   "step-if-result": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1913,6 +1924,7 @@
     "description": "Sets variables for steps to use in the runner environment. You can also set variables for the entire workflow or a job.",
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1934,6 +1946,7 @@
   "step-name": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1953,6 +1966,7 @@
   "step-timeout-minutes": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1973,6 +1987,7 @@
     "description": "A map of the input parameters defined by the action. Each input parameter is a key/value pair. Input parameters are set as variables. When you specify an input in a workflow file or use a default input value, GitHub creates a variable for the input with the name `INPUT_<VARIABLE_NAME>`. The variable created converts input names to uppercase letters and replaces spaces with `_`.",
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -1993,7 +2008,7 @@
   },
   "container": {
     "description": "A container to run any steps in a job that don't already specify a container. If you have steps that use both script and container actions, the container actions will run as sibling containers on the same network with the same volume mounts.\n\nIf you do not set a container, all steps will run directly on the host specified by runs-on unless a step refers to an action configured to run in a container.",
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix", "env"],
     "one-of": ["string", "container-mapping"]
   },
   "container-mapping": {
@@ -2026,19 +2041,19 @@
   },
   "services": {
     "description": "Additional containers to host services for a job in a workflow. These are useful for creating databases or cache services like redis. The runner on the virtual machine will automatically create a network and manage the life cycle of the service containers. When you use a service container for a job or your step uses container actions, you don't need to set port information to access the service. Docker automatically exposes all ports between containers on the same network. When both the job and the action run in a container, you can directly reference the container by its hostname. The hostname is automatically mapped to the service name. When a step does not use a container action, you must access the service using localhost and bind the ports.",
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "mapping": {
       "loose-key-type": "non-empty-string",
       "loose-value-type": "services-container"
     }
   },
   "services-container": {
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "one-of": ["non-empty-string", "container-mapping"]
   },
   "container-registry-credentials": {
     "description": "If the image's container registry requires authentication to pull the image, you can use `jobs.<job_id>.container.credentials` to set a map of the username and password. The credentials are the same values that you would provide to the `docker login` command.",
-    "context": ["forge", "github", "inputs", "vars", "secrets", "env"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "secrets", "env"],
     "mapping": {
       "properties": {
         "username": "non-empty-string",
@@ -2064,24 +2079,25 @@
     }
   },
   "boolean-needs-context": {
-    "context": ["forge", "github", "inputs", "vars", "needs"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs"],
     "boolean": {}
   },
   "number-needs-context": {
-    "context": ["forge", "github", "inputs", "vars", "needs"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs"],
     "number": {}
   },
   "string-needs-context": {
-    "context": ["forge", "github", "inputs", "vars", "needs"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs"],
     "string": {}
   },
   "scalar-needs-context": {
-    "context": ["forge", "github", "inputs", "vars", "env", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "env", "needs", "strategy", "matrix"],
     "one-of": ["string", "boolean", "number"]
   },
   "scalar-needs-context-with-secrets": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -2093,24 +2109,25 @@
     "one-of": ["string", "boolean", "number"]
   },
   "boolean-strategy-context": {
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "boolean": {}
   },
   "number-strategy-context": {
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "number": {}
   },
   "string-strategy-context": {
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "string": {}
   },
   "job-timeout-minutes": {
-    "context": ["forge", "github", "inputs", "vars", "needs", "strategy", "matrix"],
+    "context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
     "one-of": ["number", "string"]
   },
   "boolean-steps-context": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -2129,6 +2146,7 @@
   "number-steps-context": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -2147,6 +2165,7 @@
   "string-runner-context": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -2164,6 +2183,7 @@
   "string-runner-context-no-secrets": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
@@ -2180,6 +2200,7 @@
   "string-steps-context": {
     "context": [
       "forge",
+      "forgejo",
       "github",
       "inputs",
       "vars",
|
@@ -35,7 +35,7 @@ services:
       bash -c '
       /usr/bin/s6-svscan /etc/s6 &
       sleep 10 ;
-      su -c "forgejo forgejo-cli actions register --secret {SHARED_SECRET}" git ;
+      su -c "forgejo forgejo-cli actions register --keep-labels --secret {SHARED_SECRET}" git ;
       su -c "forgejo admin user create --admin --username root --password {ROOT_PASSWORD} --email root@example.com" git ;
       sleep infinity
       '

@@ -51,7 +51,7 @@ services:
       - 8080:3000

   runner-register:
-    image: code.forgejo.org/forgejo/runner:9.1.1
+    image: code.forgejo.org/forgejo/runner:11.1.2
    links:
       - docker-in-docker
       - forgejo

@@ -77,7 +77,7 @@ services:
       '

   runner-daemon:
-    image: code.forgejo.org/forgejo/runner:9.1.1
+    image: code.forgejo.org/forgejo/runner:11.1.2
    links:
       - docker-in-docker
       - forgejo

@@ -20,14 +20,14 @@ trap "rm -fr $TMPDIR" EXIT
 : ${INPUTS_TOKEN:=}
 : ${INPUTS_FORGEJO:=https://code.forgejo.org}
 : ${INPUTS_LIFETIME:=7d}
-DEFAULT_LXC_HELPERS_VERSION=1.0.3 # renovate: datasource=forgejo-tags depName=forgejo/lxc-helpers
+DEFAULT_LXC_HELPERS_VERSION=1.1.3 # renovate: datasource=forgejo-tags depName=forgejo/lxc-helpers
 : ${INPUTS_LXC_HELPERS_VERSION:=$DEFAULT_LXC_HELPERS_VERSION}
-DEFAULT_RUNNER_VERSION=10.0.1 # renovate: datasource=forgejo-releases depName=forgejo/runner
+DEFAULT_RUNNER_VERSION=11.1.2 # renovate: datasource=forgejo-releases depName=forgejo/runner
 : ${INPUTS_RUNNER_VERSION:=$DEFAULT_RUNNER_VERSION}

 : ${KILL_AFTER:=21600} # 6h == 21600
 NODEJS_VERSION=20
-DEBIAN_RELEASE=bookworm
+DEBIAN_RELEASE=trixie
 YQ_VERSION=v4.45.1
 SELF=${BASH_SOURCE[0]}
 SELF_FILENAME=$(basename "$SELF")
go.mod (33 changed lines)

@@ -2,23 +2,24 @@ module code.forgejo.org/forgejo/runner/v11

 go 1.24.0

-toolchain go1.24.7
+toolchain go1.24.9

 require (
-	code.forgejo.org/forgejo/actions-proto v0.5.1
-	connectrpc.com/connect v1.18.1
+	code.forgejo.org/forgejo/actions-proto v0.5.3
+	connectrpc.com/connect v1.19.1
 	dario.cat/mergo v1.0.2
 	github.com/Masterminds/semver v1.5.0
-	github.com/avast/retry-go/v4 v4.6.1
+	github.com/avast/retry-go/v4 v4.7.0
 	github.com/containerd/errdefs v1.0.0
 	github.com/creack/pty v1.1.24
 	github.com/distribution/reference v0.6.0
-	github.com/docker/cli v28.4.0+incompatible
-	github.com/docker/docker v28.4.0+incompatible
+	github.com/docker/cli v28.5.1+incompatible
+	github.com/docker/docker v28.5.1+incompatible
 	github.com/docker/go-connections v0.6.0
 	github.com/go-git/go-billy/v5 v5.6.2
-	github.com/go-git/go-git/v5 v5.16.2
+	github.com/go-git/go-git/v5 v5.16.3
 	github.com/gobwas/glob v0.2.3
+	github.com/google/go-cmp v0.7.0
 	github.com/google/uuid v1.6.0
 	github.com/joho/godotenv v1.5.1
 	github.com/julienschmidt/httprouter v1.3.0

@@ -28,7 +29,7 @@ require (
 	github.com/moby/patternmatcher v0.6.0
 	github.com/opencontainers/image-spec v1.1.1
 	github.com/opencontainers/selinux v1.12.0
-	github.com/rhysd/actionlint v1.7.7
+	github.com/rhysd/actionlint v1.7.8
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.10.1
 	github.com/spf13/pflag v1.0.10

@@ -36,9 +37,9 @@ require (
 	github.com/timshannon/bolthold v0.0.0-20240314194003-30aac6950928
 	go.etcd.io/bbolt v1.4.3
 	go.yaml.in/yaml/v3 v3.0.4
-	golang.org/x/term v0.34.0
-	golang.org/x/time v0.13.0
-	google.golang.org/protobuf v1.36.8
+	golang.org/x/term v0.36.0
+	golang.org/x/time v0.14.0
+	google.golang.org/protobuf v1.36.10
 	gotest.tools/v3 v3.5.2
 )

@@ -46,7 +47,7 @@ require (
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/ProtonMail/go-crypto v1.1.6 // indirect
-	github.com/bmatcuk/doublestar/v4 v4.8.0 // indirect
+	github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
 	github.com/cloudflare/circl v1.6.1 // indirect
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect

@@ -62,14 +63,13 @@ require (
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
 	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
-	github.com/google/go-cmp v0.7.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/kevinburke/ssh_config v1.2.0 // indirect
 	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
-	github.com/mattn/go-runewidth v0.0.16 // indirect
+	github.com/mattn/go-runewidth v0.0.17 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/sys/atomicwriter v0.1.0 // indirect

@@ -98,10 +98,11 @@ require (
 	go.opentelemetry.io/otel/metric v1.36.0 // indirect
 	go.opentelemetry.io/otel/sdk v1.21.0 // indirect
 	go.opentelemetry.io/otel/trace v1.36.0 // indirect
+	go.yaml.in/yaml/v4 v4.0.0-rc.2 // indirect
 	golang.org/x/crypto v0.37.0 // indirect
 	golang.org/x/net v0.39.0 // indirect
-	golang.org/x/sync v0.12.0 // indirect
-	golang.org/x/sys v0.35.0 // indirect
+	golang.org/x/sync v0.17.0 // indirect
+	golang.org/x/sys v0.37.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
go.sum (58 changed lines)

@@ -1,7 +1,7 @@
-code.forgejo.org/forgejo/actions-proto v0.5.1 h1:GCJHR/Y/Apk7Yl7CH9qOsKrdf/k0tRVFeVhz1EIZvb4=
-code.forgejo.org/forgejo/actions-proto v0.5.1/go.mod h1:nu8N1HQLsu3c4T/PpYWbqwNBxsZnEOVxqV0mQWtIQvE=
-connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
-connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
+code.forgejo.org/forgejo/actions-proto v0.5.3 h1:dDProRNB4CDvEl9gfo8jkiVfGdiW7fXAt5TM9Irka28=
+code.forgejo.org/forgejo/actions-proto v0.5.3/go.mod h1:33iTdur/jVa/wAQP+BuciRTK9WZcVaxy0BNEnSWWFDM=
+connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
+connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
 dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
 dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=

@@ -19,10 +19,10 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk=
-github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA=
-github.com/bmatcuk/doublestar/v4 v4.8.0 h1:DSXtrypQddoug1459viM9X9D3dp1Z7993fw36I2kNcQ=
-github.com/bmatcuk/doublestar/v4 v4.8.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/avast/retry-go/v4 v4.7.0 h1:yjDs35SlGvKwRNSykujfjdMxMhMQQM0TnIjJaHB+Zio=
+github.com/avast/retry-go/v4 v4.7.0/go.mod h1:ZMPDa3sY2bKgpLtap9JRUgk2yTAba7cgiFhqxY2Sg6Q=
+github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
+github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=

@@ -43,10 +43,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY=
-github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk=
-github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
+github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
+github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
 github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
 github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=

@@ -69,8 +69,8 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN
 github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
-github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
+github.com/go-git/go-git/v5 v5.16.3 h1:Z8BtvxZ09bYm/yYNgPKCzgWtaRqDTgIKRgIRHBfU6Z8=
+github.com/go-git/go-git/v5 v5.16.3/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=

@@ -115,8 +115,8 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP
 github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
-github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ=
+github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
 github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=

@@ -151,8 +151,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rhysd/actionlint v1.7.7 h1:0KgkoNTrYY7vmOCs9BW2AHxLvvpoY9nEUzgBHiPUr0k=
-github.com/rhysd/actionlint v1.7.7/go.mod h1:AE6I6vJEkNaIfWqC2GNE5spIJNhxf8NCtLEKU4NnUXg=
+github.com/rhysd/actionlint v1.7.8 h1:3d+N9ourgAxVYG4z2IFxFIk/YiT6V+VnKASfXGwT60E=
+github.com/rhysd/actionlint v1.7.8/go.mod h1:3kiS6egcbXG+vQsJIhFxTz+UKaF1JprsE0SKrpCZKvU=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=

@@ -222,6 +222,8 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
 go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
 go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+go.yaml.in/yaml/v4 v4.0.0-rc.2 h1:/FrI8D64VSr4HtGIlUtlFMGsm7H7pWTbj6vOLVZcA6s=
+go.yaml.in/yaml/v4 v4.0.0-rc.2/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
 golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=

@@ -230,8 +232,8 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbR
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
 golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -242,16 +244,16 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
+golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
+golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
 golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
-golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
-golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
 google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY=

@@ -260,8 +262,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
 google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
 google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
-google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -206,18 +206,19 @@ var createRunner = func(ctx context.Context, cfg *config.Config, reg *config.Reg
 	resp, err := runner.Declare(ctx, ls.Names())
 	if err != nil && connect.CodeOf(err) == connect.CodeUnimplemented {
 		log.Warn("Because the Forgejo instance is an old version, skipping declaring the labels and version.")
+		return runner, "runner", nil
 	} else if err != nil {
 		log.WithError(err).Error("fail to invoke Declare")
 		return nil, "", err
-	} else {
-		log.Infof("runner: %s, with version: %s, with labels: %v, declared successfully",
-			resp.Msg.GetRunner().GetName(), resp.Msg.GetRunner().GetVersion(), resp.Msg.GetRunner().GetLabels())
-		// if declared successfully, override the labels in the .runner file with valid labels in the config file (if specified)
-		runner.Update(ctx, ls)
-		reg.Labels = ls.ToStrings()
-		if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil {
-			return nil, "", fmt.Errorf("failed to save runner config: %w", err)
-		}
+	}
+	log.Infof("runner: %s, with version: %s, with labels: %v, declared successfully",
+		resp.Msg.GetRunner().GetName(), resp.Msg.GetRunner().GetVersion(), resp.Msg.GetRunner().GetLabels())
+	// if declared successfully, override the labels in the .runner file with valid labels in the config file (if specified)
+	runner.Update(ctx, ls)
+	reg.Labels = ls.ToStrings()
+	if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil {
+		return nil, "", fmt.Errorf("failed to save runner config: %w", err)
 	}
 	return runner, resp.Msg.GetRunner().GetName(), nil
 }
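The restructured branch above now returns early with the hard-coded name "runner" when the instance predates the Declare RPC. A minimal sketch of that fallback pattern, assuming an injected declare function in place of the generated RPC client (only connect.CodeOf and connect.CodeUnimplemented are real connectrpc API; everything else here is illustrative):

    package sketch

    import (
    	"context"

    	"connectrpc.com/connect"
    )

    // declaredName distinguishes "server too old" from a real failure.
    // declare stands in for the generated Declare RPC call.
    func declaredName(ctx context.Context, declare func(context.Context) (string, error)) (string, error) {
    	name, err := declare(ctx)
    	if err != nil && connect.CodeOf(err) == connect.CodeUnimplemented {
    		// Instances that predate the Declare RPC answer Unimplemented:
    		// fall back to a fixed name instead of failing.
    		return "runner", nil
    	}
    	if err != nil {
    		return "", err
    	}
    	return name, nil
    }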
@@ -371,6 +371,10 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
 	log.Infof("cache handler listens on: %v", handler.ExternalURL())
 	execArgs.cacheHandler = handler

+	if execArgs.containerDaemonSocket != "/var/run/docker.sock" {
+		log.Warnf("--container-daemon-socket %s: please use the DOCKER_HOST environment variable as documented at https://forgejo.org/docs/next/admin/actions/runner-installation/#setting-up-the-container-environment instead. See https://code.forgejo.org/forgejo/runner/issues/577 for more information.", execArgs.containerDaemonSocket)
+	}
+
 	// run the plan
 	config := &runner.Config{
 		Workdir: execArgs.Workdir(),
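The new warning steers users from --container-daemon-socket to the DOCKER_HOST environment variable. With the Docker SDK the runner already depends on, DOCKER_HOST is honored without any flag; a minimal sketch (client.FromEnv, client.WithAPIVersionNegotiation and DaemonHost are real Docker SDK API, the program itself is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/client"
    )

    func main() {
    	// FromEnv reads DOCKER_HOST (plus DOCKER_API_VERSION, DOCKER_CERT_PATH,
    	// DOCKER_TLS_VERIFY), so the daemon address never has to be a CLI flag.
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()
    	fmt.Println("daemon endpoint:", cli.DaemonHost())
    }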
@@ -462,7 +466,7 @@ func loadExecCmd(ctx context.Context) *cobra.Command {
 	execCmd.Flags().BoolVar(&execArg.privileged, "privileged", false, "use privileged mode")
 	execCmd.Flags().StringVar(&execArg.usernsMode, "userns", "", "user namespace to use")
 	execCmd.PersistentFlags().StringVarP(&execArg.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
-	execCmd.PersistentFlags().StringVarP(&execArg.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers")
+	execCmd.PersistentFlags().StringVarP(&execArg.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Please use the DOCKER_HOST environment variable as documented at https://forgejo.org/docs/next/admin/actions/runner-installation/#setting-up-the-container-environment instead.")
 	execCmd.Flags().BoolVar(&execArg.useGitIgnore, "use-gitignore", true, "Controls whether paths specified in .gitignore should be copied into container")
 	execCmd.Flags().StringArrayVarP(&execArg.containerCapAdd, "container-cap-add", "", []string{}, "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)")
 	execCmd.Flags().StringArrayVarP(&execArg.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)")
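The flag itself is kept for compatibility and only its help text changes. An alternative worth noting, sketched here and not what this commit does, is pflag's built-in deprecation marker, which hides the flag from help and warns on every use (MarkDeprecated is real pflag API; the command and flag wiring are illustrative):

    package sketch

    import (
    	"github.com/spf13/cobra"
    )

    func newExecCmd() *cobra.Command {
    	cmd := &cobra.Command{Use: "exec", RunE: func(*cobra.Command, []string) error { return nil }}
    	var socket string
    	cmd.Flags().StringVar(&socket, "container-daemon-socket", "/var/run/docker.sock",
    		"Path to the Docker daemon socket")
    	// MarkDeprecated hides the flag from --help output and prints a
    	// "Flag --container-daemon-socket has been deprecated, ..." warning
    	// whenever the flag is actually used.
    	_ = cmd.Flags().MarkDeprecated("container-daemon-socket",
    		"use the DOCKER_HOST environment variable instead")
    	return cmd
    }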
internal/app/cmd/testdata/validate/bad-directory/.forgejo/workflows/workflow1.yml (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
+on: [push]
+jobs:
+  test:
+    ruins-on: docker
+    steps:
+      - run: echo All good!
internal/app/cmd/testdata/validate/bad-directory/action.yml (vendored, new file, 67 lines)

@@ -0,0 +1,67 @@
+name: 'Forgejo release download and upload'
+author: 'Forgejo authors'
+description: |
+  Upload or download the assets of a release to a Forgejo instance.
+inputs:
+  badinput: scalarinsteadofmap
+  url:
+    description: 'URL of the Forgejo instance'
+    default: '${{ env.FORGEJO_SERVER_URL }}'
+  repo:
+    description: 'owner/project relative to the URL'
+    default: '${{ forge.repository }}'
+  tag:
+    description: 'Tag of the release'
+    default: '${{ forge.ref_name }}'
+  title:
+    description: 'Title of the release (defaults to tag)'
+  sha:
+    description: 'SHA of the release'
+    default: '${{ forge.sha }}'
+  token:
+    description: 'Forgejo application token'
+    default: '${{ forge.token }}'
+  release-dir:
+    description: 'Directory in whichs release assets are uploaded or downloaded'
+    required: true
+  release-notes:
+    description: 'Release notes'
+  direction:
+    description: 'Can either be `download` or `upload`'
+    required: true
+  gpg-private-key:
+    description: 'GPG Private Key to sign the release artifacts'
+  gpg-passphrase:
+    description: 'Passphrase of the GPG Private Key'
+  download-retry:
+    description: 'Number of times to retry if the release is not ready (default 1)'
+  download-latest:
+    description: 'Download the latest release'
+    default: false
+  verbose:
+    description: 'Increase the verbosity level'
+    default: false
+  override:
+    description: 'Override an existing release by the same `{tag}`'
+    default: false
+  prerelease:
+    description: 'Mark Release as Pre-Release'
+    default: false
+  release-notes-assistant:
+    description: 'Generate release notes with Release Notes Assistant'
+    default: false
+  hide-archive-link:
+    description: 'Hide the archive links'
+    default: false
+
+runs:
+  using: "composite"
+  steps:
+    - if: ${{ inputs.release-notes-assistant }}
+      uses: https://data.forgejo.org/actions/cache@v4
+      with:
+        key: rna-${{ inputs.repo }}
+        path: ${{ forge.action_path }}/rna
+
+    - run: echo "${{ forge.action_path }}" >> $FORGEJO_PATH
+      shell: bash
internal/app/cmd/testdata/validate/good-directory/.forgejo/workflows/action.yml (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
+on: [push]
+jobs:
+  test:
+    runs-on: docker
+    steps:
+      - run: echo All good!
internal/app/cmd/testdata/validate/good-directory/.forgejo/workflows/workflow1.yml (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
+on: [push]
+jobs:
+  test:
+    runs-on: docker
+    steps:
+      - run: echo All good!
internal/app/cmd/testdata/validate/good-directory/.forgejo/workflows/workflow2.yaml (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
+on: [push]
+jobs:
+  test:
+    runs-on: docker
+    steps:
+      - run: echo All good!
internal/app/cmd/testdata/validate/good-directory/.gitea/workflows/bad.yml (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
+on: [push]
+jobs:
+  test:
+    ruins-on: docker
+    steps:
+      - run: echo All good!
internal/app/cmd/testdata/validate/good-directory/.github/workflows/bad.yml (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
+on: [push]
+jobs:
+  test:
+    ruins-on: docker
+    steps:
+      - run: echo All good!
internal/app/cmd/testdata/validate/good-directory/action.yml (vendored, new file, 67 lines)

@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: MIT
+name: 'Forgejo release download and upload'
+author: 'Forgejo authors'
+description: |
+  Upload or download the assets of a release to a Forgejo instance.
+inputs:
+  url:
+    description: 'URL of the Forgejo instance'
+    default: '${{ env.FORGEJO_SERVER_URL }}'
+  repo:
+    description: 'owner/project relative to the URL'
+    default: '${{ forge.repository }}'
+  tag:
+    description: 'Tag of the release'
+    default: '${{ forge.ref_name }}'
+  title:
+    description: 'Title of the release (defaults to tag)'
+  sha:
+    description: 'SHA of the release'
+    default: '${{ forge.sha }}'
+  token:
+    description: 'Forgejo application token'
+    default: '${{ forge.token }}'
+  release-dir:
+    description: 'Directory in whichs release assets are uploaded or downloaded'
+    required: true
+  release-notes:
+    description: 'Release notes'
+  direction:
+    description: 'Can either be `download` or `upload`'
+    required: true
+  gpg-private-key:
+    description: 'GPG Private Key to sign the release artifacts'
+  gpg-passphrase:
+    description: 'Passphrase of the GPG Private Key'
+  download-retry:
+    description: 'Number of times to retry if the release is not ready (default 1)'
+  download-latest:
+    description: 'Download the latest release'
+    default: false
+  verbose:
+    description: 'Increase the verbosity level'
+    default: false
+  override:
+    description: 'Override an existing release by the same `{tag}`'
+    default: false
+  prerelease:
+    description: 'Mark Release as Pre-Release'
+    default: false
+  release-notes-assistant:
+    description: 'Generate release notes with Release Notes Assistant'
+    default: false
+  hide-archive-link:
+    description: 'Hide the archive links'
+    default: false
+
+runs:
+  using: "composite"
+  steps:
+    - if: ${{ inputs.release-notes-assistant }}
+      uses: https://data.forgejo.org/actions/cache@v4
+      with:
+        key: rna-${{ inputs.repo }}
+        path: ${{ forge.action_path }}/rna
+
+    - run: echo "${{ forge.action_path }}" >> $FORGEJO_PATH
+      shell: bash
internal/app/cmd/testdata/validate/good-directory/subaction/action.yaml (vendored, new file, 67 lines)

@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: MIT
+name: 'Forgejo release download and upload'
+author: 'Forgejo authors'
+description: |
+  Upload or download the assets of a release to a Forgejo instance.
+inputs:
+  url:
+    description: 'URL of the Forgejo instance'
+    default: '${{ env.FORGEJO_SERVER_URL }}'
+  repo:
+    description: 'owner/project relative to the URL'
+    default: '${{ forge.repository }}'
+  tag:
+    description: 'Tag of the release'
+    default: '${{ forge.ref_name }}'
+  title:
+    description: 'Title of the release (defaults to tag)'
+  sha:
+    description: 'SHA of the release'
+    default: '${{ forge.sha }}'
+  token:
+    description: 'Forgejo application token'
+    default: '${{ forge.token }}'
+  release-dir:
+    description: 'Directory in whichs release assets are uploaded or downloaded'
+    required: true
+  release-notes:
+    description: 'Release notes'
+  direction:
+    description: 'Can either be `download` or `upload`'
+    required: true
+  gpg-private-key:
+    description: 'GPG Private Key to sign the release artifacts'
+  gpg-passphrase:
+    description: 'Passphrase of the GPG Private Key'
+  download-retry:
+    description: 'Number of times to retry if the release is not ready (default 1)'
+  download-latest:
+    description: 'Download the latest release'
+    default: false
+  verbose:
+    description: 'Increase the verbosity level'
+    default: false
+  override:
+    description: 'Override an existing release by the same `{tag}`'
+    default: false
+  prerelease:
+    description: 'Mark Release as Pre-Release'
+    default: false
+  release-notes-assistant:
+    description: 'Generate release notes with Release Notes Assistant'
+    default: false
+  hide-archive-link:
+    description: 'Hide the archive links'
+    default: false
+
+runs:
+  using: "composite"
+  steps:
+    - if: ${{ inputs.release-notes-assistant }}
+      uses: https://data.forgejo.org/actions/cache@v4
+      with:
+        key: rna-${{ inputs.repo }}
+        path: ${{ forge.action_path }}/rna
+
+    - run: echo "${{ forge.action_path }}" >> $FORGEJO_PATH
+      shell: bash
@@ -35,6 +35,10 @@ git clone --bare $tmpdir/good good-repository
 rm -fr good-repository/hooks
 touch good-repository/refs/placeholder

+rm -fr good-directory
+git clone $tmpdir/good good-directory
+rm -fr good-directory/.git
+
 # bad

 mkdir $tmpdir/bad

@@ -54,3 +58,7 @@ rm -fr bad-repository
 git clone --bare $tmpdir/bad bad-repository
 rm -fr bad-repository/hooks
 touch bad-repository/refs/placeholder
+
+rm -fr bad-directory
+git clone $tmpdir/bad bad-directory
+rm -fr bad-directory/.git
@@ -23,6 +23,7 @@ type validateArgs struct {
 	path       string
 	repository string
 	clonedir   string
+	directory  string
 	workflow   bool
 	action     bool
 }

@@ -49,12 +50,13 @@ func validate(dir, path string, isWorkflow, isAction bool) error {
 		kind = "action"
 	}
 	if err != nil {
-		fmt.Printf("%s %s schema validation failed:\n%s\n", shortPath, kind, err.Error())
+		err = fmt.Errorf("%s %s schema validation failed:\n%s", shortPath, kind, err.Error())
+		fmt.Printf("%s\n", err.Error())
 	} else {
 		fmt.Printf("%s %s schema validation OK\n", shortPath, kind)
 	}

-	return nil
+	return err
 }
@@ -64,8 +66,17 @@ func validatePath(validateArgs *validateArgs) error {
 	return validate("", validateArgs.path, validateArgs.workflow, validateArgs.action)
 }

-func validateHasYamlSuffix(s, suffix string) bool {
-	return strings.HasSuffix(s, suffix+".yml") || strings.HasSuffix(s, suffix+".yaml")
+func validatePathMatch(existing, search string) bool {
+	if !validateHasYamlSuffix(existing) {
+		return false
+	}
+	existing = strings.TrimSuffix(existing, ".yml")
+	existing = strings.TrimSuffix(existing, ".yaml")
+	return existing == search || strings.HasSuffix(existing, "/"+search)
+}
+
+func validateHasYamlSuffix(s string) bool {
+	return strings.HasSuffix(s, ".yml") || strings.HasSuffix(s, ".yaml")
 }

 func validateRepository(validateArgs *validateArgs) error {
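validatePathMatch replaces suffix concatenation with a whole-path-component match, so a file like entire_something.yaml no longer matches a search for something. A self-contained restatement of the rule, for illustration only (strings.CutSuffix is standard library, Go 1.20+):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // match reports whether path is search(.yml|.yaml), either at the root
    // or as a full trailing path component.
    func match(path, search string) bool {
    	base, ok := strings.CutSuffix(path, ".yml")
    	if !ok {
    		if base, ok = strings.CutSuffix(path, ".yaml"); !ok {
    			return false
    		}
    	}
    	return base == search || strings.HasSuffix(base, "/"+search)
    }

    func main() {
    	fmt.Println(match("action.yml", "action"))            // true
    	fmt.Println(match("subaction/action.yaml", "action")) // true: full component
    	fmt.Println(match("transaction.yaml", "action"))      // false: partial component
    }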
@@ -105,15 +116,17 @@ func validateRepository(validateArgs *validateArgs) error {
 		}
 	}

+	var validationErrors error
+
 	if err := filepath.Walk(clonedir, func(path string, fi fs.FileInfo, err error) error {
-		if validateHasYamlSuffix(path, "/.forgejo/workflows/action") {
+		if validatePathMatch(path, ".forgejo/workflows/action") {
 			return nil
 		}
 		isWorkflow := false
 		isAction := true
-		if validateHasYamlSuffix(path, "/action") {
+		if validatePathMatch(path, "action") {
 			if err := validate(clonedir, path, isWorkflow, isAction); err != nil {
-				return err
+				validationErrors = errors.Join(validationErrors, err)
 			}
 		}
 		return nil

@@ -131,9 +144,9 @@ func validateRepository(validateArgs *validateArgs) error {
 	if err := filepath.Walk(workflowdir, func(path string, fi fs.FileInfo, err error) error {
 		isWorkflow := true
 		isAction := false
-		if validateHasYamlSuffix(path, "") {
+		if validateHasYamlSuffix(path) {
 			if err := validate(clonedir, path, isWorkflow, isAction); err != nil {
-				return err
+				validationErrors = errors.Join(validationErrors, err)
 			}
 		}
 		return nil
@@ -142,11 +155,19 @@ func validateRepository(validateArgs *validateArgs) error {
 		}
 	}

-	return nil
+	return validationErrors
+}
+
+func processDirectory(validateArgs *validateArgs) {
+	if len(validateArgs.directory) > 0 {
+		validateArgs.repository = validateArgs.directory
+		validateArgs.clonedir = validateArgs.directory
+	}
 }

 func runValidate(_ context.Context, validateArgs *validateArgs) func(cmd *cobra.Command, args []string) error {
 	return func(cmd *cobra.Command, args []string) error {
+		processDirectory(validateArgs)
 		if len(validateArgs.path) > 0 {
 			return validatePath(validateArgs)
 		} else if len(validateArgs.repository) > 0 {
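Collecting failures into validationErrors with errors.Join lets the walk visit every file and still report a non-nil error at the end: Join drops nil operands, returns nil when nothing failed, and prints the joined messages one per line. A small sketch of that behavior:

    package main

    import (
    	"errors"
    	"fmt"
    )

    func main() {
    	var validationErrors error

    	// nil operands are discarded, so joining a nil error is a no-op.
    	validationErrors = errors.Join(validationErrors, nil)
    	fmt.Println(validationErrors == nil) // true

    	validationErrors = errors.Join(validationErrors, errors.New("a.yml failed"))
    	validationErrors = errors.Join(validationErrors, errors.New("b.yml failed"))

    	// The joined error prints one message per line, and errors.Is still
    	// matches any of the wrapped errors.
    	fmt.Println(validationErrors) // a.yml failed\nb.yml failed
    }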
@@ -168,9 +189,15 @@ Validate workflows or actions with a schema verifying they are conformant.
 The --path argument is a filename that will be validated as a workflow
 (if the --workflow flag is set) or as an action (if the --action flag is set).

-The --repository argument is a URL to a Git repository. It will be
-cloned (in the --clonedir directory or a temporary location removed
-when the validation completes). The following files will be validated:
+The --repository argument is a URL to a Git repository that contains
+workflows or actions. It will be cloned (in the --clonedir directory
+or a temporary location removed when the validation completes).
+
+The --directory argument is the path to a repository to be explored for
+files to validate.
+
+The following files will be validated when exploring the clone of a repository
+(--repository) or a directory (--directory):

 - All .forgejo/workflows/*.{yml,yaml} files as workflows
 - All **/action.{yml,yaml} files as actions

@@ -185,9 +212,11 @@ when the validation completes). The following files will be validated:

 	validateCmd.Flags().StringVar(&validateArgs.clonedir, "clonedir", "", "directory in which the repository will be cloned")
 	validateCmd.Flags().StringVar(&validateArgs.repository, "repository", "", "URL to a repository to validate")
+	validateCmd.Flags().StringVar(&validateArgs.directory, "directory", "", "directory to a repository to validate")
 	validateCmd.Flags().StringVar(&validateArgs.path, "path", "", "path to the file")
-	validateCmd.MarkFlagsOneRequired("repository", "path")
-	validateCmd.MarkFlagsMutuallyExclusive("repository", "path")
+	validateCmd.MarkFlagsOneRequired("repository", "path", "directory")
+	validateCmd.MarkFlagsMutuallyExclusive("repository", "path", "directory")
+	validateCmd.MarkFlagsMutuallyExclusive("directory", "clonedir")

 	return validateCmd
 }
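MarkFlagsOneRequired and MarkFlagsMutuallyExclusive are cobra built-ins: the first errors if none of the named flags is set, the second if more than one is, which produces the "[...] were all set" messages asserted in the tests below. A minimal sketch with hypothetical flags:

    package main

    import (
    	"fmt"

    	"github.com/spf13/cobra"
    )

    func main() {
    	cmd := &cobra.Command{
    		Use:  "validate",
    		RunE: func(*cobra.Command, []string) error { return nil },
    	}
    	cmd.Flags().String("repository", "", "URL to a repository to validate")
    	cmd.Flags().String("directory", "", "directory of a repository to validate")
    	cmd.MarkFlagsOneRequired("repository", "directory")
    	cmd.MarkFlagsMutuallyExclusive("repository", "directory")

    	// Setting both flags trips the mutual-exclusivity check: Execute
    	// returns an error ending in "[directory repository] were all set".
    	cmd.SetArgs([]string{"--repository", "example.com", "--directory", "."})
    	fmt.Println(cmd.Execute())
    }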
|
|
|
||||||
|
|
@ -10,6 +10,15 @@ import (
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func Test_validatePathMatch(t *testing.T) {
|
||||||
|
assert.False(t, validatePathMatch("nosuffix", "nosuffix"))
|
||||||
|
assert.True(t, validatePathMatch("something.yml", "something"))
|
||||||
|
assert.True(t, validatePathMatch("something.yaml", "something"))
|
||||||
|
assert.False(t, validatePathMatch("entire_something.yaml", "something"))
|
||||||
|
assert.True(t, validatePathMatch("nested/in/directory/something.yaml", "something"))
|
||||||
|
assert.False(t, validatePathMatch("nested/in/directory/entire_something.yaml", "something"))
|
||||||
|
}
|
||||||
|
|
||||||
func Test_validateCmd(t *testing.T) {
|
func Test_validateCmd(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
for _, testCase := range []struct {
|
for _, testCase := range []struct {
|
||||||
|
|
@@ -27,19 +36,30 @@ func Test_validateCmd(t *testing.T) {
             message: "one of --workflow or --action must be set",
         },
         {
-            name:    "MutuallyExclusive",
+            name:    "MutuallyExclusiveActionWorkflow",
             args:    []string{"--action", "--workflow", "--path", "/tmp"},
             message: "[action workflow] were all set",
         },
+        {
+            name:    "MutuallyExclusiveRepositoryDirectory",
+            args:    []string{"--repository", "example.com", "--directory", "."},
+            message: "[directory repository] were all set",
+        },
+        {
+            name:    "MutuallyExclusiveClonedirDirectory",
+            args:    []string{"--clonedir", ".", "--directory", "."},
+            message: "[clonedir directory] were all set",
+        },
         {
             name:   "PathActionOK",
             args:   []string{"--action", "--path", "testdata/validate/good-action.yml"},
             stdOut: "schema validation OK",
         },
         {
             name:   "PathActionNOK",
             args:   []string{"--action", "--path", "testdata/validate/bad-action.yml"},
             stdOut: "Expected a mapping got scalar",
+            message: "testdata/validate/bad-action.yml action schema validation failed",
         },
         {
             name: "PathWorkflowOK",
@@ -47,9 +67,27 @@ func Test_validateCmd(t *testing.T) {
             stdOut: "schema validation OK",
         },
         {
             name:   "PathWorkflowNOK",
             args:   []string{"--workflow", "--path", "testdata/validate/bad-workflow.yml"},
             stdOut: "Unknown Property ruins-on",
+            message: "testdata/validate/bad-workflow.yml workflow schema validation failed",
+        },
+        {
+            name:   "DirectoryOK",
+            args:   []string{"--directory", "testdata/validate/good-directory"},
+            stdOut: "action.yml action schema validation OK\nsubaction/action.yaml action schema validation OK\n.forgejo/workflows/action.yml workflow schema validation OK\n.forgejo/workflows/workflow1.yml workflow schema validation OK\n.forgejo/workflows/workflow2.yaml workflow schema validation OK",
+        },
+        {
+            name:    "DirectoryActionNOK",
+            args:    []string{"--directory", "testdata/validate/bad-directory"},
+            stdOut:  "action.yml action schema validation failed",
+            message: "action.yml action schema validation failed",
+        },
+        {
+            name:    "DirectoryWorkflowNOK",
+            args:    []string{"--directory", "testdata/validate/bad-directory"},
+            stdOut:  ".forgejo/workflows/workflow1.yml workflow schema validation failed",
+            message: ".forgejo/workflows/workflow1.yml workflow schema validation failed",
         },
         {
             name: "RepositoryOK",
@@ -57,14 +95,16 @@ func Test_validateCmd(t *testing.T) {
             stdOut: "action.yml action schema validation OK\nsubaction/action.yaml action schema validation OK\n.forgejo/workflows/action.yml workflow schema validation OK\n.forgejo/workflows/workflow1.yml workflow schema validation OK\n.forgejo/workflows/workflow2.yaml workflow schema validation OK",
         },
         {
             name:   "RepositoryActionNOK",
             args:   []string{"--repository", "testdata/validate/bad-repository"},
             stdOut: "action.yml action schema validation failed",
+            message: "action.yml action schema validation failed",
         },
         {
             name:   "RepositoryWorkflowNOK",
             args:   []string{"--repository", "testdata/validate/bad-repository"},
             stdOut: ".forgejo/workflows/workflow1.yml workflow schema validation failed",
+            message: ".forgejo/workflows/workflow1.yml workflow schema validation failed",
         },
     } {
         t.Run(testCase.name, func(t *testing.T) {
@@ -36,11 +36,20 @@ func NewJob(cfg *config.Config, client client.Client, runner run.RunnerInterface
 }
 
 func (j *Job) Run(ctx context.Context) error {
-    task, ok := j.fetchTask(ctx)
-    if !ok {
-        return fmt.Errorf("could not fetch task")
+    log.Info("Polling for a job...")
+    for {
+        task, ok := j.fetchTask(ctx)
+        if ok {
+            return j.runTaskWithRecover(ctx, task)
+        }
+        // No task available, continue polling
+        select {
+        case <-ctx.Done():
+            return ctx.Err()
+        default:
+            // Continue to next iteration
+        }
     }
-    return j.runTaskWithRecover(ctx, task)
 }
 
 func (j *Job) runTaskWithRecover(ctx context.Context, task *runnerv1.Task) error {
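The rewritten Run only exits when a task arrives or the context is cancelled; the non-blocking select keeps cancellation responsive between attempts. This assumes fetchTask paces its own requests (blocking or sleeping internally); if it returned immediately, a loop of this shape would spin. A generic sketch of the same pattern with explicit pacing (hypothetical helper, not the runner's code):

package job

import (
    "context"
    "time"
)

// pollUntil repeatedly calls fetch until it yields a value or ctx ends.
func pollUntil[T any](ctx context.Context, interval time.Duration, fetch func(context.Context) (T, bool)) (T, error) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        if v, ok := fetch(ctx); ok {
            return v, nil
        }
        select {
        case <-ctx.Done():
            var zero T
            return zero, ctx.Err() // cancellation wins over the next attempt
        case <-ticker.C:
            // fall through to the next polling attempt
        }
    }
}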
@@ -90,10 +90,10 @@ func (p *poller) Shutdown(ctx context.Context) error {
         return nil
 
     case <-ctx.Done():
-        log.Trace("forcing the jobs to shutdown")
+        log.Info("forcing the jobs to shutdown")
         p.shutdownJobs()
         <-p.done
-        log.Trace("all jobs have been shutdown")
+        log.Info("all jobs have been shutdown")
         return ctx.Err()
     }
 }
@@ -117,13 +117,13 @@ func setupCache(cfg *config.Config, envs map[string]string) *cacheproxy.Handler
 
     cacheServer, err := artifactcache.StartHandler(
         cfg.Cache.Dir,
-        cfg.Cache.Host,
+        "", // automatically detect
         cfg.Cache.Port,
         cacheSecret,
         log.StandardLogger().WithField("module", "cache_request"),
     )
     if err != nil {
-        log.Error("Could not start the cache server, cache will be disabled")
+        log.Errorf("Could not start the cache server, cache will be disabled: %v", err)
         return nil
     }
@@ -144,16 +144,14 @@ func setupCache(cfg *config.Config, envs map[string]string) *cacheproxy.Handler
         cacheURL,
         cfg.Cache.Host,
         cfg.Cache.ProxyPort,
+        cfg.Cache.ActionsCacheURLOverride,
         cacheSecret,
         log.StandardLogger().WithField("module", "cache_proxy"),
     )
     if err != nil {
         log.Errorf("cannot init cache proxy, cache will be disabled: %v", err)
-    }
-    envs["ACTIONS_CACHE_URL"] = cacheProxy.ExternalURL()
-    if cfg.Cache.ActionsCacheURLOverride != "" {
-        envs["ACTIONS_CACHE_URL"] = cfg.Cache.ActionsCacheURLOverride
+    } else {
+        envs["ACTIONS_CACHE_URL"] = cacheProxy.ExternalURL()
     }
 
     return cacheProxy
@@ -195,6 +193,44 @@ func explainFailedGenerateWorkflow(task *runnerv1.Task, log func(message string,
     return fmt.Errorf("the workflow file is not usable")
 }
 
+func getWriteIsolationKey(ctx context.Context, eventName, ref string, event map[string]any) (string, error) {
+    if eventName == "pull_request" {
+        // The "closed" action of a pull request event runs in the context of the base repository
+        // and was merged by a user with write access to the base repository. It is authorized to
+        // write the repository cache.
+        if event["action"] == "closed" {
+            pullRequest, ok := event["pull_request"].(map[string]any)
+            if !ok {
+                return "", fmt.Errorf("getWriteIsolationKey: event.pull_request is not a map[string]any but %T", event["pull_request"])
+            }
+            merged, ok := pullRequest["merged"].(bool)
+            if !ok {
+                return "", fmt.Errorf("getWriteIsolationKey: event.pull_request.merged is not a bool but %T", pullRequest["merged"])
+            }
+            if merged {
+                return "", nil
+            }
+            // a pull request that is closed but not merged falls thru and is expected to obey the same
+            // constraints as an opened pull request, it may be closed by a user with no write permissions to the
+            // base repository
+        }
+        // When performing an action on an event from an opened PR, provide a "write isolation key" to the cache. The generated
+        // ACTIONS_CACHE_URL will be able to read the cache, and write to a cache, but its writes will be isolated to
+        // future runs of the PR's workflows and won't be shared with other pull requests or actions. This is a security
+        // measure to prevent a malicious pull request from poisoning the cache with secret-stealing code which would
+        // later be executed on another action.
+        // Ensure that `ref` has the expected format so that we don't end up with a useless write isolation key
+        if !strings.HasPrefix(ref, "refs/pull/") {
+            return "", fmt.Errorf("getWriteIsolationKey: expected ref to be refs/pull/..., but was %q", ref)
+        }
+        return ref, nil
+    }
+
+    // Other events do not allow the trigger user to modify the content of the repository and
+    // are allowed to write the cache without an isolation key
+    return "", nil
+}
+
 func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.Reporter) (err error) {
     defer func() {
         if r := recover(); r != nil {
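Concretely, the new helper maps each trigger to a cache scope. A quick illustration of the three outcomes, matching the unit tests added later in this patch (illustrative wrapper function only, in the same package):

func exampleWriteIsolationKeys(ctx context.Context) {
    // push events write the shared cache directly: no isolation key.
    key, _ := getWriteIsolationKey(ctx, "push", "refs/heads/main", nil) // key == ""

    // an open pull request gets its writes fenced to its own ref.
    key, _ = getWriteIsolationKey(ctx, "pull_request", "refs/pull/1/head",
        map[string]any{"action": "synchronized"}) // key == "refs/pull/1/head"

    // a merged pull request already ran with write access: no isolation key.
    key, _ = getWriteIsolationKey(ctx, "pull_request", "refs/pull/1/head",
        map[string]any{"action": "closed", "pull_request": map[string]any{"merged": true}}) // key == ""

    _ = key
}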
@@ -228,15 +264,18 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
         defaultActionURL,
         r.client.Address())
 
+    eventName := taskContext["event_name"].GetStringValue()
+    ref := taskContext["ref"].GetStringValue()
+    event := taskContext["event"].GetStructValue().AsMap()
     preset := &model.GithubContext{
-        Event:      taskContext["event"].GetStructValue().AsMap(),
+        Event:      event,
         RunID:      taskContext["run_id"].GetStringValue(),
         RunNumber:  taskContext["run_number"].GetStringValue(),
         Actor:      taskContext["actor"].GetStringValue(),
         Repository: taskContext["repository"].GetStringValue(),
-        EventName:  taskContext["event_name"].GetStringValue(),
+        EventName:  eventName,
         Sha:        taskContext["sha"].GetStringValue(),
-        Ref:        taskContext["ref"].GetStringValue(),
+        Ref:        ref,
         RefName:    taskContext["ref_name"].GetStringValue(),
         RefType:    taskContext["ref_type"].GetStringValue(),
         HeadRef:    taskContext["head_ref"].GetStringValue(),
@@ -266,19 +305,9 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
 
     // Register the run with the cacheproxy and modify the CACHE_URL
     if r.cacheProxy != nil {
-        writeIsolationKey := ""
-
-        // When performing an action on an event from a PR, provide a "write isolation key" to the cache. The generated
-        // ACTIONS_CACHE_URL will be able to read the cache, and write to a cache, but its writes will be isolated to
-        // future runs of the PR's workflows and won't be shared with other pull requests or actions. This is a security
-        // measure to prevent a malicious pull request from poisoning the cache with secret-stealing code which would
-        // later be executed on another action.
-        if taskContext["event_name"].GetStringValue() == "pull_request" {
-            // Ensure that `Ref` has the expected format so that we don't end up with a useless write isolation key
-            if !strings.HasPrefix(preset.Ref, "refs/pull/") {
-                return fmt.Errorf("write isolation key: expected preset.Ref to be refs/pull/..., but was %q", preset.Ref)
-            }
-            writeIsolationKey = preset.Ref
+        writeIsolationKey, err := getWriteIsolationKey(ctx, eventName, ref, event)
+        if err != nil {
+            return err
         }
 
         timestamp := strconv.FormatInt(time.Now().Unix(), 10)
@@ -366,7 +395,6 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
     }
 
     execErr := executor(ctx)
-    _ = reporter.SetOutputs(job.Outputs)
     return execErr
 }
@@ -4,6 +4,8 @@ import (
     "context"
     "errors"
     "fmt"
+    "net"
+    "os"
     "testing"
     "time"
@@ -13,6 +15,7 @@ import (
     "code.forgejo.org/forgejo/runner/v11/internal/pkg/labels"
     "code.forgejo.org/forgejo/runner/v11/internal/pkg/report"
     "connectrpc.com/connect"
+    log "github.com/sirupsen/logrus"
     "google.golang.org/protobuf/types/known/structpb"
 
     "github.com/stretchr/testify/assert"
@@ -20,6 +23,10 @@ import (
     "github.com/stretchr/testify/require"
 )
 
+func init() {
+    log.SetLevel(log.TraceLevel)
+}
+
 func TestExplainFailedGenerateWorkflow(t *testing.T) {
     logged := ""
     log := func(message string, args ...any) {
@@ -64,6 +71,7 @@ func TestLabelUpdate(t *testing.T) {
 
 type forgejoClientMock struct {
     mock.Mock
+    sent string
 }
 
 func (m *forgejoClientMock) Address() string {
@@ -116,11 +124,20 @@ func (m *forgejoClientMock) UpdateTask(ctx context.Context, request *connect.Req
     return args.Get(0).(*connect.Response[runnerv1.UpdateTaskResponse]), args.Error(1)
 }
 
+func rowsToString(rows []*runnerv1.LogRow) string {
+    s := ""
+    for _, row := range rows {
+        s += row.Content + "\n"
+    }
+    return s
+}
+
 func (m *forgejoClientMock) UpdateLog(ctx context.Context, request *connect.Request[runnerv1.UpdateLogRequest]) (*connect.Response[runnerv1.UpdateLogResponse], error) {
     // Enable for log output from runs if needed.
     // for _, row := range request.Msg.Rows {
     //     println(fmt.Sprintf("UpdateLog: %q", row.Content))
     // }
+    m.sent += rowsToString(request.Msg.Rows)
     args := m.Called(ctx, request)
     mockRetval := args.Get(0)
     mockError := args.Error(1)
@@ -135,6 +152,83 @@ func (m *forgejoClientMock) UpdateLog(ctx context.Context, request *connect.Requ
     }), nil
 }
 
+func TestRunner_getWriteIsolationKey(t *testing.T) {
+    t.Run("push", func(t *testing.T) {
+        key, err := getWriteIsolationKey(t.Context(), "push", "whatever", nil)
+        require.NoError(t, err)
+        assert.Empty(t, key)
+    })
+
+    t.Run("pull_request synchronized key is ref", func(t *testing.T) {
+        expectedKey := "refs/pull/1/head"
+        actualKey, err := getWriteIsolationKey(t.Context(), "pull_request", expectedKey, map[string]any{
+            "action": "synchronized",
+        })
+        require.NoError(t, err)
+        assert.Equal(t, expectedKey, actualKey)
+    })
+
+    t.Run("pull_request synchronized ref is invalid", func(t *testing.T) {
+        invalidKey := "refs/is/invalid"
+        key, err := getWriteIsolationKey(t.Context(), "pull_request", invalidKey, map[string]any{
+            "action": "synchronized",
+        })
+        require.Empty(t, key)
+        assert.ErrorContains(t, err, invalidKey)
+    })
+
+    t.Run("pull_request closed and not merged key is ref", func(t *testing.T) {
+        expectedKey := "refs/pull/1/head"
+        actualKey, err := getWriteIsolationKey(t.Context(), "pull_request", expectedKey, map[string]any{
+            "action": "closed",
+            "pull_request": map[string]any{
+                "merged": false,
+            },
+        })
+        require.NoError(t, err)
+        assert.Equal(t, expectedKey, actualKey)
+    })
+
+    t.Run("pull_request closed and merged key is empty", func(t *testing.T) {
+        key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
+            "action": "closed",
+            "pull_request": map[string]any{
+                "merged": true,
+            },
+        })
+        require.NoError(t, err)
+        assert.Empty(t, key)
+    })
+
+    t.Run("pull_request missing event.pull_request", func(t *testing.T) {
+        key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
+            "action": "closed",
+        })
+        require.Empty(t, key)
+        assert.ErrorContains(t, err, "event.pull_request is not a map")
+    })
+
+    t.Run("pull_request missing event.pull_request.merge", func(t *testing.T) {
+        key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
+            "action": "closed",
+            "pull_request": map[string]any{},
+        })
+        require.Empty(t, key)
+        assert.ErrorContains(t, err, "event.pull_request.merged is not a bool")
+    })
+
+    t.Run("pull_request with event.pull_request.merge of an unexpected type", func(t *testing.T) {
+        key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
+            "action": "closed",
+            "pull_request": map[string]any{
+                "merged": "string instead of bool",
+            },
+        })
+        require.Empty(t, key)
+        assert.ErrorContains(t, err, "not a bool but string")
+    })
+}
+
 func TestRunnerCacheConfiguration(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping integration test")
@@ -337,3 +431,327 @@ jobs:
         runWorkflow(ctx, cancel, checkKey2Yaml, "push", "refs/heads/main", "step 5: push cache should not be polluted by PR")
     })
 }
+
+func TestRunnerCacheStartupFailure(t *testing.T) {
+    if testing.Short() {
+        t.Skip("skipping integration test")
+    }
+
+    testCases := []struct {
+        desc   string
+        listen string
+    }{
+        {
+            desc:   "disable cache server",
+            listen: "127.0.0.1:40715",
+        },
+        {
+            desc:   "disable cache proxy server",
+            listen: "127.0.0.1:40716",
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.desc, func(t *testing.T) {
+            forgejoClient := &forgejoClientMock{}
+
+            forgejoClient.On("Address").Return("https://127.0.0.1:8080") // not expected to be used in this test
+            forgejoClient.On("UpdateLog", mock.Anything, mock.Anything).Return(nil, nil)
+            forgejoClient.On("UpdateTask", mock.Anything, mock.Anything).
+                Return(connect.NewResponse(&runnerv1.UpdateTaskResponse{}), nil)
+
+            // We'll be listening on some network port in this test that will conflict with the cache configuration...
+            l, err := net.Listen("tcp4", tc.listen)
+            require.NoError(t, err)
+            defer l.Close()
+
+            runner := NewRunner(
+                &config.Config{
+                    Cache: config.Cache{
+                        Port:      40715,
+                        ProxyPort: 40716,
+                        Dir:       t.TempDir(),
+                    },
+                    Host: config.Host{
+                        WorkdirParent: t.TempDir(),
+                    },
+                },
+                &config.Registration{
+                    Labels: []string{"ubuntu-latest:docker://code.forgejo.org/oci/node:20-bookworm"},
+                },
+                forgejoClient)
+            require.NotNil(t, runner)
+
+            // Ensure that cacheProxy failed to start
+            assert.Nil(t, runner.cacheProxy)
+
+            runWorkflow := func(ctx context.Context, cancel context.CancelFunc, yamlContent string) {
+                task := &runnerv1.Task{
+                    WorkflowPayload: []byte(yamlContent),
+                    Context: &structpb.Struct{
+                        Fields: map[string]*structpb.Value{
+                            "token":                       structpb.NewStringValue("some token here"),
+                            "forgejo_default_actions_url": structpb.NewStringValue("https://data.forgejo.org"),
+                            "repository":                  structpb.NewStringValue("runner"),
+                            "event_name":                  structpb.NewStringValue("push"),
+                            "ref":                         structpb.NewStringValue("refs/heads/main"),
+                        },
+                    },
+                }
+
+                reporter := report.NewReporter(ctx, cancel, forgejoClient, task, time.Second)
+                err := runner.run(ctx, task, reporter)
+                reporter.Close(nil)
+                require.NoError(t, err)
+            }
+
+            ctx, cancel := context.WithCancel(t.Context())
+            defer cancel()
+
+            checkCacheYaml := `
+name: Verify No ACTIONS_CACHE_URL
+on:
+  push:
+jobs:
+  job-cache-check-1:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo $ACTIONS_CACHE_URL
+      - run: '[[ "$ACTIONS_CACHE_URL" = "" ]] || exit 1'
+`
+            runWorkflow(ctx, cancel, checkCacheYaml)
+        })
+    }
+}
+
+func TestRunnerLXC(t *testing.T) {
+    if testing.Short() {
+        t.Skip("skipping integration test")
+    }
+
+    forgejoClient := &forgejoClientMock{}
+
+    forgejoClient.On("Address").Return("https://127.0.0.1:8080") // not expected to be used in this test
+    forgejoClient.On("UpdateLog", mock.Anything, mock.Anything).Return(nil, nil)
+    forgejoClient.On("UpdateTask", mock.Anything, mock.Anything).
+        Return(connect.NewResponse(&runnerv1.UpdateTaskResponse{}), nil)
+
+    workdirParent := t.TempDir()
+    runner := NewRunner(
+        &config.Config{
+            Log: config.Log{
+                JobLevel: "trace",
+            },
+            Host: config.Host{
+                WorkdirParent: workdirParent,
+            },
+        },
+        &config.Registration{
+            Labels: []string{"lxc:lxc://debian:bookworm"},
+        },
+        forgejoClient)
+    require.NotNil(t, runner)
+
+    runWorkflow := func(ctx context.Context, cancel context.CancelFunc, yamlContent, eventName, ref, description string) {
+        task := &runnerv1.Task{
+            WorkflowPayload: []byte(yamlContent),
+            Context: &structpb.Struct{
+                Fields: map[string]*structpb.Value{
+                    "token":                       structpb.NewStringValue("some token here"),
+                    "forgejo_default_actions_url": structpb.NewStringValue("https://data.forgejo.org"),
+                    "repository":                  structpb.NewStringValue("runner"),
+                    "event_name":                  structpb.NewStringValue(eventName),
+                    "ref":                         structpb.NewStringValue(ref),
+                },
+            },
+        }
+
+        reporter := report.NewReporter(ctx, cancel, forgejoClient, task, time.Second)
+        err := runner.run(ctx, task, reporter)
+        reporter.Close(nil)
+        require.NoError(t, err, description)
+        // verify there are no leftovers
+        assertDirectoryEmpty := func(t *testing.T, dir string) {
+            f, err := os.Open(dir)
+            require.NoError(t, err)
+            defer f.Close()
+
+            names, err := f.Readdirnames(-1)
+            require.NoError(t, err)
+            assert.Empty(t, names)
+        }
+        assertDirectoryEmpty(t, workdirParent)
+    }
+
+    t.Run("OK", func(t *testing.T) {
+        ctx, cancel := context.WithCancel(t.Context())
+        defer cancel()
+
+        workflow := `
+on:
+  push:
+jobs:
+  job:
+    runs-on: lxc
+    steps:
+      - run: mkdir -p some/directory/owned/by/root
+`
+        runWorkflow(ctx, cancel, workflow, "push", "refs/heads/main", "OK")
+    })
+}
+
+func TestRunnerResources(t *testing.T) {
+    if testing.Short() {
+        t.Skip("skipping integration test")
+    }
+
+    forgejoClient := &forgejoClientMock{}
+
+    forgejoClient.On("Address").Return("https://127.0.0.1:8080") // not expected to be used in this test
+    forgejoClient.On("UpdateLog", mock.Anything, mock.Anything).Return(nil, nil)
+    forgejoClient.On("UpdateTask", mock.Anything, mock.Anything).
+        Return(connect.NewResponse(&runnerv1.UpdateTaskResponse{}), nil)
+
+    workdirParent := t.TempDir()
+
+    runWorkflow := func(ctx context.Context, cancel context.CancelFunc, yamlContent, options, errorMessage, logMessage string) {
+        task := &runnerv1.Task{
+            WorkflowPayload: []byte(yamlContent),
+            Context: &structpb.Struct{
+                Fields: map[string]*structpb.Value{
+                    "token":                       structpb.NewStringValue("some token here"),
+                    "forgejo_default_actions_url": structpb.NewStringValue("https://data.forgejo.org"),
+                    "repository":                  structpb.NewStringValue("runner"),
+                    "event_name":                  structpb.NewStringValue("push"),
+                    "ref":                         structpb.NewStringValue("refs/heads/main"),
+                },
+            },
+        }
+
+        runner := NewRunner(
+            &config.Config{
+                Log: config.Log{
+                    JobLevel: "trace",
+                },
+                Host: config.Host{
+                    WorkdirParent: workdirParent,
+                },
+                Container: config.Container{
+                    Options: options,
+                },
+            },
+            &config.Registration{
+                Labels: []string{"docker:docker://code.forgejo.org/oci/node:20-bookworm"},
+            },
+            forgejoClient)
+        require.NotNil(t, runner)
+
+        reporter := report.NewReporter(ctx, cancel, forgejoClient, task, time.Second)
+        err := runner.run(ctx, task, reporter)
+        reporter.Close(nil)
+        if len(errorMessage) > 0 {
+            require.Error(t, err)
+            assert.ErrorContains(t, err, errorMessage)
+        } else {
+            require.NoError(t, err)
+        }
+        if len(logMessage) > 0 {
+            assert.Contains(t, forgejoClient.sent, logMessage)
+        }
+    }
+
+    t.Run("config.yaml --memory set and enforced", func(t *testing.T) {
+        ctx, cancel := context.WithCancel(t.Context())
+        defer cancel()
+
+        workflow := `
+on:
+  push:
+jobs:
+  job:
+    runs-on: docker
+    steps:
+      - run: |
+          # more than 300MB
+          perl -e '$a = "a" x (300 * 1024 * 1024)'
+`
+        runWorkflow(ctx, cancel, workflow, "--memory 200M", "Job 'job' failed", "Killed")
+    })
+
+    t.Run("config.yaml --memory set and within limits", func(t *testing.T) {
+        ctx, cancel := context.WithCancel(t.Context())
+        defer cancel()
+
+        workflow := `
+on:
+  push:
+jobs:
+  job:
+    runs-on: docker
+    steps:
+      - run: echo OK
+`
+        runWorkflow(ctx, cancel, workflow, "--memory 200M", "", "")
+    })
+
+    t.Run("config.yaml --memory set and container fails to increase it", func(t *testing.T) {
+        ctx, cancel := context.WithCancel(t.Context())
+        defer cancel()
+
+        workflow := `
+on:
+  push:
+jobs:
+  job:
+    runs-on: docker
+    container:
+      image: code.forgejo.org/oci/node:20-bookworm
+      options: --memory 4G
+    steps:
+      - run: |
+          # more than 300MB
+          perl -e '$a = "a" x (300 * 1024 * 1024)'
+`
+        runWorkflow(ctx, cancel, workflow, "--memory 200M", "option found in the workflow cannot be greater than", "")
+    })
+
+    t.Run("container --memory set and enforced", func(t *testing.T) {
+        ctx, cancel := context.WithCancel(t.Context())
+        defer cancel()
+
+        workflow := `
+on:
+  push:
+jobs:
+  job:
+    runs-on: docker
+    container:
+      image: code.forgejo.org/oci/node:20-bookworm
+      options: --memory 200M
+    steps:
+      - run: |
+          # more than 300MB
+          perl -e '$a = "a" x (300 * 1024 * 1024)'
+`
+        runWorkflow(ctx, cancel, workflow, "", "Job 'job' failed", "Killed")
+    })
+
+    t.Run("container --memory set and within limits", func(t *testing.T) {
+        ctx, cancel := context.WithCancel(t.Context())
+        defer cancel()
+
+        workflow := `
+on:
+  push:
+jobs:
+  job:
+    runs-on: docker
+    container:
+      image: code.forgejo.org/oci/node:20-bookworm
+      options: --memory 200M
+    steps:
+      - run: echo OK
+`
+        runWorkflow(ctx, cancel, workflow, "", "", "")
+    })
+}
@@ -110,25 +110,20 @@ cache:
   #
   external_server: ""
   #
-  #######################################################################
-  #
-  # Common to the internal and external cache server
-  #
-  #######################################################################
-  #
   # The shared cache secret used to secure the communications between
   # the cache proxy and the cache server.
   #
   # If empty, it will be generated to a new secret automatically when
   # the server starts and it will stay the same until it restarts.
   #
-  # Every time the secret is modified, all cache entries that were
-  # created with it are invalidated. In order to ensure that the cache
-  # content is reused when the runner restarts, this secret must be
-  # set, for instance with the output of openssl rand -hex 40.
-  #
   secret: ""
   #
+  #######################################################################
+  #
+  # Common to the internal and external cache server
+  #
+  #######################################################################
+  #
   # The IP or hostname (195.84.20.30 or example.com) to use when constructing
   # ACTIONS_CACHE_URL which is the URL of the cache proxy.
   #
@@ -138,7 +133,7 @@ cache:
   # different network than the Forgejo runner (for instance when the
   # docker server used to create containers is not running on the same
   # host as the Forgejo runner), it may be impossible to figure that
-  # out automatically. In that case you can specifify which IP or
+  # out automatically. In that case you can specify which IP or
   # hostname to use to reach the internal cache server created by the
   # Forgejo runner.
   #
@@ -181,10 +176,12 @@ container:
   # valid_volumes:
   #   - '**'
   valid_volumes: []
-  # overrides the docker client host with the specified one.
-  # If "-" or "", an available docker host will automatically be found.
+  # Overrides the docker host set by the DOCKER_HOST environment variable, and mounts on the job container.
+  # If "-" or "", no docker host will be mounted in the job container
   # If "automount", an available docker host will automatically be found and mounted in the job container (e.g. /var/run/docker.sock).
-  # Otherwise the specified docker host will be used and an error will be returned if it doesn't work.
+  # If it's a url, the specified docker host will be mounted in the job container
+  # Example urls: unix:///run/docker.socket or ssh://user@host
+  # The specified socket is mounted within the job container at /var/run/docker.sock
   docker_host: "-"
   # Pull docker image(s) even if already present
   force_pull: false
@@ -1,3 +1,4 @@
+// Copyright 2025 The Forgejo Authors. All rights reserved.
 // Copyright 2023 The Gitea Authors. All rights reserved.
 // SPDX-License-Identifier: MIT
@@ -6,6 +7,8 @@ package config
 import (
     "encoding/json"
     "os"
+
+    "github.com/google/go-cmp/cmp"
 )
 
 const registrationWarning = "This file is automatically generated by act-runner. Do not edit it manually unless you know what you are doing. Removing this file will cause act runner to re-register as a new runner."
@@ -34,12 +37,28 @@ func LoadRegistration(file string) (*Registration, error) {
         return nil, err
     }
 
-    reg.Warning = ""
-
     return &reg, nil
 }
 
+func isEqualRegistration(file string, reg *Registration) (bool, error) {
+    existing, err := LoadRegistration(file)
+    if err != nil {
+        if os.IsNotExist(err) {
+            return false, nil
+        }
+        return false, err
+    }
+    return cmp.Equal(*reg, *existing), nil
+}
+
 func SaveRegistration(file string, reg *Registration) error {
+    equal, err := isEqualRegistration(file, reg)
+    if err != nil {
+        return err
+    }
+    if equal {
+        return nil
+    }
     f, err := os.Create(file)
     if err != nil {
         return err
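go-cmp performs a deep, field-by-field comparison, which is what makes the idempotent-save check above reliable for slice fields like Labels. A small standalone illustration with hypothetical values (cmp.Equal requires exported fields, as here):

package main

import (
    "fmt"

    "github.com/google/go-cmp/cmp"
)

type registration struct {
    Name   string
    Labels []string
}

func main() {
    a := registration{Name: "NAME", Labels: []string{"LABEL1", "LABEL2"}}
    b := registration{Name: "NAME", Labels: []string{"LABEL1", "LABEL2"}}
    c := registration{Name: "NAME", Labels: []string{"LABEL3"}}

    fmt.Println(cmp.Equal(a, b)) // true: slices are compared element-wise
    fmt.Println(cmp.Equal(a, c)) // false: a changed label forces a rewrite of the file
}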
63 internal/pkg/config/registration_test.go Normal file
@@ -0,0 +1,63 @@
// Copyright 2025 The Forgejo Authors. All rights reserved.
// SPDX-License-Identifier: GPL-3.0-or-later

package config

import (
    "os"
    "path/filepath"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestConfig_Registration(t *testing.T) {
    reg := Registration{
        Warning: registrationWarning,
        ID:      1234,
        UUID:    "UUID",
        Name:    "NAME",
        Token:   "TOKEN",
        Address: "ADDRESS",
        Labels:  []string{"LABEL1", "LABEL2"},
    }

    file := filepath.Join(t.TempDir(), ".runner")

    // when the file does not exist, it is never equal
    equal, err := isEqualRegistration(file, &reg)
    require.NoError(t, err)
    assert.False(t, equal)

    require.NoError(t, SaveRegistration(file, &reg))

    regReloaded, err := LoadRegistration(file)
    require.NoError(t, err)
    assert.Equal(t, reg, *regReloaded)

    equal, err = isEqualRegistration(file, &reg)
    require.NoError(t, err)
    assert.True(t, equal)

    // if the registration is not modified, it is not saved
    time.Sleep(2 * time.Second) // file system precision on modification time is one second
    before, err := os.Stat(file)
    require.NoError(t, err)
    require.NoError(t, SaveRegistration(file, &reg))
    after, err := os.Stat(file)
    require.NoError(t, err)
    assert.Equal(t, before.ModTime(), after.ModTime())

    reg.Labels = []string{"LABEL3"}
    equal, err = isEqualRegistration(file, &reg)
    require.NoError(t, err)
    assert.False(t, equal)

    // if the registration is modified, it is saved
    require.NoError(t, SaveRegistration(file, &reg))
    after, err = os.Stat(file)
    require.NoError(t, err)
    assert.NotEqual(t, before.ModTime(), after.ModTime())
}
@@ -38,7 +38,7 @@ func (o *masker) add(secret string) {
         })
         // a multiline secret transformed into a single line by replacing
         // newlines with \ followed by n must also be redacted
-        secret = strings.Join(lines, "\\n")
+        o.lines = append(o.lines, strings.Join(lines, "\\n"))
     }
 
     o.lines = append(o.lines, secret)
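The one-line fix matters because the old assignment overwrote `secret`, so only the single-line form ever reached `o.lines` and the original multiline secret went unmasked. A quick illustration of the string transformation involved (plain Go, not the masker itself):

package main

import (
    "fmt"
    "strings"
)

func main() {
    secret := "ABC\nDEF\nGHI"
    lines := strings.Split(secret, "\n")
    // Joining with a literal backslash-n covers logs where the multiline
    // secret was flattened into one row, e.g. by JSON encoding.
    flattened := strings.Join(lines, "\\n")
    fmt.Println(flattened) // prints ABC\nDEF\nGHI on a single line
    // Both `secret` and `flattened` must be registered for redaction;
    // the old code replaced `secret` with `flattened` and lost the first form.
}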
@@ -7,6 +7,8 @@ import (
     "fmt"
     "testing"
 
+    runnerv1 "code.forgejo.org/forgejo/actions-proto/runner/v1"
+
     "github.com/stretchr/testify/assert"
 )
@@ -267,4 +269,17 @@ SIX`
             assert.Equal(t, testCase.out, rowsToString(rows))
         })
     }
+
+    t.Run("MultilineSecretInSingleRow", func(t *testing.T) {
+        secret := "ABC\nDEF\nGHI"
+        m := newMasker()
+        m.add(secret)
+        rows := []*runnerv1.LogRow{
+            {Content: fmt.Sprintf("BEFORE%sAFTER", secret)},
+        }
+        noMore := false
+        needMore := m.replace(rows, noMore)
+        assert.False(t, needMore)
+        assert.Equal(t, "BEFORE***AFTER\n", rowsToString(rows))
+    })
 }
@@ -13,6 +13,7 @@ import (
     "time"
 
     runnerv1 "code.forgejo.org/forgejo/actions-proto/runner/v1"
+    "code.forgejo.org/forgejo/runner/v11/act/runner"
     "connectrpc.com/connect"
     retry "github.com/avast/retry-go/v4"
     log "github.com/sirupsen/logrus"

@@ -47,6 +48,7 @@ type Reporter struct {
 
     debugOutputEnabled  bool
     stopCommandEndToken string
+    issuedLocalCancel   bool
 }
 
 func NewReporter(ctx context.Context, cancel context.CancelFunc, c client.Client, task *runnerv1.Task, reportInterval time.Duration) *Reporter {
@@ -130,6 +132,13 @@ func (r *Reporter) Fire(entry *log.Entry) error {
                 }
             }
         }
+        if r.state.Result == runnerv1.Result_RESULT_SUCCESS {
+            if v, ok := entry.Data["jobOutputs"]; ok {
+                _ = r.setOutputs(v.(map[string]string))
+            } else {
+                log.Panicf("received log entry with successful jobResult, but without jobOutputs -- outputs will be corrupted for this job")
+            }
+        }
     }
     if !r.duringSteps() {
         r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
@@ -166,7 +175,7 @@ func (r *Reporter) Fire(entry *log.Entry) error {
     } else if !r.duringSteps() {
         r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
     }
-    if v, ok := entry.Data["stepResult"]; ok {
+    if v := runner.GetOuterStepResult(entry); v != nil {
         if stepResult, ok := r.parseResult(v); ok {
             if step.LogLength == 0 {
                 step.LogIndex = int64(r.logOffset + len(r.logRows))
@@ -184,11 +193,19 @@ func (r *Reporter) RunDaemon() {
         return
     }
     if r.ctx.Err() != nil {
+        // This shouldn't happen because DaemonContext is used for `r.ctx` which should outlive any running job.
+        log.Warnf("Terminating RunDaemon on an active job due to error: %v", r.ctx.Err())
         return
     }
 
-    _ = r.ReportLog(false)
-    _ = r.ReportState()
+    err := r.ReportLog(false)
+    if err != nil {
+        log.Warnf("ReportLog error: %v", err)
+    }
+    err = r.ReportState()
+    if err != nil {
+        log.Warnf("ReportState error: %v", err)
+    }
 
     time.AfterFunc(r.reportInterval, r.RunDaemon)
 }
@@ -209,10 +226,20 @@ func (r *Reporter) logf(format string, a ...any) {
     }
 }
 
-func (r *Reporter) SetOutputs(outputs map[string]string) error {
-    r.stateMu.Lock()
-    defer r.stateMu.Unlock()
+func (r *Reporter) cloneOutputs() map[string]string {
+    outputs := make(map[string]string)
+    r.outputs.Range(func(k, v any) bool {
+        if val, ok := v.(string); ok {
+            outputs[k.(string)] = val
+        }
+        return true
+    })
+    return outputs
+}
+
+// Errors from setOutputs are logged into the reporter automatically; the `errors` return value is only used for unit
+// tests.
+func (r *Reporter) setOutputs(outputs map[string]string) error {
     var errs []error
     recordError := func(format string, a ...any) {
         r.logf(format, a...)
@@ -358,16 +385,9 @@ func (r *Reporter) ReportState() error {
 
     r.stateMu.RLock()
     state := proto.Clone(r.state).(*runnerv1.TaskState)
+    outputs := r.cloneOutputs()
     r.stateMu.RUnlock()
 
-    outputs := make(map[string]string)
-    r.outputs.Range(func(k, v any) bool {
-        if val, ok := v.(string); ok {
-            outputs[k.(string)] = val
-        }
-        return true
-    })
-
     resp, err := r.client.UpdateTask(r.ctx, connect.NewRequest(&runnerv1.UpdateTaskRequest{
         State:   state,
         Outputs: outputs,
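Taking the outputs snapshot while stateMu is still held keeps the cloned map consistent with the cloned TaskState, and neither is touched again while the UpdateTask RPC is in flight. The general shape of that idiom, as a sketch rather than the reporter's code:

import "sync"

// snapshotOutputs copies shared state under the read lock so the
// subsequent network call can run without holding the lock.
func snapshotOutputs(mu *sync.RWMutex, shared map[string]string) map[string]string {
    mu.RLock()
    defer mu.RUnlock()
    snapshot := make(map[string]string, len(shared))
    for k, v := range shared {
        snapshot[k] = v
    }
    return snapshot
}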
@@ -380,7 +400,17 @@ func (r *Reporter) ReportState() error {
         r.outputs.Store(k, struct{}{})
     }
 
-    if resp.Msg.GetState().GetResult() == runnerv1.Result_RESULT_CANCELLED {
+    localResultState := state.GetResult()
+    remoteResultState := resp.Msg.GetState().GetResult()
+    switch remoteResultState {
+    case runnerv1.Result_RESULT_CANCELLED, runnerv1.Result_RESULT_FAILURE:
+        // issuedLocalCancel is just used to deduplicate this log message if our local state doesn't catch up with our
+        // remote state as quickly as the report-interval, which would cause this message to repeat in the logs.
+        if !r.issuedLocalCancel && remoteResultState != localResultState {
+            log.Infof("UpdateTask returned task result %v for a task that was in local state %v - beginning local task termination",
+                remoteResultState, localResultState)
+            r.issuedLocalCancel = true
+        }
         r.cancel()
     }
@@ -78,7 +78,7 @@ func TestReporterSetOutputs(t *testing.T) {
     reporter, _, _ := mockReporter(t)
 
     expected := map[string]string{"a": "b", "c": "d"}
-    assert.NoError(t, reporter.SetOutputs(expected))
+    assert.NoError(t, reporter.setOutputs(expected))
     assertEqual(t, expected, &reporter.outputs)
 })
@@ -93,7 +93,7 @@ func TestReporterSetOutputs(t *testing.T) {
         "c": "ABCDEFG", // value too big
         "d": "e",
     }
-    err := reporter.SetOutputs(in)
+    err := reporter.setOutputs(in)
     assert.ErrorContains(t, err, "ignore output because the length of the value for \"c\" is 7 (the maximum is 5)")
     assert.ErrorContains(t, err, "ignore output because the key is longer than 5: \"0123456\"")
     expected := map[string]string{"d": "e"}
@@ -104,11 +104,11 @@ func TestReporterSetOutputs(t *testing.T) {
     reporter, _, _ := mockReporter(t)
 
     first := map[string]string{"a": "b", "c": "d"}
-    assert.NoError(t, reporter.SetOutputs(first))
+    assert.NoError(t, reporter.setOutputs(first))
     assertEqual(t, first, &reporter.outputs)
 
     second := map[string]string{"c": "d", "e": "f"}
-    assert.ErrorContains(t, reporter.SetOutputs(second), "ignore output because a value already exists for the key \"c\"")
+    assert.ErrorContains(t, reporter.setOutputs(second), "ignore output because a value already exists for the key \"c\"")
 
     expected := map[string]string{"a": "b", "c": "d", "e": "f"}
     assertEqual(t, expected, &reporter.outputs)
@@ -284,6 +284,143 @@ func TestReporter_Fire(t *testing.T) {
 
         assert.Equal(t, int64(3), reporter.state.Steps[0].LogLength)
     })
+
+    t.Run("jobResult jobOutputs extracted from log entry", func(t *testing.T) {
+        reporter, _, _ := mockReporter(t)
+
+        dataStep0 := map[string]any{
+            "stage":      "Post",
+            "stepNumber": 0,
+            "raw_output": true,
+            "jobResult":  "success",
+            "jobOutputs": map[string]string{"key1": "value1"},
+        }
+        assert.NoError(t, reporter.Fire(&log.Entry{Message: "success!", Data: dataStep0}))
+
+        assert.EqualValues(t, runnerv1.Result_RESULT_SUCCESS, reporter.state.Result)
+        value, _ := reporter.outputs.Load("key1")
+        assert.EqualValues(t, "value1", value)
+    })
+
+    t.Run("jobResult jobOutputs is absent if not success", func(t *testing.T) {
+        reporter, _, _ := mockReporter(t)
+
+        dataStep0 := map[string]any{
+            "stage":      "Post",
+            "stepNumber": 0,
+            "raw_output": true,
+            "jobResult":  "skipped",
+        }
+        assert.NoError(t, reporter.Fire(&log.Entry{Message: "skipped!", Data: dataStep0}))
+
+        assert.EqualValues(t, runnerv1.Result_RESULT_SKIPPED, reporter.state.Result)
+    })
+}
+
+func TestReporterReportState(t *testing.T) {
+    for _, testCase := range []struct {
+        name    string
+        fixture func(t *testing.T, reporter *Reporter, client *mocks.Client)
+        assert  func(t *testing.T, reporter *Reporter, ctx context.Context, err error)
+    }{
+        {
+            name: "PartialOutputs",
+            fixture: func(t *testing.T, reporter *Reporter, client *mocks.Client) {
+                t.Helper()
+                outputKey1 := "KEY1"
+                outputValue1 := "VALUE1"
+                outputKey2 := "KEY2"
+                outputValue2 := "VALUE2"
+                reporter.setOutputs(map[string]string{
+                    outputKey1: outputValue1,
+                    outputKey2: outputValue2,
+                })
+
+                client.On("UpdateTask", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) {
+                    t.Logf("Received UpdateTask: %s", req.Msg.String())
+                    return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{
+                        SentOutputs: []string{outputKey1},
+                    }), nil
+                })
+            },
+            assert: func(t *testing.T, reporter *Reporter, ctx context.Context, err error) {
+                t.Helper()
+                require.ErrorContains(t, err, "not all logs are submitted 1 remain")
+                outputs := reporter.cloneOutputs()
+                assert.Equal(t, map[string]string{
+                    "KEY2": "VALUE2",
+                }, outputs)
+                assert.NoError(t, ctx.Err())
+            },
+        },
+        {
+            name: "AllDone",
+            fixture: func(t *testing.T, reporter *Reporter, client *mocks.Client) {
+                t.Helper()
+                client.On("UpdateTask", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) {
+                    t.Logf("Received UpdateTask: %s", req.Msg.String())
+                    return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{}), nil
+                })
+            },
+            assert: func(t *testing.T, reporter *Reporter, ctx context.Context, err error) {
+                t.Helper()
+                require.NoError(t, err)
+                assert.NoError(t, ctx.Err())
+            },
+        },
+        {
+            name: "Canceled",
+            fixture: func(t *testing.T, reporter *Reporter, client *mocks.Client) {
+                t.Helper()
+                client.On("UpdateTask", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) {
+                    t.Logf("Received UpdateTask: %s", req.Msg.String())
+                    return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{
+                        State: &runnerv1.TaskState{
+                            Result: runnerv1.Result_RESULT_CANCELLED,
+                        },
+                    }), nil
+                })
+            },
+            assert: func(t *testing.T, reporter *Reporter, ctx context.Context, err error) {
+                t.Helper()
+                require.NoError(t, err)
+                assert.ErrorIs(t, ctx.Err(), context.Canceled)
+            },
+        },
+        {
+            name: "Failed",
+            fixture: func(t *testing.T, reporter *Reporter, client *mocks.Client) {
+                t.Helper()
+                client.On("UpdateTask", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) {
+                    t.Logf("Received UpdateTask: %s", req.Msg.String())
+                    return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{
+                        State: &runnerv1.TaskState{
+                            Result: runnerv1.Result_RESULT_FAILURE,
+                        },
+                    }), nil
+                })
+            },
+            assert: func(t *testing.T, reporter *Reporter, ctx context.Context, err error) {
+                t.Helper()
+                require.NoError(t, err)
+                assert.ErrorIs(t, ctx.Err(), context.Canceled)
+            },
+        },
+    } {
+        t.Run(testCase.name, func(t *testing.T) {
+            client := mocks.NewClient(t)
+            ctx, cancel := context.WithCancel(context.Background())
+            taskCtx, err := structpb.NewStruct(map[string]any{})
+            require.NoError(t, err)
+            reporter := NewReporter(common.WithDaemonContext(ctx, t.Context()), cancel, client, &runnerv1.Task{
+                Context: taskCtx,
+            }, time.Second)
+
+            testCase.fixture(t, reporter, client)
+            err = reporter.ReportState()
+            testCase.assert(t, reporter, ctx, err)
+        })
+    }
+}
 
 func TestReporterReportLogLost(t *testing.T) {
@@ -4,11 +4,22 @@
   "prConcurrentLimit": 1,
   "packageRules": [
     {
-      "description": "Separate minor and patch for some packages",
+      "description": "separate minor and patch for some packages",
       "matchDepNames": ["github.com/rhysd/actionlint"],
       "separateMinorPatch": true
+    },
+    {
+      "description": "separate multiple major and minor for forgejo",
+      "matchDepNames": ["code.forgejo.org/forgejo/forgejo"],
+      "separateMultipleMajor": true,
+      "separateMultipleMinor": true
+    },
+    {
+      "description": "group runner updates",
+      "matchDepNames": ["code.forgejo.org/forgejo/runner", "forgejo/runner"],
+      "groupName": "forgejo-runner"
     }
   ],
-  "labels": ["Kind/Chore", "run-end-to-end-tests"],
+  "labels": ["Kind/DependencyUpdate", "run-end-to-end-tests"],
   "ignorePaths": ["**/testdata/**", "**/node_modules/**"]
 }