Compare commits

main..v11.1.0

No commits in common. "main" and "v11.1.0" have entirely different histories.

53 changed files with 430 additions and 1601 deletions

View file

@@ -1,27 +0,0 @@
name: ci
on:
push:
tags:
- v*
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version: ">=1.25.1"
- name: Test code
run: make test
- name: Run GoReleaser
uses: https://github.com/goreleaser/goreleaser-action@v6
env:
GITEA_TOKEN: ${{ secrets.PACKAGES_TOKEN }}
with:
args: release --clean

View file

@@ -19,7 +19,7 @@ on:
enable-email-notifications: true
env:
FORGEJO_VERSION: 11.0.7 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
FORGEJO_VERSION: 11.0.5 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
jobs:
release-simulation:
@@ -29,7 +29,7 @@ jobs:
- uses: https://data.forgejo.org/actions/checkout@v4
- id: forgejo
uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.4
uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.3
with:
user: root
password: admin1234

View file

@@ -21,7 +21,7 @@ on:
enable-email-notifications: true
env:
FORGEJO_VERSION: 11.0.7 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
FORGEJO_VERSION: 11.0.5 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
FORGEJO_USER: root
FORGEJO_PASSWORD: admin1234
@@ -34,7 +34,7 @@ jobs:
- name: install Forgejo so it can be used as a container registry
id: registry
uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.4
uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.3
with:
user: ${{ env.FORGEJO_USER }}
password: ${{ env.FORGEJO_PASSWORD }}

View file

@@ -14,12 +14,11 @@ env:
SERIAL: "30"
LIFETIME: "60"
SYSTEMD_OPTIONS: "--no-pager --full"
USE_VERSION: 11.0.7 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo
jobs:
example-lxc-systemd:
if: vars.ROLE == 'forgejo-coding'
runs-on: lxc-trixie
runs-on: lxc-bookworm
steps:
- uses: https://data.forgejo.org/actions/checkout@v4
@@ -54,11 +53,11 @@ jobs:
done
- id: forgejo
uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.4
uses: https://data.forgejo.org/actions/setup-forgejo@v3.0.3
with:
user: root
password: admin1234
binary: https://code.forgejo.org/forgejo/forgejo/releases/download/v${{ env.USE_VERSION }}/forgejo-${{ env.USE_VERSION }}-linux-amd64
binary: https://code.forgejo.org/forgejo/forgejo/releases/download/v7.0.12/forgejo-7.0.12-linux-amd64
# must be the same as LXC_IPV4_PREFIX in examples/lxc-systemd/forgejo-runner-service.sh
lxc-ip-prefix: 10.105.7

View file

@@ -1,60 +0,0 @@
version: 2
before:
hooks:
- go mod download
builds:
- env:
- CGO_ENABLED=0
goos:
- linux
- darwin
- windows
goarch:
- amd64
- arm64
archives:
- formats: [binary]
# this name template makes the OS and Arch compatible with the results of `uname`.
name_template: >-
{{ .ProjectName }}_
{{- title .Os }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
changelog:
abbrev: 10
filters:
exclude:
- "^docs:"
- "^test:"
format: "{{.SHA}}: {{.Message}}"
groups:
- title: Features
regexp: '^.*?feat(\([[:word:]]+\))??!?:.+$'
order: 0
- title: "Bug fixes"
regexp: '^.*?fix(\([[:word:]]+\))??!?:.+$'
order: 1
- title: "Chores"
regexp: '^.*?chore(\([[:word:]]+\))??!?:.+$'
order: 2
- title: Others
order: 999
sort: asc
release:
gitea:
owner: DevFW-CICD
name: runner
force_token: gitea
gitea_urls:
api: https://edp.buildth.ing/api/v1
download: https://edp.buildth.ing
# set to true if you use a self-signed certificate
skip_tls_verify: false
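
For reference, the `name_template` in the archives section above renders names such as runner_Linux_x86_64 or runner_Darwin_arm64, matching `uname -s` and `uname -m` as its comment says. A hedged Go sketch of the naming rule it encodes (the function names are invented for illustration and are not part of the config; the ARM-variant suffix is omitted):

package main

import "fmt"

// unameArch maps Go GOARCH values onto what `uname -m` reports.
func unameArch(goarch string) string {
	switch goarch {
	case "amd64":
		return "x86_64"
	case "386":
		return "i386"
	default:
		return goarch // arm64 and others pass through unchanged
	}
}

// archiveName mirrors the template: {{ .ProjectName }}_{{ title .Os }}_<arch>.
func archiveName(project, goos, goarch string) string {
	titled := string(goos[0]-'a'+'A') + goos[1:] // title-case the OS like {{ title .Os }}
	return fmt.Sprintf("%s_%s_%s", project, titled, unameArch(goarch))
}

func main() {
	fmt.Println(archiveName("runner", "linux", "amd64"))  // runner_Linux_x86_64
	fmt.Println(archiveName("runner", "darwin", "arm64")) // runner_Darwin_arm64
}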

View file

@@ -11,3 +11,6 @@
# Old names (without the pre- prefix) are deprecated since 4.0.0.
minimum_pre_commit_version: '3.2.0'
stages: [pre-commit, pre-merge-commit, pre-push, manual]
# validate doesn't currently provide non-success exit codes,
# so falling back to always printing verbose output for now.
verbose: true

View file

@@ -1,6 +1,6 @@
FROM --platform=$BUILDPLATFORM data.forgejo.org/oci/xx AS xx
FROM --platform=$BUILDPLATFORM data.forgejo.org/oci/golang:1.25-alpine3.22 AS build-env
FROM --platform=$BUILDPLATFORM data.forgejo.org/oci/golang:1.24-alpine3.22 AS build-env
#
# Transparently cross compile for the target platform

View file

@@ -14,7 +14,7 @@ GO_FMT_FILES := $(shell find . -type f -name "*.go" ! -name "generated.*")
GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "generated.*")
MOCKERY_PACKAGE ?= github.com/vektra/mockery/v2@v2.53.5 # renovate: datasource=go
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.5.0 # renovate: datasource=go
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.4.0 # renovate: datasource=go
DOCKER_IMAGE ?= gitea/act_runner
DOCKER_TAG ?= nightly

View file

@@ -19,13 +19,12 @@ import (
//go:generate mockery --inpackage --name caches
type caches interface {
getDB() *bolthold.Store
openDB() (*bolthold.Store, error)
validateMac(rundata RunData) (string, error)
readCache(id uint64, repo string) (*Cache, error)
useCache(id uint64) error
setgcAt(at time.Time)
gcCache()
close()
serve(w http.ResponseWriter, r *http.Request, id uint64)
commit(id uint64, size int64) (int64, error)
@@ -39,8 +38,6 @@ type cachesImpl struct {
logger logrus.FieldLogger
secret string
db *bolthold.Store
gcing atomic.Bool
gcAt time.Time
}
@@ -71,6 +68,12 @@ func newCaches(dir, secret string, logger logrus.FieldLogger) (caches, error) {
}
c.storage = storage
c.gcCache()
return c, nil
}
func (c *cachesImpl) openDB() (*bolthold.Store, error) {
file := filepath.Join(c.dir, "bolt.db")
db, err := bolthold.Open(file, 0o644, &bolthold.Options{
Encoder: json.Marshal,
@@ -84,22 +87,7 @@ func newCaches(dir, secret string, logger logrus.FieldLogger) (caches, error) {
if err != nil {
return nil, fmt.Errorf("Open(%s): %w", file, err)
}
c.db = db
c.gcCache()
return c, nil
}
func (c *cachesImpl) close() {
if c.db != nil {
c.db.Close()
c.db = nil
}
}
func (c *cachesImpl) getDB() *bolthold.Store {
return c.db
return db, nil
}
var findCacheWithIsolationKeyFallback = func(db *bolthold.Store, repo string, keys []string, version, writeIsolationKey string) (*Cache, error) {
@@ -168,7 +156,11 @@ func insertCache(db *bolthold.Store, cache *Cache) error {
}
func (c *cachesImpl) readCache(id uint64, repo string) (*Cache, error) {
db := c.getDB()
db, err := c.openDB()
if err != nil {
return nil, err
}
defer db.Close()
cache := &Cache{}
if err := db.Get(id, cache); err != nil {
return nil, fmt.Errorf("readCache: Get(%v): %w", id, err)
@@ -181,7 +173,11 @@ func (c *cachesImpl) readCache(id uint64, repo string) (*Cache, error) {
}
func (c *cachesImpl) useCache(id uint64) error {
db := c.getDB()
db, err := c.openDB()
if err != nil {
return err
}
defer db.Close()
cache := &Cache{}
if err := db.Get(id, cache); err != nil {
return fmt.Errorf("useCache: Get(%v): %w", id, err)
@@ -236,7 +232,12 @@ func (c *cachesImpl) gcCache() {
c.gcAt = time.Now()
c.logger.Debugf("gc: %v", c.gcAt.String())
db := c.getDB()
db, err := c.openDB()
if err != nil {
fatal(c.logger, err)
return
}
defer db.Close()
// Remove the caches which are not completed for a while, they are most likely to be broken.
var caches []*Cache
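
This file carries the main behavioural split between the two histories: one side keeps a single long-lived bolthold handle (`getDB`/`close`), while the other opens `bolt.db` for each operation and closes it with `defer db.Close()`. A minimal sketch of the open-per-operation pattern, assuming the upstream `github.com/timshannon/bolthold` package (the `withDB` helper is invented for illustration and does not appear in the diff):

package main

import (
	"encoding/json"
	"fmt"
	"path/filepath"

	"github.com/timshannon/bolthold"
)

// withDB opens the store, runs one cache operation, and closes it again,
// so no database handle outlives the request that needed it.
func withDB(dir string, fn func(db *bolthold.Store) error) error {
	file := filepath.Join(dir, "bolt.db")
	db, err := bolthold.Open(file, 0o644, &bolthold.Options{
		Encoder: json.Marshal,
		Decoder: json.Unmarshal,
	})
	if err != nil {
		return fmt.Errorf("Open(%s): %w", file, err)
	}
	defer db.Close()
	return fn(db)
}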

View file

@@ -14,7 +14,6 @@ import (
func TestCacheReadWrite(t *testing.T) {
caches, err := newCaches(t.TempDir(), "secret", logrus.New())
require.NoError(t, err)
defer caches.close()
t.Run("NotFound", func(t *testing.T) {
found, err := caches.readCache(456, "repo")
assert.Nil(t, found)
@@ -34,7 +33,9 @@ func TestCacheReadWrite(t *testing.T) {
cache.Repo = repo
t.Run("Insert", func(t *testing.T) {
db := caches.getDB()
db, err := caches.openDB()
require.NoError(t, err)
defer db.Close()
assert.NoError(t, insertCache(db, cache))
})

View file

@@ -9,6 +9,7 @@ import (
"net/http"
"strconv"
"strings"
"syscall"
"time"
"github.com/julienschmidt/httprouter"
@@ -24,7 +25,7 @@ const (
var fatal = func(logger logrus.FieldLogger, err error) {
logger.Errorf("unrecoverable error in the cache: %v", err)
if err := suicide(); err != nil {
if err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM); err != nil {
logger.Errorf("unrecoverable error in the cache: failed to send the TERM signal to shutdown the daemon %v", err)
}
}
@@ -121,10 +122,6 @@ func (h *handler) Close() error {
return nil
}
var retErr error
if h.caches != nil {
h.caches.close()
h.caches = nil
}
if h.server != nil {
err := h.server.Close()
if err != nil {
@@ -154,9 +151,6 @@ func (h *handler) getCaches() caches {
}
func (h *handler) setCaches(caches caches) {
if h.caches != nil {
h.caches.close()
}
h.caches = caches
}
@@ -176,7 +170,12 @@ func (h *handler) find(w http.ResponseWriter, r *http.Request, params httprouter
}
version := r.URL.Query().Get("version")
db := h.caches.getDB()
db, err := h.caches.openDB()
if err != nil {
h.responseFatalJSON(w, r, err)
return
}
defer db.Close()
cache, err := findCacheWithIsolationKeyFallback(db, repo, keys, version, rundata.WriteIsolationKey)
if err != nil {
@@ -222,7 +221,12 @@ func (h *handler) reserve(w http.ResponseWriter, r *http.Request, params httprou
api.Key = strings.ToLower(api.Key)
cache := api.ToCache()
db := h.caches.getDB()
db, err := h.caches.openDB()
if err != nil {
h.responseFatalJSON(w, r, err)
return
}
defer db.Close()
now := time.Now().Unix()
cache.CreatedAt = now
@@ -331,7 +335,12 @@ func (h *handler) commit(w http.ResponseWriter, r *http.Request, params httprout
// write real size back to cache, it may be different from the current value when the request doesn't specify it.
cache.Size = size
db := h.caches.getDB()
db, err := h.caches.openDB()
if err != nil {
h.responseFatalJSON(w, r, err)
return
}
defer db.Close()
cache.Complete = true
if err := db.Update(cache.ID, cache); err != nil {
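
The other notable change in this file is the `fatal` helper: one history calls a platform-specific `suicide()` (the suicide_*.go files further down), the other inlines `syscall.Kill(syscall.Getpid(), syscall.SIGTERM)`, which does not exist on Windows. In both cases the intent is the same: reuse the daemon's normal signal-driven shutdown rather than calling os.Exit. A minimal Unix-only sketch of that pattern, assuming a daemon that already traps SIGTERM:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM) // the daemon's normal shutdown path

	go func() {
		// Simulated unrecoverable cache error: signal ourselves instead of
		// exiting, so the shutdown handler and deferred cleanup still run.
		if err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM); err != nil {
			fmt.Fprintln(os.Stderr, "failed to send TERM:", err)
		}
	}()

	<-sigs
	fmt.Println("graceful shutdown")
}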

View file

@@ -78,7 +78,9 @@ func TestHandler(t *testing.T) {
defer func() {
t.Run("inspect db", func(t *testing.T) {
db := handler.getCaches().getDB()
db, err := handler.getCaches().openDB()
require.NoError(t, err)
defer db.Close()
require.NoError(t, db.Bolt().View(func(tx *bbolt.Tx) error {
return tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error {
t.Logf("%s: %s", k, v)
@@ -935,11 +937,40 @@ func TestHandlerAPIFatalErrors(t *testing.T) {
handler.find(w, req, nil)
},
},
{
name: "find open",
caches: func(t *testing.T, message string) caches {
caches := newMockCaches(t)
caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
caches.On("openDB", mock.Anything, mock.Anything).Return(nil, errors.New(message))
return caches
},
call: func(t *testing.T, handler Handler, w http.ResponseWriter) {
req, err := http.NewRequest("GET", "example.com/cache", nil)
require.NoError(t, err)
handler.find(w, req, nil)
},
},
{
name: "reserve",
caches: func(t *testing.T, message string) caches {
caches := newMockCaches(t)
caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
caches.On("openDB", mock.Anything, mock.Anything).Return(nil, errors.New(message))
return caches
},
call: func(t *testing.T, handler Handler, w http.ResponseWriter) {
body, err := json.Marshal(&Request{})
require.NoError(t, err)
req, err := http.NewRequest("POST", "example.com/caches", bytes.NewReader(body))
require.NoError(t, err)
handler.reserve(w, req, nil)
},
},
{
name: "upload",
caches: func(t *testing.T, message string) caches {
caches := newMockCaches(t)
caches.On("close").Return()
caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
caches.On("readCache", mock.Anything, mock.Anything).Return(nil, errors.New(message))
return caches
@@ -957,7 +988,6 @@ func TestHandlerAPIFatalErrors(t *testing.T) {
name: "commit",
caches: func(t *testing.T, message string) caches {
caches := newMockCaches(t)
caches.On("close").Return()
caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
caches.On("readCache", mock.Anything, mock.Anything).Return(nil, errors.New(message))
return caches
@@ -975,7 +1005,6 @@ func TestHandlerAPIFatalErrors(t *testing.T) {
name: "get",
caches: func(t *testing.T, message string) caches {
caches := newMockCaches(t)
caches.On("close").Return()
caches.On("validateMac", RunData{}).Return(cacheRepo, nil)
caches.On("readCache", mock.Anything, mock.Anything).Return(nil, errors.New(message))
return caches
@@ -1013,12 +1042,10 @@ func TestHandlerAPIFatalErrors(t *testing.T) {
dir := filepath.Join(t.TempDir(), "artifactcache")
handler, err := StartHandler(dir, "", 0, "secret", nil)
require.NoError(t, err)
defer handler.Close()
fatalMessage = "<unset>"
caches := testCase.caches(t, message) // doesn't need to be closed because it will be given to handler
handler.setCaches(caches)
handler.setCaches(testCase.caches(t, message))
w := httptest.NewRecorder()
testCase.call(t, handler, w)
@@ -1111,15 +1138,18 @@ func TestHandler_gcCache(t *testing.T) {
},
}
db := handler.getCaches().getDB()
db, err := handler.getCaches().openDB()
require.NoError(t, err)
for _, c := range cases {
require.NoError(t, insertCache(db, c.Cache))
}
require.NoError(t, db.Close())
handler.getCaches().setgcAt(time.Time{}) // ensure gcCache will not skip
handler.getCaches().gcCache()
db = handler.getCaches().getDB()
db, err = handler.getCaches().openDB()
require.NoError(t, err)
for i, v := range cases {
t.Run(fmt.Sprintf("%d_%s", i, v.Cache.Key), func(t *testing.T) {
cache := &Cache{}
@@ -1131,6 +1161,7 @@ }
}
})
}
require.NoError(t, db.Close())
}
func TestHandler_ExternalURL(t *testing.T) {

View file

@@ -19,11 +19,6 @@ type mockCaches struct {
mock.Mock
}
// close provides a mock function with no fields
func (_m *mockCaches) close() {
_m.Called()
}
// commit provides a mock function with given fields: id, size
func (_m *mockCaches) commit(id uint64, size int64) (int64, error) {
ret := _m.Called(id, size)
@@ -85,15 +80,19 @@ func (_m *mockCaches) gcCache() {
_m.Called()
}
// getDB provides a mock function with no fields
func (_m *mockCaches) getDB() *bolthold.Store {
// openDB provides a mock function with no fields
func (_m *mockCaches) openDB() (*bolthold.Store, error) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for getDB")
panic("no return value specified for openDB")
}
var r0 *bolthold.Store
var r1 error
if rf, ok := ret.Get(0).(func() (*bolthold.Store, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() *bolthold.Store); ok {
r0 = rf()
} else {
@@ -102,7 +101,13 @@ func (_m *mockCaches) getDB() *bolthold.Store {
}
}
return r0
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// readCache provides a mock function with given fields: id, repo

View file

@@ -1,9 +0,0 @@
//go:build !windows
package artifactcache
import "syscall"
func suicide() error {
return syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
}

View file

@@ -1,14 +0,0 @@
//go:build windows
package artifactcache
import "syscall"
func suicide() error {
handle, err := syscall.GetCurrentProcess()
if err != nil {
return err
}
return syscall.TerminateProcess(handle, uint32(syscall.SIGTERM))
}

View file

@@ -37,8 +37,7 @@ type Handler struct {
outboundIP string
cacheServerHost string
cacheProxyHostOverride string
cacheServerHost string
cacheSecret string
@@ -56,7 +55,7 @@ func (h *Handler) CreateRunData(fullName, runNumber, timestamp, writeIsolationKe
}
}
func StartHandler(targetHost, outboundIP string, port uint16, cacheProxyHostOverride, cacheSecret string, logger logrus.FieldLogger) (*Handler, error) {
func StartHandler(targetHost, outboundIP string, port uint16, cacheSecret string, logger logrus.FieldLogger) (*Handler, error) {
h := &Handler{}
if logger == nil {
@@ -64,7 +63,7 @@ func StartHandler(targetHost, outboundIP string, port uint16, cacheProxyHostOver
discard.Out = io.Discard
logger = discard
}
logger = logger.WithField("module", "cacheproxy")
logger = logger.WithField("module", "artifactcache")
h.logger = logger
h.cacheSecret = cacheSecret
@@ -78,11 +77,10 @@ }
}
h.cacheServerHost = targetHost
h.cacheProxyHostOverride = cacheProxyHostOverride
proxy, err := h.newReverseProxy(targetHost)
if err != nil {
return nil, fmt.Errorf("unable to set up proxy to target host: %v", err)
return nil, fmt.Errorf("unable to set up proxy to target host")
}
router := httprouter.New()
@@ -139,7 +137,6 @@ func (h *Handler) newReverseProxy(targetHost string) (*httputil.ReverseProxy, er
r.SetURL(targetURL)
r.Out.URL.Path = uri
h.logger.Debugf("proxy req %s %q to %q", r.In.Method, r.In.URL, r.Out.URL)
r.Out.Header.Set("Forgejo-Cache-Repo", runData.RepositoryFullName)
r.Out.Header.Set("Forgejo-Cache-RunNumber", runData.RunNumber)
@@ -151,18 +148,12 @@ func (h *Handler) newReverseProxy(targetHost string) (*httputil.ReverseProxy, er
r.Out.Header.Set("Forgejo-Cache-WriteIsolationKey", runData.WriteIsolationKey)
}
},
ModifyResponse: func(r *http.Response) error {
h.logger.Debugf("proxy resp %s w/ %d bytes", r.Status, r.ContentLength)
return nil
},
}
return proxy, nil
}
func (h *Handler) ExternalURL() string {
if h.cacheProxyHostOverride != "" {
return h.cacheProxyHostOverride
}
// TODO: make the external url configurable if necessary
return fmt.Sprintf("http://%s", net.JoinHostPort(h.outboundIP, strconv.Itoa(h.listener.Addr().(*net.TCPAddr).Port)))
}

View file

@@ -305,7 +305,7 @@ func gitOptions(token string) (fetchOptions git.FetchOptions, pullOptions git.Pu
func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
logger.Infof(" \u2601\ufe0f git clone '%s' # ref=%s", input.URL, input.Ref)
logger.Infof(" \u2601 git clone '%s' # ref=%s", input.URL, input.Ref)
logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
cloneLock.Lock()

View file

@@ -483,14 +483,6 @@ func (cr *containerReference) mergeJobOptions(ctx context.Context, config *conta
}
}
if jobConfig.HostConfig.Memory > 0 {
logger.Debugf("--memory %v", jobConfig.HostConfig.Memory)
if hostConfig.Memory > 0 && jobConfig.HostConfig.Memory > hostConfig.Memory {
return nil, nil, fmt.Errorf("the --memory %v option found in the workflow cannot be greater than the --memory %v option from the runner configuration file", jobConfig.HostConfig.Memory, hostConfig.Memory)
}
hostConfig.Memory = jobConfig.HostConfig.Memory
}
if len(jobConfig.Config.Hostname) > 0 {
logger.Debugf("--hostname %v", jobConfig.Config.Hostname)
config.Hostname = jobConfig.Config.Hostname

View file

@@ -34,6 +34,7 @@ type HostEnvironment struct {
Workdir string
ActPath string
Root string
CleanUp func()
StdOut io.Writer
LXC bool
}
@@ -388,7 +389,7 @@ func (e *HostEnvironment) ExecWithCmdLine(command []string, cmdline string, env
if err := e.exec(ctx, command, cmdline, env, user, workdir); err != nil {
select {
case <-ctx.Done():
return fmt.Errorf("this step has been cancelled: ctx: %w, exec: %w", ctx.Err(), err)
return fmt.Errorf("this step has been cancelled: %w", err)
default:
return err
}
@@ -403,12 +404,11 @@ func (e *HostEnvironment) UpdateFromEnv(srcPath string, env *map[string]string)
func (e *HostEnvironment) Remove() common.Executor {
return func(ctx context.Context) error {
if e.GetLXC() {
// there may be files owned by root: removal
// is the responsibility of the LXC backend
return nil
if e.CleanUp != nil {
e.CleanUp()
}
return os.RemoveAll(e.Root)
common.Logger(ctx).Debugf("HostEnvironment.Remove %s", e.Path)
return os.RemoveAll(e.Path)
}
}

View file

@@ -76,36 +76,6 @@ func NewInterpeter(
return exprparser.NewInterpeter(ee, config)
}
// Returns an interpreter used in the server in the context of workflow-level templates. Needs github, inputs, and vars
// context only.
func NewWorkflowInterpeter(
gitCtx *model.GithubContext,
vars map[string]string,
inputs map[string]any,
) exprparser.Interpreter {
ee := &exprparser.EvaluationEnvironment{
Github: gitCtx,
Env: nil, // no need
Job: nil, // no need
Steps: nil, // no need
Runner: nil, // no need
Secrets: nil, // no need
Strategy: nil, // no need
Matrix: nil, // no need
Needs: nil, // no need
Inputs: inputs,
Vars: vars,
}
config := exprparser.Config{
Run: nil,
WorkingDir: "", // WorkingDir is used for the function hashFiles, but it's not needed in the server
Context: "workflow",
}
return exprparser.NewInterpeter(ee, config)
}
// JobResult is the minimum requirement of job results for Interpeter
type JobResult struct {
Needs []string

View file

@@ -1,8 +1,8 @@
package jobparser
import (
"bytes"
"fmt"
"strings"
"code.forgejo.org/forgejo/runner/v11/act/model"
"go.yaml.in/yaml/v3"
@@ -193,32 +193,83 @@ func (evt *Event) Schedules() []map[string]string {
return evt.schedules
}
// Convert the raw YAML from the `concurrency` block on a workflow into the evaluated concurrency group and
// cancel-in-progress value. This implementation only supports workflow-level concurrency definition, where we expect
// expressions to be able to access only the github, inputs and vars contexts. If RawConcurrency is empty, then the
// returned concurrency group will be "" and cancel-in-progress will be nil -- this can be used to distinguish from an
// explicit cancel-in-progress choice even if a group isn't specified.
func EvaluateWorkflowConcurrency(rc *model.RawConcurrency, gitCtx *model.GithubContext, vars map[string]string, inputs map[string]any) (string, *bool, error) {
evaluator := NewExpressionEvaluator(NewWorkflowInterpeter(gitCtx, vars, inputs))
func ReadWorkflowRawConcurrency(content []byte) (*model.RawConcurrency, error) {
w := new(model.Workflow)
err := yaml.NewDecoder(bytes.NewReader(content)).Decode(w)
return w.RawConcurrency, err
}
func EvaluateConcurrency(rc *model.RawConcurrency, jobID string, job *Job, gitCtx map[string]any, results map[string]*JobResult, vars map[string]string, inputs map[string]any) (string, bool, error) {
actJob := &model.Job{}
if job != nil {
actJob.Strategy = &model.Strategy{
FailFastString: job.Strategy.FailFastString,
MaxParallelString: job.Strategy.MaxParallelString,
RawMatrix: job.Strategy.RawMatrix,
}
actJob.Strategy.FailFast = actJob.Strategy.GetFailFast()
actJob.Strategy.MaxParallel = actJob.Strategy.GetMaxParallel()
}
matrix := make(map[string]any)
matrixes, err := actJob.GetMatrixes()
if err != nil {
return "", false, err
}
if len(matrixes) > 0 {
matrix = matrixes[0]
}
evaluator := NewExpressionEvaluator(NewInterpeter(jobID, actJob, matrix, toGitContext(gitCtx), results, vars, inputs))
var node yaml.Node
if err := node.Encode(rc); err != nil {
return "", nil, fmt.Errorf("failed to encode concurrency: %w", err)
return "", false, fmt.Errorf("failed to encode concurrency: %w", err)
}
if err := evaluator.EvaluateYamlNode(&node); err != nil {
return "", nil, fmt.Errorf("failed to evaluate concurrency: %w", err)
return "", false, fmt.Errorf("failed to evaluate concurrency: %w", err)
}
var evaluated model.RawConcurrency
if err := node.Decode(&evaluated); err != nil {
return "", nil, fmt.Errorf("failed to unmarshal evaluated concurrency: %w", err)
return "", false, fmt.Errorf("failed to unmarshal evaluated concurrency: %w", err)
}
if evaluated.RawExpression != "" {
return evaluated.RawExpression, nil, nil
return evaluated.RawExpression, false, nil
}
if evaluated.CancelInProgress == "" {
return evaluated.Group, nil, nil
return evaluated.Group, evaluated.CancelInProgress == "true", nil
}
func toGitContext(input map[string]any) *model.GithubContext {
gitContext := &model.GithubContext{
EventPath: asString(input["event_path"]),
Workflow: asString(input["workflow"]),
RunID: asString(input["run_id"]),
RunNumber: asString(input["run_number"]),
Actor: asString(input["actor"]),
Repository: asString(input["repository"]),
EventName: asString(input["event_name"]),
Sha: asString(input["sha"]),
Ref: asString(input["ref"]),
RefName: asString(input["ref_name"]),
RefType: asString(input["ref_type"]),
HeadRef: asString(input["head_ref"]),
BaseRef: asString(input["base_ref"]),
Token: asString(input["token"]),
Workspace: asString(input["workspace"]),
Action: asString(input["action"]),
ActionPath: asString(input["action_path"]),
ActionRef: asString(input["action_ref"]),
ActionRepository: asString(input["action_repository"]),
Job: asString(input["job"]),
RepositoryOwner: asString(input["repository_owner"]),
RetentionDays: asString(input["retention_days"]),
}
cancelInProgress := evaluated.CancelInProgress == "true"
return evaluated.Group, &cancelInProgress, nil
event, ok := input["event"].(map[string]any)
if ok {
gitContext.Event = event
}
return gitContext
}
func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
@@ -227,7 +278,7 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
var val string
err := rawOn.Decode(&val)
if err != nil {
return nil, fmt.Errorf("unable to interpret scalar value into a string: %w", err)
return nil, err
}
return []*Event{
{Name: val},
@@ -239,12 +290,12 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
return nil, err
}
res := make([]*Event, 0, len(val))
for i, v := range val {
for _, v := range val {
switch t := v.(type) {
case string:
res = append(res, &Event{Name: t})
default:
return nil, fmt.Errorf("value at index %d was unexpected type %[2]T; must be a string but was %#[2]v", i, v)
return nil, fmt.Errorf("invalid type %T", t)
}
}
return res, nil
@@ -264,6 +315,16 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
continue
}
switch t := v.(type) {
case string:
res = append(res, &Event{
Name: k,
acts: map[string][]string{},
})
case []string:
res = append(res, &Event{
Name: k,
acts: map[string][]string{},
})
case map[string]any:
acts := make(map[string][]string, len(t))
for act, branches := range t {
@@ -277,15 +338,15 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
for i, v := range b {
var ok bool
if acts[act][i], ok = v.(string); !ok {
return nil, fmt.Errorf("key %q.%q index %d had unexpected type %[4]T; a string was expected but was %#[4]v", k, act, i, v)
return nil, fmt.Errorf("unknown on type: %#v", branches)
}
}
case map[string]any:
if err := isInvalidOnType(k, act); err != nil {
return nil, fmt.Errorf("invalid value on key %q: %w", k, err)
if isInvalidOnType(k, act) {
return nil, fmt.Errorf("unknown on type: %#v", v)
}
default:
return nil, fmt.Errorf("key %q.%q had unexpected type %T; was %#v", k, act, branches, branches)
return nil, fmt.Errorf("unknown on type: %#v", branches)
}
}
if k == "workflow_dispatch" || k == "workflow_call" {
@@ -297,22 +358,19 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
})
case []any:
if k != "schedule" {
return nil, fmt.Errorf("key %q had an unexpected type %T; only the 'schedule' key is expected with this type", k, v)
return nil, fmt.Errorf("unknown on type: %#v", v)
}
schedules := make([]map[string]string, len(t))
for i, tt := range t {
vv, ok := tt.(map[string]any)
if !ok {
return nil, fmt.Errorf("key %q[%d] had unexpected type %[3]T; a map with a key \"cron\" was expected, but value was %#[3]v", k, i, tt)
return nil, fmt.Errorf("unknown on type: %#v", v)
}
schedules[i] = make(map[string]string, len(vv))
for kk, vvv := range vv {
if strings.ToLower(kk) != "cron" {
return nil, fmt.Errorf("key %q[%d] had unexpected key %q; \"cron\" was expected", k, i, kk)
}
for k, vvv := range vv {
var ok bool
if schedules[i][kk], ok = vvv.(string); !ok {
return nil, fmt.Errorf("key %q[%d].%q had unexpected type %[4]T; a string was expected but was %#[4]v", k, i, kk, vvv)
if schedules[i][k], ok = vvv.(string); !ok {
return nil, fmt.Errorf("unknown on type: %#v", v)
}
}
}
@@ -321,29 +379,23 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
schedules: schedules,
})
default:
return nil, fmt.Errorf("key %q had unexpected type %[2]T; expected a map or array but was %#[2]v", k, v)
return nil, fmt.Errorf("unknown on type: %#v", v)
}
}
return res, nil
default:
return nil, fmt.Errorf("unexpected yaml node in `on`: %v", rawOn.Kind)
return nil, fmt.Errorf("unknown on type: %v", rawOn.Kind)
}
}
func isInvalidOnType(onType, subKey string) error {
if onType == "workflow_dispatch" {
if subKey == "inputs" {
return nil
}
return fmt.Errorf("workflow_dispatch only supports key \"inputs\", but key %q was found", subKey)
func isInvalidOnType(onType, subKey string) bool {
if onType == "workflow_dispatch" && subKey == "inputs" {
return false
}
if onType == "workflow_call" {
if subKey == "inputs" || subKey == "outputs" {
return nil
}
return fmt.Errorf("workflow_call only supports keys \"inputs\" and \"outputs\", but key %q was found", subKey)
if onType == "workflow_call" && (subKey == "inputs" || subKey == "outputs") {
return false
}
return fmt.Errorf("unexpected key %q.%q", onType, subKey)
return true
}
// parseMappingNode parse a mapping node and preserve order.
@@ -378,3 +430,12 @@ func parseMappingNode[T any](node *yaml.Node) ([]string, []T, error) {
return scalars, datas, nil
}
func asString(v any) string {
if v == nil {
return ""
} else if s, ok := v.(string); ok {
return s
}
return ""
}
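
The doc comment above `EvaluateWorkflowConcurrency` is the key behavioural note in this file: returning a `*bool` lets a caller tell "cancel-in-progress was never specified" (nil) apart from an explicit false, a distinction the plain bool returned by the other side's `EvaluateConcurrency` cannot express. A sketch of how a caller might consume the tri-state result; `resolveCancelInProgress` and `serverDefault` are invented for illustration:

package jobparser

// resolveCancelInProgress applies a fallback only when the workflow's
// concurrency block left cancel-in-progress unspecified (nil).
func resolveCancelInProgress(cancelInProgress *bool, serverDefault bool) bool {
	if cancelInProgress == nil {
		return serverDefault // nothing specified in the workflow
	}
	return *cancelInProgress // an explicit true or false wins
}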

View file

@@ -16,7 +16,6 @@ func TestParseRawOn(t *testing.T) {
kases := []struct {
input string
result []*Event
err string
}{
{
input: "on: issue_comment",
@@ -34,10 +33,7 @@ func TestParseRawOn(t *testing.T) {
},
},
},
{
input: "on: [123]",
err: "value at index 0 was unexpected type int; must be a string but was 123",
},
{
input: "on:\n - push\n - pull_request",
result: []*Event{
@@ -49,19 +45,6 @@ func TestParseRawOn(t *testing.T) {
},
},
},
{
input: "on: { push: null }",
result: []*Event{
{
Name: "push",
acts: map[string][]string{},
},
},
},
{
input: "on: { push: 'abc' }",
err: "key \"push\" had unexpected type string; expected a map or array but was \"abc\"",
},
{
input: "on:\n push:\n branches:\n - master",
result: []*Event{
@@ -89,10 +72,6 @@ func TestParseRawOn(t *testing.T) {
},
},
},
{
input: "on:\n branch_protection_rule:\n types: [123, deleted]",
err: "key \"branch_protection_rule\".\"types\" index 0 had unexpected type int; a string was expected but was 123",
},
{
input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
result: []*Event{
@@ -210,22 +189,6 @@ func TestParseRawOn(t *testing.T) {
},
},
},
{
input: "on:\n schedule2:\n - cron: '20 6 * * *'",
err: "key \"schedule2\" had an unexpected type []interface {}; only the 'schedule' key is expected with this type",
},
{
input: "on:\n schedule:\n - 123",
err: "key \"schedule\"[0] had unexpected type int; a map with a key \"cron\" was expected, but value was 123",
},
{
input: "on:\n schedule:\n - corn: '20 6 * * *'",
err: "key \"schedule\"[0] had unexpected key \"corn\"; \"cron\" was expected",
},
{
input: "on:\n schedule:\n - cron: 123",
err: "key \"schedule\"[0].\"cron\" had unexpected type int; a string was expected but was 123",
},
{
input: `
on:
@@ -259,37 +222,15 @@ on:
},
},
},
{
input: `
on:
workflow_call:
mistake:
access-token:
description: 'A token passed from the caller workflow'
required: false
`,
err: "invalid value on key \"workflow_call\": workflow_call only supports keys \"inputs\" and \"outputs\", but key \"mistake\" was found",
},
{
input: `
on:
workflow_call: { map: 123 }
`,
err: "key \"workflow_call\".\"map\" had unexpected type int; was 123",
},
}
for _, kase := range kases {
t.Run(kase.input, func(t *testing.T) {
origin, err := model.ReadWorkflow(strings.NewReader(kase.input), false)
require.NoError(t, err)
assert.NoError(t, err)
events, err := ParseRawOn(&origin.RawOn)
if kase.err != "" {
assert.ErrorContains(t, err, kase.err)
} else {
assert.NoError(t, err)
assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
}
assert.NoError(t, err)
assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
})
}
}
@@ -401,11 +342,10 @@ func TestParseMappingNode(t *testing.T) {
func TestEvaluateConcurrency(t *testing.T) {
tests := []struct {
name string
input model.RawConcurrency
group string
cancelInProgressNil bool
cancelInProgress bool
name string
input model.RawConcurrency
group string
cancelInProgress bool
}{
{
name: "basic",
@@ -417,18 +357,18 @@ func TestEvaluateConcurrency(t *testing.T) {
cancelInProgress: true,
},
{
name: "undefined",
input: model.RawConcurrency{},
group: "",
cancelInProgressNil: true,
name: "undefined",
input: model.RawConcurrency{},
group: "",
cancelInProgress: false,
},
{
name: "group-evaluation",
input: model.RawConcurrency{
Group: "${{ github.workflow }}-${{ github.ref }}",
},
group: "test_workflow-main",
cancelInProgressNil: true,
group: "test_workflow-main",
cancelInProgress: false,
},
{
name: "cancel-evaluation-true",
@@ -453,44 +393,37 @@ func TestEvaluateConcurrency(t *testing.T) {
input: model.RawConcurrency{
Group: "user-${{ github.event.commits[0].author.username }}",
},
group: "user-someone",
cancelInProgressNil: true,
group: "user-someone",
cancelInProgress: false,
},
{
name: "arbitrary-var",
input: model.RawConcurrency{
Group: "${{ vars.eval_arbitrary_var }}",
},
group: "123",
cancelInProgressNil: true,
group: "123",
cancelInProgress: false,
},
{
name: "arbitrary-input",
input: model.RawConcurrency{
Group: "${{ inputs.eval_arbitrary_input }}",
},
group: "456",
cancelInProgressNil: true,
},
{
name: "cancel-in-progress-only",
input: model.RawConcurrency{
CancelInProgress: "true",
},
group: "",
cancelInProgress: true,
group: "456",
cancelInProgress: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
group, cancelInProgress, err := EvaluateWorkflowConcurrency(
group, cancelInProgress, err := EvaluateConcurrency(
&test.input,
// gitCtx
&model.GithubContext{
Workflow: "test_workflow",
Ref: "main",
Event: map[string]any{
"job-id",
nil, // job
map[string]any{
"workflow": "test_workflow",
"ref": "main",
"event": map[string]any{
"commits": []any{
map[string]any{
"author": map[string]any{
@@ -504,24 +437,20 @@ func TestEvaluateConcurrency(t *testing.T) {
},
},
},
},
// vars
}, // gitCtx
map[string]*JobResult{
"job-id": {},
}, // results
map[string]string{
"eval_arbitrary_var": "123",
},
// inputs
}, // vars
map[string]any{
"eval_arbitrary_input": "456",
},
}, // inputs
)
assert.NoError(t, err)
assert.EqualValues(t, test.group, group)
if test.cancelInProgressNil {
assert.Nil(t, cancelInProgress)
} else {
require.NotNil(t, cancelInProgress)
assert.EqualValues(t, test.cancelInProgress, *cancelInProgress)
}
assert.EqualValues(t, test.cancelInProgress, cancelInProgress)
})
}
}

View file

@@ -479,6 +479,8 @@ func rewriteSubExpression(ctx context.Context, in string, forceFormat bool) (str
func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *model.GithubContext) map[string]any {
inputs := map[string]any{}
setupWorkflowInputs(ctx, &inputs, rc)
var env map[string]string
if step != nil {
env = *step.getEnv()
@@ -492,8 +494,6 @@ }
}
}
setupWorkflowInputs(ctx, &inputs, rc)
if rc.caller == nil && ghc.EventName == "workflow_dispatch" {
config := rc.Run.Workflow.WorkflowDispatchConfig()
if config != nil && config.Inputs != nil {

View file

@@ -180,35 +180,31 @@ func setJobResult(ctx context.Context, info jobInfo, rc *RunContext, success boo
jobResult = "failure"
}
// Set local result on current job (child or parent)
info.result(jobResult)
if rc.caller != nil {
// Child reusable workflow:
// 1) propagate result to parent job state
// set reusable workflow job result
rc.caller.runContext.result(jobResult)
// 2) copy workflow_call outputs from child to parent (as in upstream)
jobOutputs := make(map[string]string)
ee := rc.NewExpressionEvaluator(ctx)
if wfcc := rc.Run.Workflow.WorkflowCallConfig(); wfcc != nil {
for k, v := range wfcc.Outputs {
jobOutputs[k] = ee.Interpolate(ctx, ee.Interpolate(ctx, v.Value))
}
}
rc.caller.runContext.Run.Job().Outputs = jobOutputs
// 3) DO NOT print banner in child job (prevents premature token revocation)
logger.Debugf("Reusable job result=%s (parent will finalize, no banner)", jobResult)
return
}
// Parent job: print the final banner ONCE (job-level)
jobResultMessage := "succeeded"
if jobResult != "success" {
jobResultMessage = "failed"
}
jobOutputs := rc.Run.Job().Outputs
if rc.caller != nil {
// Rewrite the job's outputs into the workflow_call outputs...
jobOutputs = make(map[string]string)
ee := rc.NewExpressionEvaluator(ctx)
for k, v := range rc.Run.Workflow.WorkflowCallConfig().Outputs {
jobOutputs[k] = ee.Interpolate(ctx, ee.Interpolate(ctx, v.Value))
}
// When running as a daemon and receiving jobs from Forgejo, the next job (and any of its `needs` outputs) will
// be provided by Forgejo based upon the data sent to the logger below. However, when running `forgejo-runner
// exec` with a reusable workflow, the next job will only be able to read outputs if those outputs are stored on
// the workflow -- that's what is accomplished here:
rc.caller.runContext.Run.Job().Outputs = jobOutputs
}
logger.
WithFields(logrus.Fields{

View file

@@ -444,76 +444,3 @@ func TestSetJobResultConcurrency(t *testing.T) {
assert.Equal(t, "failure", lastResult)
}
func TestSetJobResult_SkipsBannerInChildReusableWorkflow(t *testing.T) {
// Test that child reusable workflow does not print final banner
// to prevent premature token revocation
mockLogger := mocks.NewFieldLogger(t)
// Allow all variants of Debugf (git operations can call with 1-3 args)
mockLogger.On("Debugf", mock.Anything).Return(0).Maybe()
mockLogger.On("Debugf", mock.Anything, mock.Anything).Return(0).Maybe()
mockLogger.On("Debugf", mock.Anything, mock.Anything, mock.Anything).Return(0).Maybe()
// CRITICAL: In CI, git ref detection may fail and call Warningf
mockLogger.On("Warningf", mock.Anything, mock.Anything).Return(0).Maybe()
mockLogger.On("WithField", mock.Anything, mock.Anything).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Maybe()
mockLogger.On("WithFields", mock.Anything).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Maybe()
ctx := common.WithLogger(common.WithJobErrorContainer(t.Context()), mockLogger)
// Setup parent job
parentJob := &model.Job{
Result: "success",
}
parentRC := &RunContext{
Config: &Config{Env: map[string]string{}}, // Must have Config
Run: &model.Run{
JobID: "parent",
Workflow: &model.Workflow{
Jobs: map[string]*model.Job{
"parent": parentJob,
},
},
},
}
// Setup child job with caller reference
childJob := &model.Job{
Result: "success",
}
childRC := &RunContext{
Config: &Config{Env: map[string]string{}}, // Must have Config
Run: &model.Run{
JobID: "child",
Workflow: &model.Workflow{
Jobs: map[string]*model.Job{
"child": childJob,
},
},
},
caller: &caller{
runContext: parentRC,
},
}
jim := &jobInfoMock{}
jim.On("matrix").Return(map[string]any{}) // REQUIRED: setJobResult always calls matrix()
jim.On("result", "success")
// Call setJobResult for child workflow
setJobResult(ctx, jim, childRC, true)
// Verify:
// 1. Child result is set
jim.AssertCalled(t, "result", "success")
// 2. Parent result is propagated
assert.Equal(t, "success", parentJob.Result)
// 3. Final banner was NOT printed by child (critical for token security)
mockLogger.AssertNotCalled(t, "WithFields", mock.MatchedBy(func(fields logrus.Fields) bool {
_, okJobResult := fields["jobResult"]
_, okJobOutput := fields["jobOutputs"]
return okJobOutput && okJobResult
}))
}

View file

@@ -146,26 +146,6 @@ func WithCompositeStepLogger(ctx context.Context, stepID string) context.Context
}).WithContext(ctx))
}
func GetOuterStepResult(entry *logrus.Entry) any {
r, ok := entry.Data["stepResult"]
if !ok {
return nil
}
// composite actions steps log with a list of stepID
if s, ok := entry.Data["stepID"]; ok {
if stepIDs, ok := s.([]string); ok {
if len(stepIDs) > 1 {
return nil
}
}
} else {
return nil
}
return r
}
func withStepLogger(ctx context.Context, stepNumber int, stepID, stepName, stageName string) context.Context {
rtn := common.Logger(ctx).WithFields(logrus.Fields{
"stepNumber": stepNumber,

View file

@@ -1,63 +0,0 @@
package runner
import (
"testing"
"code.forgejo.org/forgejo/runner/v11/act/common"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRunner_GetOuterStepResult(t *testing.T) {
nullLogger, hook := test.NewNullLogger()
ctx := common.WithLogger(t.Context(), nullLogger)
t.Run("no stepResult", func(t *testing.T) {
hook.Reset()
common.Logger(ctx).Info("✅ Success")
entry := hook.LastEntry()
require.NotNil(t, entry)
assert.Nil(t, GetOuterStepResult(entry))
})
t.Run("stepResult and no stepID", func(t *testing.T) {
hook.Reset()
common.Logger(ctx).WithField("stepResult", "success").Info("✅ Success")
entry := hook.LastEntry()
require.NotNil(t, entry)
assert.Nil(t, GetOuterStepResult(entry))
})
stepNumber := 123
stepID := "step id"
stepName := "readable name"
stageName := "Main"
ctx = withStepLogger(ctx, stepNumber, stepID, stepName, stageName)
t.Run("stepResult and stepID", func(t *testing.T) {
hook.Reset()
common.Logger(ctx).WithField("stepResult", "success").Info("✅ Success")
entry := hook.LastEntry()
actualStepIDs, ok := entry.Data["stepID"]
require.True(t, ok)
require.Equal(t, []string{stepID}, actualStepIDs)
require.NotNil(t, entry)
assert.Equal(t, "success", GetOuterStepResult(entry))
})
compositeStepID := "composite step id"
ctx = WithCompositeStepLogger(ctx, compositeStepID)
t.Run("stepResult and composite stepID", func(t *testing.T) {
hook.Reset()
common.Logger(ctx).WithField("stepResult", "success").Info("✅ Success")
entry := hook.LastEntry()
actualStepIDs, ok := entry.Data["stepID"]
require.True(t, ok)
require.Equal(t, []string{stepID, compositeStepID}, actualStepIDs)
require.NotNil(t, entry)
assert.Nil(t, GetOuterStepResult(entry))
})
}

View file

@@ -11,8 +11,6 @@ LXC_IPV6_PREFIX_DEFAULT="fd15"
LXC_DOCKER_PREFIX_DEFAULT="172.17"
LXC_IPV6_DOCKER_PREFIX_DEFAULT="fd00:d0ca"
LXC_APT_TOO_OLD='1 week ago'
: ${LXC_TRANSACTION_TIMEOUT:=600}
LXC_TRANSACTION_LOCK_FILE=/tmp/lxc-helper.lock
: ${LXC_SUDO:=}
: ${LXC_CONTAINER_RELEASE:=bookworm}
@@ -30,22 +28,16 @@ function lxc_template_release() {
echo lxc-helpers-$LXC_CONTAINER_RELEASE
}
function lxc_directory() {
local name="$1"
echo /var/lib/lxc/$name
}
function lxc_root() {
local name="$1"
echo $(lxc_directory $name)/rootfs
echo /var/lib/lxc/$name/rootfs
}
function lxc_config() {
local name="$1"
echo $(lxc_directory $name)/config
echo /var/lib/lxc/$name/config
}
function lxc_container_run() {
@@ -55,42 +47,6 @@ function lxc_container_run() {
$LXC_SUDO lxc-attach --clear-env --name $name -- "$@"
}
function lxc_transaction_lock() {
exec 7>$LXC_TRANSACTION_LOCK_FILE
flock --timeout $LXC_TRANSACTION_TIMEOUT 7
}
function lxc_transaction_unlock() {
exec 7>&-
}
function lxc_transaction_draft_name() {
echo "lxc-helper-draft"
}
function lxc_transaction_begin() {
local name=$1 # not actually used but it helps readability in the caller
local draft=$(lxc_transaction_draft_name)
lxc_transaction_lock
lxc_container_destroy $draft
}
function lxc_transaction_commit() {
local name=$1
local draft=$(lxc_transaction_draft_name)
# do not use lxc-copy because it is not atomic: if lxc-copy is
# interrupted it may leave the $name container half populated
$LXC_SUDO sed -i -e "s/$draft/$name/g" \
$(lxc_config $draft) \
$(lxc_root $draft)/etc/hosts \
$(lxc_root $draft)/etc/hostname
$LXC_SUDO rm -f $(lxc_root $draft)/var/lib/dhcp/dhclient.*
$LXC_SUDO mv $(lxc_directory $draft) $(lxc_directory $name)
lxc_transaction_unlock
}
function lxc_container_run_script_as() {
local name="$1"
local user="$2"
@@ -286,7 +242,7 @@ function lxc_container_configure() {
function lxc_container_install_lxc_helpers() {
local name="$1"
$LXC_SUDO cp -a $LXC_SELF_DIR/lxc-helpers*.sh $(lxc_root $name)/$LXC_BIN
$LXC_SUDO cp -a $LXC_SELF_DIR/lxc-helpers*.sh $root/$LXC_BIN
#
# Wait for the network to come up
#
@@ -348,9 +304,10 @@ function lxc_container_stop() {
function lxc_container_destroy() {
local name="$1"
local root="$2"
if lxc_exists "$name"; then
lxc_container_stop $name
lxc_container_stop $name $root
$LXC_SUDO lxc-destroy --force --name="$name"
fi
}
@@ -385,44 +342,36 @@ function lxc_running() {
function lxc_build_template_release() {
local name="$(lxc_template_release)"
lxc_transaction_begin $name
if lxc_exists_and_apt_not_old $name; then
lxc_transaction_unlock
return
fi
local draft=$(lxc_transaction_draft_name)
$LXC_SUDO lxc-create --name $draft --template debian -- --release=$LXC_CONTAINER_RELEASE
echo 'lxc.apparmor.profile = unconfined' | $LXC_SUDO tee -a $(lxc_config $draft)
lxc_container_install_lxc_helpers $draft
lxc_container_start $draft
lxc_container_run $draft apt-get update -qq
lxc_apt_install $draft sudo git python3
lxc_container_stop $draft
lxc_transaction_commit $name
local root=$(lxc_root $name)
$LXC_SUDO lxc-create --name $name --template debian -- --release=$LXC_CONTAINER_RELEASE
echo 'lxc.apparmor.profile = unconfined' | $LXC_SUDO tee -a $(lxc_config $name)
lxc_container_install_lxc_helpers $name
lxc_container_start $name
lxc_container_run $name apt-get update -qq
lxc_apt_install $name sudo git python3
lxc_container_stop $name
}
function lxc_build_template() {
local name="$1"
local newname="$2"
if lxc_exists_and_apt_not_old $newname; then
return
fi
if test "$name" = "$(lxc_template_release)"; then
lxc_build_template_release
fi
lxc_transaction_begin $name
if lxc_exists_and_apt_not_old $newname; then
lxc_transaction_unlock
return
fi
local draft=$(lxc_transaction_draft_name)
if ! $LXC_SUDO lxc-copy --name=$name --newname=$draft; then
echo lxc-copy --name=$name --newname=$draft failed
if ! $LXC_SUDO lxc-copy --name=$name --newname=$newname; then
echo lxc-copy --name=$name --newname=$newname failed
return 1
fi
lxc_transaction_commit $newname
lxc_container_configure $newname
}
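
The `lxc_transaction_*` helpers in this file implement a build-in-draft-then-promote idiom: the template is built under a draft name while holding a flock, and only a final `mv` of the container directory publishes it, because (as the comment notes) an interrupted lxc-copy can leave a half-populated container. A Go sketch of the same idiom, assuming staging and target live on one filesystem so the rename is atomic; all names here are illustrative:

package main

import (
	"fmt"
	"os"
)

// buildAtomically builds into a staging directory and promotes it with an
// atomic rename, so readers never observe a half-populated target.
func buildAtomically(target string, build func(staging string) error) error {
	staging := target + ".draft"
	if err := os.RemoveAll(staging); err != nil { // drop leftovers of an interrupted run
		return err
	}
	if err := build(staging); err != nil {
		return err
	}
	return os.Rename(staging, target) // the `mv` step of lxc_transaction_commit
}

func main() {
	err := buildAtomically("/tmp/mycontainer", func(dir string) error {
		return os.MkdirAll(dir+"/rootfs", 0o755)
	})
	fmt.Println(err)
}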

View file

@@ -16,7 +16,6 @@ import (
"code.forgejo.org/forgejo/runner/v11/act/common"
"code.forgejo.org/forgejo/runner/v11/act/common/git"
"code.forgejo.org/forgejo/runner/v11/act/model"
"github.com/sirupsen/logrus"
)
func newLocalReusableWorkflowExecutor(rc *RunContext) common.Executor {
@@ -116,10 +115,7 @@ func newActionCacheReusableWorkflowExecutor(rc *RunContext, filename string, rem
return err
}
planErr := runner.NewPlanExecutor(plan)(ctx)
// Finalize from parent context: one job-level banner
return finalizeReusableWorkflow(ctx, rc, planErr)
return runner.NewPlanExecutor(plan)(ctx)
}
}
@@ -175,10 +171,7 @@ func newReusableWorkflowExecutor(rc *RunContext, directory, workflow string) com
return err
}
planErr := runner.NewPlanExecutor(plan)(ctx)
// Finalize from parent context: one job-level banner
return finalizeReusableWorkflow(ctx, rc, planErr)
return runner.NewPlanExecutor(plan)(ctx)
}
}
@@ -236,29 +229,3 @@ func newRemoteReusableWorkflowWithPlat(url, uses string) *remoteReusableWorkflow
URL: url,
}
}
// finalizeReusableWorkflow prints the final job banner from the parent job context.
//
// The Forgejo reporter waits for this banner (log entry with "jobResult"
// field and without stage="Main") before marking the job as complete and revoking
// tokens. Printing this banner from the child reusable workflow would cause
// premature token revocation, breaking subsequent steps in the parent workflow.
func finalizeReusableWorkflow(ctx context.Context, rc *RunContext, planErr error) error {
jobResult := "success"
jobResultMessage := "succeeded"
if planErr != nil {
jobResult = "failure"
jobResultMessage = "failed"
}
// Outputs should already be present in the parent context:
// - copied by child's setJobResult branch (rc.caller != nil)
jobOutputs := rc.Run.Job().Outputs
common.Logger(ctx).WithFields(logrus.Fields{
"jobResult": jobResult,
"jobOutputs": jobOutputs,
}).Infof("\U0001F3C1 Job %s", jobResultMessage)
return planErr
}

View file

@@ -1,247 +0,0 @@
package runner
import (
"errors"
"testing"
"code.forgejo.org/forgejo/runner/v11/act/common"
"code.forgejo.org/forgejo/runner/v11/act/model"
"code.forgejo.org/forgejo/runner/v11/act/runner/mocks"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
func TestConfig_GetToken(t *testing.T) {
t.Run("returns GITEA_TOKEN when both GITEA_TOKEN and GITHUB_TOKEN present", func(t *testing.T) {
c := &Config{
Secrets: map[string]string{
"GITHUB_TOKEN": "github-token",
"GITEA_TOKEN": "gitea-token",
},
}
assert.Equal(t, "gitea-token", c.GetToken())
})
t.Run("returns GITHUB_TOKEN when only GITHUB_TOKEN present", func(t *testing.T) {
c := &Config{
Secrets: map[string]string{
"GITHUB_TOKEN": "github-token",
},
}
assert.Equal(t, "github-token", c.GetToken())
})
t.Run("returns empty string when no tokens present", func(t *testing.T) {
c := &Config{
Secrets: map[string]string{},
}
assert.Equal(t, "", c.GetToken())
})
t.Run("returns empty string when Secrets is nil", func(t *testing.T) {
c := &Config{}
assert.Equal(t, "", c.GetToken())
})
}
func TestRemoteReusableWorkflow_CloneURL(t *testing.T) {
t.Run("adds https prefix when missing", func(t *testing.T) {
rw := &remoteReusableWorkflow{
URL: "code.forgejo.org",
Org: "owner",
Repo: "repo",
}
assert.Equal(t, "https://code.forgejo.org/owner/repo", rw.CloneURL())
})
t.Run("preserves https prefix", func(t *testing.T) {
rw := &remoteReusableWorkflow{
URL: "https://code.forgejo.org",
Org: "owner",
Repo: "repo",
}
assert.Equal(t, "https://code.forgejo.org/owner/repo", rw.CloneURL())
})
t.Run("preserves http prefix", func(t *testing.T) {
rw := &remoteReusableWorkflow{
URL: "http://localhost:3000",
Org: "owner",
Repo: "repo",
}
assert.Equal(t, "http://localhost:3000/owner/repo", rw.CloneURL())
})
}
func TestRemoteReusableWorkflow_FilePath(t *testing.T) {
tests := []struct {
name string
gitPlatform string
filename string
expectedPath string
}{
{
name: "github platform",
gitPlatform: "github",
filename: "test.yml",
expectedPath: "./.github/workflows/test.yml",
},
{
name: "gitea platform",
gitPlatform: "gitea",
filename: "build.yaml",
expectedPath: "./.gitea/workflows/build.yaml",
},
{
name: "forgejo platform",
gitPlatform: "forgejo",
filename: "deploy.yml",
expectedPath: "./.forgejo/workflows/deploy.yml",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
rw := &remoteReusableWorkflow{
GitPlatform: tt.gitPlatform,
Filename: tt.filename,
}
assert.Equal(t, tt.expectedPath, rw.FilePath())
})
}
}
func TestNewRemoteReusableWorkflowWithPlat(t *testing.T) {
tests := []struct {
name string
url string
uses string
expectedOrg string
expectedRepo string
expectedPlatform string
expectedFilename string
expectedRef string
shouldFail bool
}{
{
name: "valid github workflow",
url: "github.com",
uses: "owner/repo/.github/workflows/test.yml@main",
expectedOrg: "owner",
expectedRepo: "repo",
expectedPlatform: "github",
expectedFilename: "test.yml",
expectedRef: "main",
shouldFail: false,
},
{
name: "valid gitea workflow",
url: "code.forgejo.org",
uses: "forgejo/runner/.gitea/workflows/build.yaml@v1.0.0",
expectedOrg: "forgejo",
expectedRepo: "runner",
expectedPlatform: "gitea",
expectedFilename: "build.yaml",
expectedRef: "v1.0.0",
shouldFail: false,
},
{
name: "invalid format - missing platform",
url: "github.com",
uses: "owner/repo/workflows/test.yml@main",
shouldFail: true,
},
{
name: "invalid format - no ref",
url: "github.com",
uses: "owner/repo/.github/workflows/test.yml",
shouldFail: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := newRemoteReusableWorkflowWithPlat(tt.url, tt.uses)
if tt.shouldFail {
assert.Nil(t, result)
} else {
assert.NotNil(t, result)
assert.Equal(t, tt.expectedOrg, result.Org)
assert.Equal(t, tt.expectedRepo, result.Repo)
assert.Equal(t, tt.expectedPlatform, result.GitPlatform)
assert.Equal(t, tt.expectedFilename, result.Filename)
assert.Equal(t, tt.expectedRef, result.Ref)
assert.Equal(t, tt.url, result.URL)
}
})
}
}
func TestFinalizeReusableWorkflow_PrintsBannerSuccess(t *testing.T) {
mockLogger := mocks.NewFieldLogger(t)
bannerCalled := false
mockLogger.On("WithFields",
mock.MatchedBy(func(fields logrus.Fields) bool {
result, ok := fields["jobResult"].(string)
if !ok || result != "success" {
return false
}
outs, ok := fields["jobOutputs"].(map[string]string)
return ok && outs["foo"] == "bar"
}),
).Run(func(args mock.Arguments) {
bannerCalled = true
}).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Once()
ctx := common.WithLogger(t.Context(), mockLogger)
rc := &RunContext{
Run: &model.Run{
JobID: "parent",
Workflow: &model.Workflow{
Jobs: map[string]*model.Job{
"parent": {
Outputs: map[string]string{"foo": "bar"},
},
},
},
},
}
err := finalizeReusableWorkflow(ctx, rc, nil)
assert.NoError(t, err)
assert.True(t, bannerCalled, "final banner should be printed from parent")
}
func TestFinalizeReusableWorkflow_PrintsBannerFailure(t *testing.T) {
mockLogger := mocks.NewFieldLogger(t)
bannerCalled := false
mockLogger.On("WithFields",
mock.MatchedBy(func(fields logrus.Fields) bool {
result, ok := fields["jobResult"].(string)
return ok && result == "failure"
}),
).Run(func(args mock.Arguments) {
bannerCalled = true
}).Return(&logrus.Entry{Logger: &logrus.Logger{}}).Once()
ctx := common.WithLogger(t.Context(), mockLogger)
rc := &RunContext{
Run: &model.Run{
JobID: "parent",
Workflow: &model.Workflow{
Jobs: map[string]*model.Job{
"parent": {},
},
},
},
}
planErr := errors.New("workflow failed")
err := finalizeReusableWorkflow(ctx, rc, planErr)
assert.EqualError(t, err, "workflow failed")
assert.True(t, bannerCalled, "banner should be printed even on failure")
}

View file

@@ -248,18 +248,12 @@ var stopTemplate = template.Must(template.New("stop").Parse(`#!/bin/bash
source $(dirname $0)/lxc-helpers-lib.sh
lxc_container_destroy "{{.Name}}"
lxc_maybe_sudo
$LXC_SUDO rm -fr "{{ .Root }}"
`))
func (rc *RunContext) stopHostEnvironment(ctx context.Context) error {
logger := common.Logger(ctx)
logger.Debugf("stopHostEnvironment")
if !rc.IsLXCHostEnv(ctx) {
return nil
}
var stopScript bytes.Buffer
if err := stopTemplate.Execute(&stopScript, struct {
Name string
@@ -316,8 +310,11 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
ToolCache: rc.getToolCache(ctx),
Workdir: rc.Config.Workdir,
ActPath: actPath,
StdOut: logWriter,
LXC: rc.IsLXCHostEnv(ctx),
CleanUp: func() {
os.RemoveAll(miscpath)
},
StdOut: logWriter,
LXC: rc.IsLXCHostEnv(ctx),
}
rc.cleanUpJobContainer = func(ctx context.Context) error {
if err := rc.stopHostEnvironment(ctx); err != nil {
@@ -948,7 +945,7 @@ func (rc *RunContext) Executor() (common.Executor, error) {
return err
}
if res {
timeoutctx, cancelTimeOut := evaluateTimeout(ctx, "job", rc.ExprEval, rc.Run.Job().TimeoutMinutes)
timeoutctx, cancelTimeOut := evaluateTimeout(ctx, rc.ExprEval, rc.Run.Job().TimeoutMinutes)
defer cancelTimeOut()
return executor(timeoutctx)
@@ -1210,7 +1207,7 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
ghc.RetentionDays = preset.RetentionDays
instance := rc.Config.GitHubInstance
if instance != "" && !strings.HasPrefix(instance, "http://") &&
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
@@ -1253,7 +1250,7 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
{ // Adapt to Gitea
instance := rc.Config.GitHubInstance
if instance != "" && !strings.HasPrefix(instance, "http://") &&
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
@@ -1355,6 +1352,16 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
set("SERVER_URL", github.ServerURL)
set("API_URL", github.APIURL)
{ // Adapt to Forgejo
instance := rc.Config.GitHubInstance
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
set("SERVER_URL", instance)
set("API_URL", instance+"/api/v1")
}
if rc.Config.ArtifactServerPath != "" {
setActionRuntimeVars(rc, env)
}

View file

@@ -280,39 +280,6 @@ func TestRunContext_GetBindsAndMounts(t *testing.T) {
})
}
func TestRunContext_GetGithubContextURL(t *testing.T) {
table := []struct {
instance string
serverURL string
APIURL string
}{
{instance: "", serverURL: "", APIURL: "/api/v1"},
{instance: "example.com", serverURL: "https://example.com", APIURL: "https://example.com/api/v1"},
{instance: "http://example.com", serverURL: "http://example.com", APIURL: "http://example.com/api/v1"},
{instance: "https://example.com", serverURL: "https://example.com", APIURL: "https://example.com/api/v1"},
}
for _, data := range table {
t.Run(data.instance, func(t *testing.T) {
rc := &RunContext{
EventJSON: "{}",
Config: &Config{
GitHubInstance: data.instance,
},
Run: &model.Run{
Workflow: &model.Workflow{
Name: "GitHubContextTest",
},
},
}
ghc := rc.getGithubContext(t.Context())
assert.Equal(t, data.serverURL, ghc.ServerURL)
assert.Equal(t, data.APIURL, ghc.APIURL)
})
}
}
func TestRunContext_GetGithubContextRef(t *testing.T) {
table := []struct {
event string

View file

@@ -263,7 +263,6 @@ func TestRunner_RunEvent(t *testing.T) {
{workdir, "uses-workflow", "pull_request", "", platforms, map[string]string{"secret": "keep_it_private"}},
{workdir, "uses-docker-url", "push", "", platforms, secrets},
{workdir, "act-composite-env-test", "push", "", platforms, secrets},
{workdir, "uses-workflow-env-input", "push", "", platforms, secrets},
// Eval
{workdir, "evalmatrix", "push", "", platforms, secrets},


@@ -177,7 +177,7 @@ func runStepExecutor(step step, stage stepStage, executor common.Executor) commo
Mode: 0o666,
})(ctx)
timeoutctx, cancelTimeOut := evaluateTimeout(ctx, "step", rc.ExprEval, stepModel.TimeoutMinutes)
timeoutctx, cancelTimeOut := evaluateTimeout(ctx, rc.ExprEval, stepModel.TimeoutMinutes)
defer cancelTimeOut()
err = executor(timeoutctx)
@@ -213,12 +213,12 @@ func runStepExecutor(step step, stage stepStage, executor common.Executor) commo
}
}
func evaluateTimeout(ctx context.Context, contextType string, exprEval ExpressionEvaluator, timeoutMinutes string) (context.Context, context.CancelFunc) {
func evaluateTimeout(ctx context.Context, exprEval ExpressionEvaluator, timeoutMinutes string) (context.Context, context.CancelFunc) {
timeout := exprEval.Interpolate(ctx, timeoutMinutes)
if timeout != "" {
timeOutMinutes, err := strconv.ParseInt(timeout, 10, 64)
if err == nil {
common.Logger(ctx).Debugf("the %s will stop in timeout-minutes %s", contextType, timeout)
common.Logger(ctx).Debugf("the step will stop in timeout-minutes %s", timeout)
return context.WithTimeout(ctx, time.Duration(timeOutMinutes)*time.Minute)
}
common.Logger(ctx).Errorf("timeout-minutes %s cannot be parsed and will be ignored: %w", timeout, err)
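
A self-contained sketch of the timeout pattern in this hunk, assuming a pre-interpolated timeout-minutes string (the real function first runs it through the ExpressionEvaluator):

package main

import (
	"context"
	"fmt"
	"strconv"
	"time"
)

// evaluateTimeout, reduced to its core: parse timeout-minutes and, when
// valid, derive a context that expires after that many minutes.
func evaluateTimeout(ctx context.Context, contextType, timeout string) (context.Context, context.CancelFunc) {
	if timeout != "" {
		if minutes, err := strconv.ParseInt(timeout, 10, 64); err == nil {
			fmt.Printf("the %s will stop in timeout-minutes %s\n", contextType, timeout)
			return context.WithTimeout(ctx, time.Duration(minutes)*time.Minute)
		}
		fmt.Printf("timeout-minutes %s cannot be parsed and will be ignored\n", timeout)
	}
	return ctx, func() {} // no timeout: the caller can still defer cancel safely
}

func main() {
	ctx, cancel := evaluateTimeout(context.Background(), "job", "30")
	defer cancel()
	deadline, ok := ctx.Deadline()
	fmt.Println(deadline, ok)
}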


@@ -1,21 +0,0 @@
name: "use-inputs-impl"
on:
workflow_call:
inputs:
greet_target:
type: string
required: false
default: "Some Default Value"
jobs:
works:
runs-on: ubuntu-latest
env:
MY_INPUT_TEST: ${{ inputs.greet_target }}
INPUT_TEST: ${{ inputs.greet_target }}
INPUT_GREET_TARGET: ${{ inputs.greet_target }}
steps:
- run: '[ "$MY_INPUT_TEST" = "Mona the Octocat" ] || exit 1'
- run: '[ "$INPUT_TEST" = "Mona the Octocat" ] || exit 1'
- run: '[ "$INPUT_GREET_TARGET" = "Mona the Octocat" ] || exit 1'


@@ -1,4 +1,4 @@
FROM code.forgejo.org/oci/alpine:latest
FROM alpine:3
COPY entrypoint.sh /entrypoint.sh


@@ -1,8 +0,0 @@
name: local-action-env-input
on: push
jobs:
test:
runs-on: docker
uses: ./testdata/.github/workflows/local-reusable-env-input.yml
with:
greet_target: 'Mona the Octocat'


@@ -30,84 +30,6 @@ jobs:
assert.NoError(t, err)
}
func TestContextsInWorkflowMatrix(t *testing.T) {
t.Run("KnownContexts", func(t *testing.T) {
// Parse raw YAML snippet.
var node yaml.Node
err := yaml.Unmarshal([]byte(`
on: push
jobs:
job:
uses: ./.forgejo/workflow/test.yaml
strategy:
matrix:
input1:
- ${{ forge.KEY }}
- ${{ forgejo.KEY }}
- ${{ github.KEY }}
- ${{ inputs.KEY }}
- ${{ vars.KEY }}
- ${{ needs.KEY }}
include:
- forge: ${{ forge.KEY }}
- forgejo: ${{ forgejo.KEY }}
- github: ${{ github.KEY }}
- inputs: ${{ inputs.KEY }}
- vars: ${{ vars.KEY }}
- needs: ${{ needs.KEY }}
exclude:
- forge: ${{ forge.KEY }}
- forgejo: ${{ forgejo.KEY }}
- github: ${{ github.KEY }}
- inputs: ${{ inputs.KEY }}
- vars: ${{ vars.KEY }}
- needs: ${{ needs.KEY }}
`), &node)
if !assert.NoError(t, err) {
return
}
// Parse YAML node as a validated workflow.
err = (&Node{
Definition: "workflow-root",
Schema: GetWorkflowSchema(),
}).UnmarshalYAML(&node)
assert.NoError(t, err)
})
t.Run("UnknownContext", func(t *testing.T) {
for _, property := range []string{"include", "exclude", "input1"} {
t.Run(property, func(t *testing.T) {
for _, context := range []string{"secrets", "job", "steps", "runner", "matrix", "strategy"} {
t.Run(context, func(t *testing.T) {
var node yaml.Node
err := yaml.Unmarshal([]byte(fmt.Sprintf(`
on: push
jobs:
job:
uses: ./.forgejo/workflow/test.yaml
strategy:
matrix:
%[1]s:
- input1: ${{ %[2]s.KEY }}
`, property, context)), &node)
if !assert.NoError(t, err) {
return
}
err = (&Node{
Definition: "workflow-root",
Schema: GetWorkflowSchema(),
}).UnmarshalYAML(&node)
assert.ErrorContains(t, err, "Unknown Variable Access "+context)
})
}
})
}
})
}
func TestReusableWorkflow(t *testing.T) {
t.Run("KnownContexts", func(t *testing.T) {
var node yaml.Node
@@ -242,7 +164,7 @@ jobs:
name: Build Silo Frontend DEV
runs-on: ubuntu-latest
container:
image: code.forgejo.org/oci/${{ env.IMAGE }}
image: code.forgejo.org/oci/node:22-bookworm
uses: ./.forgejo/workflows/${{ vars.PATHNAME }}
with:
STAGE: dev


@@ -2008,7 +2008,7 @@
},
"container": {
"description": "A container to run any steps in a job that don't already specify a container. If you have steps that use both script and container actions, the container actions will run as sibling containers on the same network with the same volume mounts.\n\nIf you do not set a container, all steps will run directly on the host specified by runs-on unless a step refers to an action configured to run in a container.",
"context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix", "env"],
"context": ["forge", "forgejo", "github", "inputs", "vars", "needs", "strategy", "matrix"],
"one-of": ["string", "container-mapping"]
},
"container-mapping": {


@@ -51,7 +51,7 @@ services:
- 8080:3000
runner-register:
image: code.forgejo.org/forgejo/runner:11.1.2
image: code.forgejo.org/forgejo/runner:11.0.0
links:
- docker-in-docker
- forgejo
@@ -77,7 +77,7 @@
'
runner-daemon:
image: code.forgejo.org/forgejo/runner:11.1.2
image: code.forgejo.org/forgejo/runner:11.0.0
links:
- docker-in-docker
- forgejo


@@ -20,14 +20,14 @@ trap "rm -fr $TMPDIR" EXIT
: ${INPUTS_TOKEN:=}
: ${INPUTS_FORGEJO:=https://code.forgejo.org}
: ${INPUTS_LIFETIME:=7d}
DEFAULT_LXC_HELPERS_VERSION=1.1.3 # renovate: datasource=forgejo-tags depName=forgejo/lxc-helpers
DEFAULT_LXC_HELPERS_VERSION=1.1.0 # renovate: datasource=forgejo-tags depName=forgejo/lxc-helpers
: ${INPUTS_LXC_HELPERS_VERSION:=$DEFAULT_LXC_HELPERS_VERSION}
DEFAULT_RUNNER_VERSION=11.1.2 # renovate: datasource=forgejo-releases depName=forgejo/runner
DEFAULT_RUNNER_VERSION=11.0.0 # renovate: datasource=forgejo-releases depName=forgejo/runner
: ${INPUTS_RUNNER_VERSION:=$DEFAULT_RUNNER_VERSION}
: ${KILL_AFTER:=21600} # 6h == 21600
NODEJS_VERSION=20
DEBIAN_RELEASE=trixie
DEBIAN_RELEASE=bookworm
YQ_VERSION=v4.45.1
SELF=${BASH_SOURCE[0]}
SELF_FILENAME=$(basename "$SELF")

go.mod

@@ -2,22 +2,22 @@ module code.forgejo.org/forgejo/runner/v11
go 1.24.0
toolchain go1.24.9
toolchain go1.24.7
require (
code.forgejo.org/forgejo/actions-proto v0.5.3
connectrpc.com/connect v1.19.1
code.forgejo.org/forgejo/actions-proto v0.5.2
connectrpc.com/connect v1.18.1
dario.cat/mergo v1.0.2
github.com/Masterminds/semver v1.5.0
github.com/avast/retry-go/v4 v4.7.0
github.com/avast/retry-go/v4 v4.6.1
github.com/containerd/errdefs v1.0.0
github.com/creack/pty v1.1.24
github.com/distribution/reference v0.6.0
github.com/docker/cli v28.5.1+incompatible
github.com/docker/docker v28.5.1+incompatible
github.com/docker/cli v28.4.0+incompatible
github.com/docker/docker v28.4.0+incompatible
github.com/docker/go-connections v0.6.0
github.com/go-git/go-billy/v5 v5.6.2
github.com/go-git/go-git/v5 v5.16.3
github.com/go-git/go-git/v5 v5.16.2
github.com/gobwas/glob v0.2.3
github.com/google/go-cmp v0.7.0
github.com/google/uuid v1.6.0
@@ -29,7 +29,7 @@ require (
github.com/moby/patternmatcher v0.6.0
github.com/opencontainers/image-spec v1.1.1
github.com/opencontainers/selinux v1.12.0
github.com/rhysd/actionlint v1.7.8
github.com/rhysd/actionlint v1.7.7
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.10
@@ -37,9 +37,9 @@ require (
github.com/timshannon/bolthold v0.0.0-20240314194003-30aac6950928
go.etcd.io/bbolt v1.4.3
go.yaml.in/yaml/v3 v3.0.4
golang.org/x/term v0.36.0
golang.org/x/time v0.14.0
google.golang.org/protobuf v1.36.10
golang.org/x/term v0.35.0
golang.org/x/time v0.13.0
google.golang.org/protobuf v1.36.9
gotest.tools/v3 v3.5.2
)
@@ -47,7 +47,7 @@ require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/go-crypto v1.1.6 // indirect
github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
github.com/bmatcuk/doublestar/v4 v4.8.0 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -69,7 +69,7 @@ require (
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-runewidth v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
@@ -98,11 +98,10 @@ require (
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/sdk v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.yaml.in/yaml/v4 v4.0.0-rc.2 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.36.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum

@@ -1,7 +1,7 @@
code.forgejo.org/forgejo/actions-proto v0.5.3 h1:dDProRNB4CDvEl9gfo8jkiVfGdiW7fXAt5TM9Irka28=
code.forgejo.org/forgejo/actions-proto v0.5.3/go.mod h1:33iTdur/jVa/wAQP+BuciRTK9WZcVaxy0BNEnSWWFDM=
connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
code.forgejo.org/forgejo/actions-proto v0.5.2 h1:2+j3pXKk7l4cqATb7de75ZPPsNqc6e7RWX7xJ8DmySY=
code.forgejo.org/forgejo/actions-proto v0.5.2/go.mod h1:6CtsEiLzyODMO/3sqsdoTij4Y3gyQ29Nn8QnWunOw98=
connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
@@ -19,10 +19,10 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/avast/retry-go/v4 v4.7.0 h1:yjDs35SlGvKwRNSykujfjdMxMhMQQM0TnIjJaHB+Zio=
github.com/avast/retry-go/v4 v4.7.0/go.mod h1:ZMPDa3sY2bKgpLtap9JRUgk2yTAba7cgiFhqxY2Sg6Q=
github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk=
github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA=
github.com/bmatcuk/doublestar/v4 v4.8.0 h1:DSXtrypQddoug1459viM9X9D3dp1Z7993fw36I2kNcQ=
github.com/bmatcuk/doublestar/v4 v4.8.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
@@ -43,10 +43,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY=
github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk=
github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
@@ -69,8 +69,8 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN
github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
github.com/go-git/go-git/v5 v5.16.3 h1:Z8BtvxZ09bYm/yYNgPKCzgWtaRqDTgIKRgIRHBfU6Z8=
github.com/go-git/go-git/v5 v5.16.3/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -115,8 +115,8 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ=
github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -151,8 +151,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rhysd/actionlint v1.7.8 h1:3d+N9ourgAxVYG4z2IFxFIk/YiT6V+VnKASfXGwT60E=
github.com/rhysd/actionlint v1.7.8/go.mod h1:3kiS6egcbXG+vQsJIhFxTz+UKaF1JprsE0SKrpCZKvU=
github.com/rhysd/actionlint v1.7.7 h1:0KgkoNTrYY7vmOCs9BW2AHxLvvpoY9nEUzgBHiPUr0k=
github.com/rhysd/actionlint v1.7.7/go.mod h1:AE6I6vJEkNaIfWqC2GNE5spIJNhxf8NCtLEKU4NnUXg=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -222,8 +222,6 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go.yaml.in/yaml/v4 v4.0.0-rc.2 h1:/FrI8D64VSr4HtGIlUtlFMGsm7H7pWTbj6vOLVZcA6s=
go.yaml.in/yaml/v4 v4.0.0-rc.2/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
@@ -232,8 +230,8 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbR
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -244,16 +242,16 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY=
@@ -262,8 +260,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=


@@ -371,10 +371,6 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
log.Infof("cache handler listens on: %v", handler.ExternalURL())
execArgs.cacheHandler = handler
if execArgs.containerDaemonSocket != "/var/run/docker.sock" {
log.Warnf("--container-daemon-socket %s: please use the DOCKER_HOST environment variable as documented at https://forgejo.org/docs/next/admin/actions/runner-installation/#setting-up-the-container-environment instead. See https://code.forgejo.org/forgejo/runner/issues/577 for more information.", execArgs.containerDaemonSocket)
}
// run the plan
config := &runner.Config{
Workdir: execArgs.Workdir(),
@@ -466,7 +462,7 @@ func loadExecCmd(ctx context.Context) *cobra.Command {
execCmd.Flags().BoolVar(&execArg.privileged, "privileged", false, "use privileged mode")
execCmd.Flags().StringVar(&execArg.usernsMode, "userns", "", "user namespace to use")
execCmd.PersistentFlags().StringVarP(&execArg.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
execCmd.PersistentFlags().StringVarP(&execArg.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Please use the DOCKER_HOST environment variable as documented at https://forgejo.org/docs/next/admin/actions/runner-installation/#setting-up-the-container-environment instead.")
execCmd.PersistentFlags().StringVarP(&execArg.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers")
execCmd.Flags().BoolVar(&execArg.useGitIgnore, "use-gitignore", true, "Controls whether paths specified in .gitignore should be copied into container")
execCmd.Flags().StringArrayVarP(&execArg.containerCapAdd, "container-cap-add", "", []string{}, "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)")
execCmd.Flags().StringArrayVarP(&execArg.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)")


@@ -36,20 +36,11 @@ func NewJob(cfg *config.Config, client client.Client, runner run.RunnerInterface
}
func (j *Job) Run(ctx context.Context) error {
log.Info("Polling for a job...")
for {
task, ok := j.fetchTask(ctx)
if ok {
return j.runTaskWithRecover(ctx, task)
}
// No task available, continue polling
select {
case <-ctx.Done():
return ctx.Err()
default:
// Continue to next iteration
}
task, ok := j.fetchTask(ctx)
if !ok {
return fmt.Errorf("could not fetch task")
}
return j.runTaskWithRecover(ctx, task)
}
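
The main-branch loop removed here keeps polling until a task arrives or the context ends; a minimal sketch under that assumption, with fetch standing in for fetchTask:

package main

import (
	"context"
	"fmt"
	"time"
)

// run polls fetch until it yields a task or ctx is cancelled,
// mirroring the shape of the removed Job.Run loop.
func run(ctx context.Context, fetch func() (string, bool)) error {
	for {
		if task, ok := fetch(); ok {
			fmt.Println("running task:", task)
			return nil
		}
		// No task available, continue polling unless cancelled.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	err := run(ctx, func() (string, bool) {
		time.Sleep(time.Millisecond) // stand-in for a blocking fetch
		return "", false
	})
	fmt.Println(err) // context deadline exceeded
}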
func (j *Job) runTaskWithRecover(ctx context.Context, task *runnerv1.Task) error {


@@ -90,10 +90,10 @@ func (p *poller) Shutdown(ctx context.Context) error {
return nil
case <-ctx.Done():
log.Info("forcing the jobs to shutdown")
log.Trace("forcing the jobs to shutdown")
p.shutdownJobs()
<-p.done
log.Info("all jobs have been shutdown")
log.Trace("all jobs have been shutdown")
return ctx.Err()
}
}
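
A runnable sketch of the two-phase shutdown in this hunk: wait for the jobs to drain, and when the caller's context expires first, force them down and still wait for the teardown to finish (names are illustrative, not the runner's):

package main

import (
	"context"
	"fmt"
	"time"
)

// shutdown mirrors poller.Shutdown: a graceful wait on done, a forced
// shutdown once ctx expires, then a second wait for the forced teardown.
func shutdown(ctx context.Context, done <-chan struct{}, forceStop func()) error {
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		fmt.Println("forcing the jobs to shutdown")
		forceStop()
		<-done
		fmt.Println("all jobs have been shutdown")
		return ctx.Err()
	}
}

func main() {
	done := make(chan struct{})
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(shutdown(ctx, done, func() { close(done) }))
}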


@@ -117,13 +117,13 @@ func setupCache(cfg *config.Config, envs map[string]string) *cacheproxy.Handler
cacheServer, err := artifactcache.StartHandler(
cfg.Cache.Dir,
"", // automatically detect
cfg.Cache.Host,
cfg.Cache.Port,
cacheSecret,
log.StandardLogger().WithField("module", "cache_request"),
)
if err != nil {
log.Errorf("Could not start the cache server, cache will be disabled: %v", err)
log.Error("Could not start the cache server, cache will be disabled")
return nil
}
@@ -144,14 +144,16 @@ func setupCache(cfg *config.Config, envs map[string]string) *cacheproxy.Handler
cacheURL,
cfg.Cache.Host,
cfg.Cache.ProxyPort,
cfg.Cache.ActionsCacheURLOverride,
cacheSecret,
log.StandardLogger().WithField("module", "cache_proxy"),
)
if err != nil {
log.Errorf("cannot init cache proxy, cache will be disabled: %v", err)
} else {
envs["ACTIONS_CACHE_URL"] = cacheProxy.ExternalURL()
}
envs["ACTIONS_CACHE_URL"] = cacheProxy.ExternalURL()
if cfg.Cache.ActionsCacheURLOverride != "" {
envs["ACTIONS_CACHE_URL"] = cfg.Cache.ActionsCacheURLOverride
}
return cacheProxy
@@ -193,44 +195,6 @@ func explainFailedGenerateWorkflow(task *runnerv1.Task, log func(message string,
return fmt.Errorf("the workflow file is not usable")
}
func getWriteIsolationKey(ctx context.Context, eventName, ref string, event map[string]any) (string, error) {
if eventName == "pull_request" {
// The "closed" action of a pull request event runs in the context of the base repository
// and was merged by a user with write access to the base repository. It is authorized to
// write the repository cache.
if event["action"] == "closed" {
pullRequest, ok := event["pull_request"].(map[string]any)
if !ok {
return "", fmt.Errorf("getWriteIsolationKey: event.pull_request is not a map[string]any but %T", event["pull_request"])
}
merged, ok := pullRequest["merged"].(bool)
if !ok {
return "", fmt.Errorf("getWriteIsolationKey: event.pull_request.merged is not a bool but %T", pullRequest["merged"])
}
if merged {
return "", nil
}
// a pull request that is closed but not merged falls thru and is expected to obey the same
// constraints as an opened pull request, it may be closed by a user with no write permissions to the
// base repository
}
// When performing an action on an event from an opened PR, provide a "write isolation key" to the cache. The generated
// ACTIONS_CACHE_URL will be able to read the cache, and write to a cache, but its writes will be isolated to
// future runs of the PR's workflows and won't be shared with other pull requests or actions. This is a security
// measure to prevent a malicious pull request from poisoning the cache with secret-stealing code which would
// later be executed on another action.
// Ensure that `ref` has the expected format so that we don't end up with a useless write isolation key
if !strings.HasPrefix(ref, "refs/pull/") {
return "", fmt.Errorf("getWriteIsolationKey: expected ref to be refs/pull/..., but was %q", ref)
}
return ref, nil
}
// Other events do not allow the trigger user to modify the content of the repository and
// are allowed to write the cache without an isolation key
return "", nil
}
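
Called from Runner.run below, the function restored above yields three outcomes; a usage sketch assuming it is invoked from the same package, with hand-built event payloads matching the tests further down:

// exampleIsolationKeys shows the three outcomes of getWriteIsolationKey
// (illustrative helper, not part of the tree; same package assumed).
func exampleIsolationKeys(ctx context.Context) (push, openPR, mergedPR string) {
	// Non-PR events write to the shared cache: empty isolation key.
	push, _ = getWriteIsolationKey(ctx, "push", "refs/heads/main", nil) // ""
	// An open pull request is confined to writes under its own ref.
	openPR, _ = getWriteIsolationKey(ctx, "pull_request", "refs/pull/7/head",
		map[string]any{"action": "synchronized"}) // "refs/pull/7/head"
	// A merged pull request ran with base-repository write access.
	mergedPR, _ = getWriteIsolationKey(ctx, "pull_request", "refs/pull/7/head",
		map[string]any{"action": "closed", "pull_request": map[string]any{"merged": true}}) // ""
	return
}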
func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.Reporter) (err error) {
defer func() {
if r := recover(); r != nil {
@@ -264,18 +228,15 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
defaultActionURL,
r.client.Address())
eventName := taskContext["event_name"].GetStringValue()
ref := taskContext["ref"].GetStringValue()
event := taskContext["event"].GetStructValue().AsMap()
preset := &model.GithubContext{
Event: event,
Event: taskContext["event"].GetStructValue().AsMap(),
RunID: taskContext["run_id"].GetStringValue(),
RunNumber: taskContext["run_number"].GetStringValue(),
Actor: taskContext["actor"].GetStringValue(),
Repository: taskContext["repository"].GetStringValue(),
EventName: eventName,
EventName: taskContext["event_name"].GetStringValue(),
Sha: taskContext["sha"].GetStringValue(),
Ref: ref,
Ref: taskContext["ref"].GetStringValue(),
RefName: taskContext["ref_name"].GetStringValue(),
RefType: taskContext["ref_type"].GetStringValue(),
HeadRef: taskContext["head_ref"].GetStringValue(),
@@ -305,9 +266,19 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
// Register the run with the cacheproxy and modify the CACHE_URL
if r.cacheProxy != nil {
writeIsolationKey, err := getWriteIsolationKey(ctx, eventName, ref, event)
if err != nil {
return err
writeIsolationKey := ""
// When performing an action on an event from a PR, provide a "write isolation key" to the cache. The generated
// ACTIONS_CACHE_URL will be able to read the cache, and write to a cache, but its writes will be isolated to
// future runs of the PR's workflows and won't be shared with other pull requests or actions. This is a security
// measure to prevent a malicious pull request from poisoning the cache with secret-stealing code which would
// later be executed on another action.
if taskContext["event_name"].GetStringValue() == "pull_request" {
// Ensure that `Ref` has the expected format so that we don't end up with a useless write isolation key
if !strings.HasPrefix(preset.Ref, "refs/pull/") {
return fmt.Errorf("write isolation key: expected preset.Ref to be refs/pull/..., but was %q", preset.Ref)
}
writeIsolationKey = preset.Ref
}
timestamp := strconv.FormatInt(time.Now().Unix(), 10)


@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"net"
"os"
"testing"
"time"
@@ -71,7 +70,6 @@ func TestLabelUpdate(t *testing.T) {
type forgejoClientMock struct {
mock.Mock
sent string
}
func (m *forgejoClientMock) Address() string {
@@ -124,20 +122,11 @@ func (m *forgejoClientMock) UpdateTask(ctx context.Context, request *connect.Req
return args.Get(0).(*connect.Response[runnerv1.UpdateTaskResponse]), args.Error(1)
}
func rowsToString(rows []*runnerv1.LogRow) string {
s := ""
for _, row := range rows {
s += row.Content + "\n"
}
return s
}
func (m *forgejoClientMock) UpdateLog(ctx context.Context, request *connect.Request[runnerv1.UpdateLogRequest]) (*connect.Response[runnerv1.UpdateLogResponse], error) {
// Enable for log output from runs if needed.
// for _, row := range request.Msg.Rows {
// println(fmt.Sprintf("UpdateLog: %q", row.Content))
// }
m.sent += rowsToString(request.Msg.Rows)
args := m.Called(ctx, request)
mockRetval := args.Get(0)
mockError := args.Error(1)
@@ -152,83 +141,6 @@ func (m *forgejoClientMock) UpdateLog(ctx context.Context, request *connect.Requ
}), nil
}
func TestRunner_getWriteIsolationKey(t *testing.T) {
t.Run("push", func(t *testing.T) {
key, err := getWriteIsolationKey(t.Context(), "push", "whatever", nil)
require.NoError(t, err)
assert.Empty(t, key)
})
t.Run("pull_request synchronized key is ref", func(t *testing.T) {
expectedKey := "refs/pull/1/head"
actualKey, err := getWriteIsolationKey(t.Context(), "pull_request", expectedKey, map[string]any{
"action": "synchronized",
})
require.NoError(t, err)
assert.Equal(t, expectedKey, actualKey)
})
t.Run("pull_request synchronized ref is invalid", func(t *testing.T) {
invalidKey := "refs/is/invalid"
key, err := getWriteIsolationKey(t.Context(), "pull_request", invalidKey, map[string]any{
"action": "synchronized",
})
require.Empty(t, key)
assert.ErrorContains(t, err, invalidKey)
})
t.Run("pull_request closed and not merged key is ref", func(t *testing.T) {
expectedKey := "refs/pull/1/head"
actualKey, err := getWriteIsolationKey(t.Context(), "pull_request", expectedKey, map[string]any{
"action": "closed",
"pull_request": map[string]any{
"merged": false,
},
})
require.NoError(t, err)
assert.Equal(t, expectedKey, actualKey)
})
t.Run("pull_request closed and merged key is empty", func(t *testing.T) {
key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
"action": "closed",
"pull_request": map[string]any{
"merged": true,
},
})
require.NoError(t, err)
assert.Empty(t, key)
})
t.Run("pull_request missing event.pull_request", func(t *testing.T) {
key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
"action": "closed",
})
require.Empty(t, key)
assert.ErrorContains(t, err, "event.pull_request is not a map")
})
t.Run("pull_request missing event.pull_request.merge", func(t *testing.T) {
key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
"action": "closed",
"pull_request": map[string]any{},
})
require.Empty(t, key)
assert.ErrorContains(t, err, "event.pull_request.merged is not a bool")
})
t.Run("pull_request with event.pull_request.merge of an unexpected type", func(t *testing.T) {
key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
"action": "closed",
"pull_request": map[string]any{
"merged": "string instead of bool",
},
})
require.Empty(t, key)
assert.ErrorContains(t, err, "not a bool but string")
})
}
func TestRunnerCacheConfiguration(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
@@ -432,98 +344,6 @@ jobs:
})
}
func TestRunnerCacheStartupFailure(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
testCases := []struct {
desc string
listen string
}{
{
desc: "disable cache server",
listen: "127.0.0.1:40715",
},
{
desc: "disable cache proxy server",
listen: "127.0.0.1:40716",
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
forgejoClient := &forgejoClientMock{}
forgejoClient.On("Address").Return("https://127.0.0.1:8080") // not expected to be used in this test
forgejoClient.On("UpdateLog", mock.Anything, mock.Anything).Return(nil, nil)
forgejoClient.On("UpdateTask", mock.Anything, mock.Anything).
Return(connect.NewResponse(&runnerv1.UpdateTaskResponse{}), nil)
// We'll be listening on some network port in this test that will conflict with the cache configuration...
l, err := net.Listen("tcp4", tc.listen)
require.NoError(t, err)
defer l.Close()
runner := NewRunner(
&config.Config{
Cache: config.Cache{
Port: 40715,
ProxyPort: 40716,
Dir: t.TempDir(),
},
Host: config.Host{
WorkdirParent: t.TempDir(),
},
},
&config.Registration{
Labels: []string{"ubuntu-latest:docker://code.forgejo.org/oci/node:20-bookworm"},
},
forgejoClient)
require.NotNil(t, runner)
// Ensure that cacheProxy failed to start
assert.Nil(t, runner.cacheProxy)
runWorkflow := func(ctx context.Context, cancel context.CancelFunc, yamlContent string) {
task := &runnerv1.Task{
WorkflowPayload: []byte(yamlContent),
Context: &structpb.Struct{
Fields: map[string]*structpb.Value{
"token": structpb.NewStringValue("some token here"),
"forgejo_default_actions_url": structpb.NewStringValue("https://data.forgejo.org"),
"repository": structpb.NewStringValue("runner"),
"event_name": structpb.NewStringValue("push"),
"ref": structpb.NewStringValue("refs/heads/main"),
},
},
}
reporter := report.NewReporter(ctx, cancel, forgejoClient, task, time.Second)
err := runner.run(ctx, task, reporter)
reporter.Close(nil)
require.NoError(t, err)
}
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
checkCacheYaml := `
name: Verify No ACTIONS_CACHE_URL
on:
push:
jobs:
job-cache-check-1:
runs-on: ubuntu-latest
steps:
- run: echo $ACTIONS_CACHE_URL
- run: '[[ "$ACTIONS_CACHE_URL" = "" ]] || exit 1'
`
runWorkflow(ctx, cancel, checkCacheYaml)
})
}
}
func TestRunnerLXC(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
@@ -594,164 +414,8 @@ jobs:
job:
runs-on: lxc
steps:
- run: mkdir -p some/directory/owned/by/root
- run: echo OK
`
runWorkflow(ctx, cancel, workflow, "push", "refs/heads/main", "OK")
})
}
func TestRunnerResources(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
forgejoClient := &forgejoClientMock{}
forgejoClient.On("Address").Return("https://127.0.0.1:8080") // not expected to be used in this test
forgejoClient.On("UpdateLog", mock.Anything, mock.Anything).Return(nil, nil)
forgejoClient.On("UpdateTask", mock.Anything, mock.Anything).
Return(connect.NewResponse(&runnerv1.UpdateTaskResponse{}), nil)
workdirParent := t.TempDir()
runWorkflow := func(ctx context.Context, cancel context.CancelFunc, yamlContent, options, errorMessage, logMessage string) {
task := &runnerv1.Task{
WorkflowPayload: []byte(yamlContent),
Context: &structpb.Struct{
Fields: map[string]*structpb.Value{
"token": structpb.NewStringValue("some token here"),
"forgejo_default_actions_url": structpb.NewStringValue("https://data.forgejo.org"),
"repository": structpb.NewStringValue("runner"),
"event_name": structpb.NewStringValue("push"),
"ref": structpb.NewStringValue("refs/heads/main"),
},
},
}
runner := NewRunner(
&config.Config{
Log: config.Log{
JobLevel: "trace",
},
Host: config.Host{
WorkdirParent: workdirParent,
},
Container: config.Container{
Options: options,
},
},
&config.Registration{
Labels: []string{"docker:docker://code.forgejo.org/oci/node:20-bookworm"},
},
forgejoClient)
require.NotNil(t, runner)
reporter := report.NewReporter(ctx, cancel, forgejoClient, task, time.Second)
err := runner.run(ctx, task, reporter)
reporter.Close(nil)
if len(errorMessage) > 0 {
require.Error(t, err)
assert.ErrorContains(t, err, errorMessage)
} else {
require.NoError(t, err)
}
if len(logMessage) > 0 {
assert.Contains(t, forgejoClient.sent, logMessage)
}
}
t.Run("config.yaml --memory set and enforced", func(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
workflow := `
on:
push:
jobs:
job:
runs-on: docker
steps:
- run: |
# more than 300MB
perl -e '$a = "a" x (300 * 1024 * 1024)'
`
runWorkflow(ctx, cancel, workflow, "--memory 200M", "Job 'job' failed", "Killed")
})
t.Run("config.yaml --memory set and within limits", func(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
workflow := `
on:
push:
jobs:
job:
runs-on: docker
steps:
- run: echo OK
`
runWorkflow(ctx, cancel, workflow, "--memory 200M", "", "")
})
t.Run("config.yaml --memory set and container fails to increase it", func(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
workflow := `
on:
push:
jobs:
job:
runs-on: docker
container:
image: code.forgejo.org/oci/node:20-bookworm
options: --memory 4G
steps:
- run: |
# more than 300MB
perl -e '$a = "a" x (300 * 1024 * 1024)'
`
runWorkflow(ctx, cancel, workflow, "--memory 200M", "option found in the workflow cannot be greater than", "")
})
t.Run("container --memory set and enforced", func(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
workflow := `
on:
push:
jobs:
job:
runs-on: docker
container:
image: code.forgejo.org/oci/node:20-bookworm
options: --memory 200M
steps:
- run: |
# more than 300MB
perl -e '$a = "a" x (300 * 1024 * 1024)'
`
runWorkflow(ctx, cancel, workflow, "", "Job 'job' failed", "Killed")
})
t.Run("container --memory set and within limits", func(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
workflow := `
on:
push:
jobs:
job:
runs-on: docker
container:
image: code.forgejo.org/oci/node:20-bookworm
options: --memory 200M
steps:
- run: echo OK
`
runWorkflow(ctx, cancel, workflow, "", "", "")
})
}


@@ -110,20 +110,25 @@ cache:
#
external_server: ""
#
#######################################################################
#
# Common to the internal and external cache server
#
#######################################################################
#
# The shared cache secret used to secure the communications between
# the cache proxy and the cache server.
#
# If empty, it will be generated to a new secret automatically when
# the server starts and it will stay the same until it restarts.
#
# Every time the secret is modified, all cache entries that were
# created with it are invalidated. In order to ensure that the cache
# content is reused when the runner restarts, this secret must be
# set, for instance with the output of openssl rand -hex 40.
#
secret: ""
#
#######################################################################
#
# Common to the internal and external cache server
#
#######################################################################
#
# The IP or hostname (195.84.20.30 or example.com) to use when constructing
# ACTIONS_CACHE_URL which is the URL of the cache proxy.
#
@@ -133,7 +138,7 @@ cache:
# different network than the Forgejo runner (for instance when the
# docker server used to create containers is not running on the same
# host as the Forgejo runner), it may be impossible to figure that
# out automatically. In that case you can specify which IP or
# out automatically. In that case you can specifify which IP or
# hostname to use to reach the internal cache server created by the
# Forgejo runner.
#
@@ -176,12 +181,10 @@ container:
# valid_volumes:
# - '**'
valid_volumes: []
# Overrides the docker host set by the DOCKER_HOST environment variable, and mounts on the job container.
# If "-" or "", no docker host will be mounted in the job container
# overrides the docker client host with the specified one.
# If "-" or "", an available docker host will automatically be found.
# If "automount", an available docker host will automatically be found and mounted in the job container (e.g. /var/run/docker.sock).
# If it's a url, the specified docker host will be mounted in the job container
# Example urls: unix:///run/docker.socket or ssh://user@host
# The specified socket is mounted within the job container at /var/run/docker.sock
# Otherwise the specified docker host will be used and an error will be returned if it doesn't work.
docker_host: "-"
# Pull docker image(s) even if already present
force_pull: false


@@ -38,7 +38,7 @@ func (o *masker) add(secret string) {
})
// a multiline secret transformed into a single line by replacing
// newlines with \ followed by n must also be redacted
o.lines = append(o.lines, strings.Join(lines, "\\n"))
secret = strings.Join(lines, "\\n")
}
o.lines = append(o.lines, secret)
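
The fix above redacts both representations of a multiline secret; in isolation, with a hypothetical maskVariants helper:

package main

import (
	"fmt"
	"strings"
)

// maskVariants shows which strings the masker must redact: the secret
// itself plus, for multiline secrets, the single-line form where
// newlines appear as a literal \ followed by n.
func maskVariants(secret string) []string {
	variants := []string{}
	if lines := strings.Split(secret, "\n"); len(lines) > 1 {
		variants = append(variants, strings.Join(lines, "\\n"))
	}
	return append(variants, secret)
}

func main() {
	fmt.Printf("%q\n", maskVariants("ABC\nDEF\nGHI"))
	// ["ABC\\nDEF\\nGHI" "ABC\nDEF\nGHI"]
}

This is the behavior pinned down by the MultilineSecretInSingleRow test added below.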


@@ -7,8 +7,6 @@ import (
"fmt"
"testing"
runnerv1 "code.forgejo.org/forgejo/actions-proto/runner/v1"
"github.com/stretchr/testify/assert"
)
@@ -269,17 +267,4 @@ SIX`
assert.Equal(t, testCase.out, rowsToString(rows))
})
}
t.Run("MultilineSecretInSingleRow", func(t *testing.T) {
secret := "ABC\nDEF\nGHI"
m := newMasker()
m.add(secret)
rows := []*runnerv1.LogRow{
{Content: fmt.Sprintf("BEFORE%sAFTER", secret)},
}
noMore := false
needMore := m.replace(rows, noMore)
assert.False(t, needMore)
assert.Equal(t, "BEFORE***AFTER\n", rowsToString(rows))
})
}


@@ -13,7 +13,6 @@ import (
"time"
runnerv1 "code.forgejo.org/forgejo/actions-proto/runner/v1"
"code.forgejo.org/forgejo/runner/v11/act/runner"
"connectrpc.com/connect"
retry "github.com/avast/retry-go/v4"
log "github.com/sirupsen/logrus"
@@ -48,7 +47,6 @@ type Reporter struct {
debugOutputEnabled bool
stopCommandEndToken string
issuedLocalCancel bool
}
func NewReporter(ctx context.Context, cancel context.CancelFunc, c client.Client, task *runnerv1.Task, reportInterval time.Duration) *Reporter {
@@ -132,12 +130,10 @@ func (r *Reporter) Fire(entry *log.Entry) error {
}
}
}
if r.state.Result == runnerv1.Result_RESULT_SUCCESS {
if v, ok := entry.Data["jobOutputs"]; ok {
_ = r.setOutputs(v.(map[string]string))
} else {
log.Panicf("received log entry with successful jobResult, but without jobOutputs -- outputs will be corrupted for this job")
}
if v, ok := entry.Data["jobOutputs"]; ok {
_ = r.setOutputs(v.(map[string]string))
} else {
log.Panicf("received log entry with jobResult, but without jobOutputs -- outputs will be corrupted for this job")
}
}
if !r.duringSteps() {
@@ -175,7 +171,7 @@ func (r *Reporter) Fire(entry *log.Entry) error {
} else if !r.duringSteps() {
r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
}
if v := runner.GetOuterStepResult(entry); v != nil {
if v, ok := entry.Data["stepResult"]; ok {
if stepResult, ok := r.parseResult(v); ok {
if step.LogLength == 0 {
step.LogIndex = int64(r.logOffset + len(r.logRows))
@@ -193,19 +189,11 @@ func (r *Reporter) RunDaemon() {
return
}
if r.ctx.Err() != nil {
// This shouldn't happen because DaemonContext is used for `r.ctx` which should outlive any running job.
log.Warnf("Terminating RunDaemon on an active job due to error: %v", r.ctx.Err())
return
}
err := r.ReportLog(false)
if err != nil {
log.Warnf("ReportLog error: %v", err)
}
err = r.ReportState()
if err != nil {
log.Warnf("ReportState error: %v", err)
}
_ = r.ReportLog(false)
_ = r.ReportState()
time.AfterFunc(r.reportInterval, r.RunDaemon)
}
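
RunDaemon reschedules itself with time.AfterFunc instead of holding a ticker goroutine; a minimal sketch of that pattern with illustrative names:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// daemon re-arms itself after every interval, like RunDaemon above,
// and stops re-arming once closed is set.
type daemon struct {
	closed   atomic.Bool
	interval time.Duration
}

func (d *daemon) run() {
	if d.closed.Load() {
		return
	}
	fmt.Println("report log and state")
	time.AfterFunc(d.interval, d.run)
}

func main() {
	d := &daemon{interval: 30 * time.Millisecond}
	d.run()
	time.Sleep(100 * time.Millisecond)
	d.closed.Store(true)
	time.Sleep(50 * time.Millisecond) // no more reports after close
}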
@@ -400,17 +388,8 @@ func (r *Reporter) ReportState() error {
r.outputs.Store(k, struct{}{})
}
localResultState := state.GetResult()
remoteResultState := resp.Msg.GetState().GetResult()
switch remoteResultState {
switch resp.Msg.GetState().GetResult() {
case runnerv1.Result_RESULT_CANCELLED, runnerv1.Result_RESULT_FAILURE:
// issuedLocalCancel is just used to deduplicate this log message if our local state doesn't catch up with our
// remote state as quickly as the report-interval, which would cause this message to repeat in the logs.
if !r.issuedLocalCancel && remoteResultState != localResultState {
log.Infof("UpdateTask returned task result %v for a task that was in local state %v - beginning local task termination",
remoteResultState, localResultState)
r.issuedLocalCancel = true
}
r.cancel()
}


@@ -301,20 +301,6 @@ func TestReporter_Fire(t *testing.T) {
value, _ := reporter.outputs.Load("key1")
assert.EqualValues(t, "value1", value)
})
t.Run("jobResult jobOutputs is absent if not success", func(t *testing.T) {
reporter, _, _ := mockReporter(t)
dataStep0 := map[string]any{
"stage": "Post",
"stepNumber": 0,
"raw_output": true,
"jobResult": "skipped",
}
assert.NoError(t, reporter.Fire(&log.Entry{Message: "skipped!", Data: dataStep0}))
assert.EqualValues(t, runnerv1.Result_RESULT_SKIPPED, reporter.state.Result)
})
}
func TestReporterReportState(t *testing.T) {