Merge branch 'main' into fix/reusable-workflow-clone-auth
Commit 5e27fc53c3
19 changed files with 339 additions and 106 deletions
@@ -14,6 +14,7 @@ env:
   SERIAL: "30"
   LIFETIME: "60"
   SYSTEMD_OPTIONS: "--no-pager --full"
+  USE_VERSION: 11.0.6 # renovate: datasource=docker depName=code.forgejo.org/forgejo/forgejo

 jobs:
   example-lxc-systemd:
@@ -57,7 +58,7 @@ jobs:
         with:
           user: root
           password: admin1234
-          binary: https://code.forgejo.org/forgejo/forgejo/releases/download/v7.0.12/forgejo-7.0.12-linux-amd64
+          binary: https://code.forgejo.org/forgejo/forgejo/releases/download/v${{ env.USE_VERSION }}/forgejo-${{ env.USE_VERSION }}-linux-amd64
           # must be the same as LXC_IPV4_PREFIX in examples/lxc-systemd/forgejo-runner-service.sh
           lxc-ip-prefix: 10.105.7
@@ -64,7 +64,7 @@ func StartHandler(targetHost, outboundIP string, port uint16, cacheProxyHostOver
        discard.Out = io.Discard
        logger = discard
    }
-   logger = logger.WithField("module", "artifactcache")
+   logger = logger.WithField("module", "cacheproxy")
    h.logger = logger

    h.cacheSecret = cacheSecret
@@ -139,6 +139,7 @@ func (h *Handler) newReverseProxy(targetHost string) (*httputil.ReverseProxy, er

            r.SetURL(targetURL)
            r.Out.URL.Path = uri
+           h.logger.Debugf("proxy req %s %q to %q", r.In.Method, r.In.URL, r.Out.URL)

            r.Out.Header.Set("Forgejo-Cache-Repo", runData.RepositoryFullName)
            r.Out.Header.Set("Forgejo-Cache-RunNumber", runData.RunNumber)
@@ -150,6 +151,10 @@ func (h *Handler) newReverseProxy(targetHost string) (*httputil.ReverseProxy, er
                r.Out.Header.Set("Forgejo-Cache-WriteIsolationKey", runData.WriteIsolationKey)
            }
        },
+       ModifyResponse: func(r *http.Response) error {
+           h.logger.Debugf("proxy resp %s w/ %d bytes", r.Status, r.ContentLength)
+           return nil
+       },
    }
    return proxy, nil
}
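The hunks above show the cache proxy stamping every forwarded request with Forgejo-Cache-* headers. As a rough sketch of what a cache server behind the proxy could do with them (only the header names come from the diff; the handler shape and port are hypothetical):

package main

import (
    "fmt"
    "net/http"
)

// cacheInfo reads the run metadata that the proxy attaches to each request.
func cacheInfo(w http.ResponseWriter, r *http.Request) {
    repo := r.Header.Get("Forgejo-Cache-Repo")
    run := r.Header.Get("Forgejo-Cache-RunNumber")
    isolation := r.Header.Get("Forgejo-Cache-WriteIsolationKey")
    fmt.Fprintf(w, "repo=%s run=%s isolation=%s\n", repo, run, isolation)
}

func main() {
    http.HandleFunc("/", cacheInfo)
    _ = http.ListenAndServe("127.0.0.1:8088", nil) // hypothetical address
}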
@@ -34,7 +34,6 @@ type HostEnvironment struct {
    Workdir string
    ActPath string
    Root    string
-   CleanUp func()
    StdOut  io.Writer
    LXC     bool
 }
@@ -389,7 +388,7 @@ func (e *HostEnvironment) ExecWithCmdLine(command []string, cmdline string, env
    if err := e.exec(ctx, command, cmdline, env, user, workdir); err != nil {
        select {
        case <-ctx.Done():
-           return fmt.Errorf("this step has been cancelled: %w", err)
+           return fmt.Errorf("this step has been cancelled: ctx: %w, exec: %w", ctx.Err(), err)
        default:
            return err
        }
@@ -404,11 +403,12 @@ func (e *HostEnvironment) UpdateFromEnv(srcPath string, env *map[string]string)

 func (e *HostEnvironment) Remove() common.Executor {
    return func(ctx context.Context) error {
-       if e.CleanUp != nil {
-           e.CleanUp()
+       if e.GetLXC() {
+           // there may be files owned by root: removal
+           // is the responsibility of the LXC backend
+           return nil
        }
        common.Logger(ctx).Debugf("HostEnvironment.Remove %s", e.Path)
-       return os.RemoveAll(e.Path)
+       return os.RemoveAll(e.Root)
    }
 }
@@ -2,6 +2,7 @@ package jobparser

 import (
    "fmt"
+   "strings"

    "code.forgejo.org/forgejo/runner/v11/act/model"
    "go.yaml.in/yaml/v3"
@@ -226,7 +227,7 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
    var val string
    err := rawOn.Decode(&val)
    if err != nil {
-       return nil, err
+       return nil, fmt.Errorf("unable to interpret scalar value into a string: %w", err)
    }
    return []*Event{
        {Name: val},
@@ -238,12 +239,12 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
        return nil, err
    }
    res := make([]*Event, 0, len(val))
-   for _, v := range val {
+   for i, v := range val {
        switch t := v.(type) {
        case string:
            res = append(res, &Event{Name: t})
        default:
-           return nil, fmt.Errorf("invalid type %T", t)
+           return nil, fmt.Errorf("value at index %d was unexpected type %[2]T; must be a string but was %#[2]v", i, v)
        }
    }
    return res, nil
@@ -263,16 +264,6 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
            continue
        }
        switch t := v.(type) {
-       case string:
-           res = append(res, &Event{
-               Name: k,
-               acts: map[string][]string{},
-           })
-       case []string:
-           res = append(res, &Event{
-               Name: k,
-               acts: map[string][]string{},
-           })
        case map[string]any:
            acts := make(map[string][]string, len(t))
            for act, branches := range t {
@@ -286,15 +277,15 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
                for i, v := range b {
                    var ok bool
                    if acts[act][i], ok = v.(string); !ok {
-                       return nil, fmt.Errorf("unknown on type: %#v", branches)
+                       return nil, fmt.Errorf("key %q.%q index %d had unexpected type %[4]T; a string was expected but was %#[4]v", k, act, i, v)
                    }
                }
            case map[string]any:
-               if isInvalidOnType(k, act) {
-                   return nil, fmt.Errorf("unknown on type: %#v", v)
+               if err := isInvalidOnType(k, act); err != nil {
+                   return nil, fmt.Errorf("invalid value on key %q: %w", k, err)
                }
            default:
-               return nil, fmt.Errorf("unknown on type: %#v", branches)
+               return nil, fmt.Errorf("key %q.%q had unexpected type %T; was %#v", k, act, branches, branches)
            }
        }
        if k == "workflow_dispatch" || k == "workflow_call" {
@@ -306,19 +297,22 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
            })
        case []any:
            if k != "schedule" {
-               return nil, fmt.Errorf("unknown on type: %#v", v)
+               return nil, fmt.Errorf("key %q had an unexpected type %T; only the 'schedule' key is expected with this type", k, v)
            }
            schedules := make([]map[string]string, len(t))
            for i, tt := range t {
                vv, ok := tt.(map[string]any)
                if !ok {
-                   return nil, fmt.Errorf("unknown on type: %#v", v)
+                   return nil, fmt.Errorf("key %q[%d] had unexpected type %[3]T; a map with a key \"cron\" was expected, but value was %#[3]v", k, i, tt)
                }
                schedules[i] = make(map[string]string, len(vv))
-               for k, vvv := range vv {
+               for kk, vvv := range vv {
+                   if strings.ToLower(kk) != "cron" {
+                       return nil, fmt.Errorf("key %q[%d] had unexpected key %q; \"cron\" was expected", k, i, kk)
+                   }
                    var ok bool
-                   if schedules[i][k], ok = vvv.(string); !ok {
-                       return nil, fmt.Errorf("unknown on type: %#v", v)
+                   if schedules[i][kk], ok = vvv.(string); !ok {
+                       return nil, fmt.Errorf("key %q[%d].%q had unexpected type %[4]T; a string was expected but was %#[4]v", k, i, kk, vvv)
                    }
                }
            }
@@ -327,23 +321,29 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
                schedules: schedules,
            })
        default:
-           return nil, fmt.Errorf("unknown on type: %#v", v)
+           return nil, fmt.Errorf("key %q had unexpected type %[2]T; expected a map or array but was %#[2]v", k, v)
        }
    }
    return res, nil
 default:
-   return nil, fmt.Errorf("unknown on type: %v", rawOn.Kind)
+   return nil, fmt.Errorf("unexpected yaml node in `on`: %v", rawOn.Kind)
 }
 }

-func isInvalidOnType(onType, subKey string) bool {
-   if onType == "workflow_dispatch" && subKey == "inputs" {
-       return false
+func isInvalidOnType(onType, subKey string) error {
+   if onType == "workflow_dispatch" {
+       if subKey == "inputs" {
+           return nil
+       }
+       return fmt.Errorf("workflow_dispatch only supports key \"inputs\", but key %q was found", subKey)
    }
-   if onType == "workflow_call" && (subKey == "inputs" || subKey == "outputs") {
-       return false
+   if onType == "workflow_call" {
+       if subKey == "inputs" || subKey == "outputs" {
+           return nil
+       }
+       return fmt.Errorf("workflow_call only supports keys \"inputs\" and \"outputs\", but key %q was found", subKey)
    }
-   return true
+   return fmt.Errorf("unexpected key %q.%q", onType, subKey)
 }

 // parseMappingNode parse a mapping node and preserve order.
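The isInvalidOnType change above is a bool-to-error refactor: instead of true/false, the validator returns nil or a descriptive error that the caller wraps with %w. A self-contained sketch of the same pattern (validateOnKey is an illustrative stand-in, not the runner's API):

package main

import "fmt"

// validateOnKey returns nil for an allowed sub-key, or an error that names
// the offending key so the caller can add more context when wrapping it.
func validateOnKey(onType, subKey string) error {
    allowed := map[string][]string{
        "workflow_dispatch": {"inputs"},
        "workflow_call":     {"inputs", "outputs"},
    }
    keys, ok := allowed[onType]
    if !ok {
        return fmt.Errorf("unexpected key %q.%q", onType, subKey)
    }
    for _, k := range keys {
        if subKey == k {
            return nil
        }
    }
    return fmt.Errorf("%s only supports keys %q, but key %q was found", onType, keys, subKey)
}

func main() {
    if err := validateOnKey("workflow_call", "mistake"); err != nil {
        // the caller wraps with its own context, as ParseRawOn does with %w
        fmt.Println(fmt.Errorf("invalid value on key %q: %w", "workflow_call", err))
    }
}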
@@ -16,6 +16,7 @@ func TestParseRawOn(t *testing.T) {
    kases := []struct {
        input  string
        result []*Event
+       err    string
    }{
        {
            input: "on: issue_comment",
@@ -33,7 +34,10 @@ func TestParseRawOn(t *testing.T) {
                },
            },
        },
+       {
+           input: "on: [123]",
+           err:   "value at index 0 was unexpected type int; must be a string but was 123",
+       },
        {
            input: "on:\n - push\n - pull_request",
            result: []*Event{
@@ -45,6 +49,19 @@ func TestParseRawOn(t *testing.T) {
                },
            },
        },
+       {
+           input: "on: { push: null }",
+           result: []*Event{
+               {
+                   Name: "push",
+                   acts: map[string][]string{},
+               },
+           },
+       },
+       {
+           input: "on: { push: 'abc' }",
+           err:   "key \"push\" had unexpected type string; expected a map or array but was \"abc\"",
+       },
        {
            input: "on:\n push:\n branches:\n - master",
            result: []*Event{
@@ -72,6 +89,10 @@ func TestParseRawOn(t *testing.T) {
                },
            },
        },
+       {
+           input: "on:\n branch_protection_rule:\n types: [123, deleted]",
+           err:   "key \"branch_protection_rule\".\"types\" index 0 had unexpected type int; a string was expected but was 123",
+       },
        {
            input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
            result: []*Event{
@@ -189,6 +210,22 @@ func TestParseRawOn(t *testing.T) {
                },
            },
        },
+       {
+           input: "on:\n schedule2:\n - cron: '20 6 * * *'",
+           err:   "key \"schedule2\" had an unexpected type []interface {}; only the 'schedule' key is expected with this type",
+       },
+       {
+           input: "on:\n schedule:\n - 123",
+           err:   "key \"schedule\"[0] had unexpected type int; a map with a key \"cron\" was expected, but value was 123",
+       },
+       {
+           input: "on:\n schedule:\n - corn: '20 6 * * *'",
+           err:   "key \"schedule\"[0] had unexpected key \"corn\"; \"cron\" was expected",
+       },
+       {
+           input: "on:\n schedule:\n - cron: 123",
+           err:   "key \"schedule\"[0].\"cron\" had unexpected type int; a string was expected but was 123",
+       },
        {
            input: `
on:
@@ -222,15 +259,37 @@ on:
                },
            },
        },
+       {
+           input: `
+on:
+  workflow_call:
+    mistake:
+      access-token:
+        description: 'A token passed from the caller workflow'
+        required: false
+`,
+           err: "invalid value on key \"workflow_call\": workflow_call only supports keys \"inputs\" and \"outputs\", but key \"mistake\" was found",
+       },
+       {
+           input: `
+on:
+  workflow_call: { map: 123 }
+`,
+           err: "key \"workflow_call\".\"map\" had unexpected type int; was 123",
+       },
    }
    for _, kase := range kases {
        t.Run(kase.input, func(t *testing.T) {
            origin, err := model.ReadWorkflow(strings.NewReader(kase.input), false)
-           assert.NoError(t, err)
+           require.NoError(t, err)

            events, err := ParseRawOn(&origin.RawOn)
-           assert.NoError(t, err)
-           assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
+           if kase.err != "" {
+               assert.ErrorContains(t, err, kase.err)
+           } else {
+               assert.NoError(t, err)
+               assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
+           }
        })
    }
 }
@@ -11,6 +11,8 @@ LXC_IPV6_PREFIX_DEFAULT="fd15"
 LXC_DOCKER_PREFIX_DEFAULT="172.17"
 LXC_IPV6_DOCKER_PREFIX_DEFAULT="fd00:d0ca"
 LXC_APT_TOO_OLD='1 week ago'
+: ${LXC_TRANSACTION_TIMEOUT:=600}
+LXC_TRANSACTION_LOCK_FILE=/tmp/lxc-helper.lock

 : ${LXC_SUDO:=}
 : ${LXC_CONTAINER_RELEASE:=bookworm}
@@ -28,16 +30,22 @@ function lxc_template_release() {
    echo lxc-helpers-$LXC_CONTAINER_RELEASE
 }

+function lxc_directory() {
+    local name="$1"
+
+    echo /var/lib/lxc/$name
+}
+
 function lxc_root() {
    local name="$1"

-   echo /var/lib/lxc/$name/rootfs
+   echo $(lxc_directory $name)/rootfs
 }

 function lxc_config() {
    local name="$1"

-   echo /var/lib/lxc/$name/config
+   echo $(lxc_directory $name)/config
 }

 function lxc_container_run() {
@@ -47,6 +55,44 @@ function lxc_container_run() {
    $LXC_SUDO lxc-attach --clear-env --name $name -- "$@"
 }

+function lxc_transaction_lock() {
+    exec 7>$LXC_TRANSACTION_LOCK_FILE
+    flock --timeout $LXC_TRANSACTION_TIMEOUT 7
+}
+
+function lxc_transaction_unlock() {
+    exec 7>&-
+}
+
+function lxc_transaction_draft_name() {
+    echo "lxc-helper-draft"
+}
+
+function lxc_transaction_begin() {
+    local name=$1 # not actually used but it helps when reading in the caller
+    local draft=$(lxc_transaction_draft_name)
+
+    lxc_transaction_lock
+    lxc_container_destroy $draft
+
+    echo $draft
+}
+
+function lxc_transaction_commit() {
+    local name=$1
+    local draft=$(lxc_transaction_draft_name)
+
+    # do not use lxc-copy because it is not atomic: if lxc-copy is
+    # interrupted it may leave the $name container half populated
+    $LXC_SUDO sed -i -e "s/$draft/$name/g" \
+        $(lxc_config $draft) \
+        $(lxc_root $draft)/etc/hosts \
+        $(lxc_root $draft)/etc/hostname
+    $LXC_SUDO rm -f $(lxc_root $draft)/var/lib/dhcp/dhclient.*
+    $LXC_SUDO mv $(lxc_directory $draft) $(lxc_directory $name)
+    lxc_transaction_unlock
+}
+
 function lxc_container_run_script_as() {
    local name="$1"
    local user="$2"
@@ -242,7 +288,7 @@ function lxc_container_configure() {
 function lxc_container_install_lxc_helpers() {
    local name="$1"

-   $LXC_SUDO cp -a $LXC_SELF_DIR/lxc-helpers*.sh $root/$LXC_BIN
+   $LXC_SUDO cp -a $LXC_SELF_DIR/lxc-helpers*.sh $(lxc_root $name)/$LXC_BIN
    #
    # Wait for the network to come up
    #
@@ -304,10 +350,9 @@ function lxc_container_stop() {

 function lxc_container_destroy() {
    local name="$1"
-   local root="$2"

    if lxc_exists "$name"; then
-       lxc_container_stop $name $root
+       lxc_container_stop $name
        $LXC_SUDO lxc-destroy --force --name="$name"
    fi
 }
@@ -346,14 +391,15 @@ function lxc_build_template_release() {
        return
    fi

-   local root=$(lxc_root $name)
-   $LXC_SUDO lxc-create --name $name --template debian -- --release=$LXC_CONTAINER_RELEASE
-   echo 'lxc.apparmor.profile = unconfined' | $LXC_SUDO tee -a $(lxc_config $name)
-   lxc_container_install_lxc_helpers $name
-   lxc_container_start $name
-   lxc_container_run $name apt-get update -qq
-   lxc_apt_install $name sudo git python3
-   lxc_container_stop $name
+   local draft=$(lxc_transaction_begin $name)
+   $LXC_SUDO lxc-create --name $draft --template debian -- --release=$LXC_CONTAINER_RELEASE
+   echo 'lxc.apparmor.profile = unconfined' | $LXC_SUDO tee -a $(lxc_config $draft)
+   lxc_container_install_lxc_helpers $draft
+   lxc_container_start $draft
+   lxc_container_run $draft apt-get update -qq
+   lxc_apt_install $draft sudo git python3
+   lxc_container_stop $draft
+   lxc_transaction_commit $name
 }

 function lxc_build_template() {
@@ -368,10 +414,12 @@ function lxc_build_template() {
        lxc_build_template_release
    fi

-   if ! $LXC_SUDO lxc-copy --name=$name --newname=$newname; then
-       echo lxc-copy --name=$name --newname=$newname failed
+   local draft=$(lxc_transaction_begin $newname)
+   if ! $LXC_SUDO lxc-copy --name=$name --newname=$draft; then
+       echo lxc-copy --name=$name --newname=$draft failed
        return 1
    fi
+   lxc_transaction_commit $newname
    lxc_container_configure $newname
 }
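The new lxc_transaction_* helpers serialize template builds behind an flock and build into a draft container that is renamed into place on commit, so an interrupted build never leaves a half-populated template. A rough Go illustration of the same draft-then-rename idea (all names here are made up; os.Rename is atomic within one filesystem on POSIX, which is what the shell mv relies on):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// buildAtomically prepares content in a draft directory, then renames the
// draft to its final location so readers never observe a half-built tree.
func buildAtomically(finalDir string, build func(draft string) error) error {
    draft := finalDir + ".draft" // hypothetical draft name
    if err := os.RemoveAll(draft); err != nil { // discard leftovers, like lxc_transaction_begin
        return err
    }
    if err := build(draft); err != nil {
        return err
    }
    return os.Rename(draft, finalDir) // atomic commit, like the shell mv
}

func main() {
    dir := filepath.Join(os.TempDir(), "lxc-template-demo")
    _ = os.RemoveAll(dir)
    err := buildAtomically(dir, func(draft string) error {
        if err := os.MkdirAll(draft, 0o755); err != nil {
            return err
        }
        return os.WriteFile(filepath.Join(draft, "config"), []byte("built\n"), 0o644)
    })
    fmt.Println("err:", err)
}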
@@ -193,8 +193,6 @@ var lxcHelpers string

 var startTemplate = template.Must(template.New("start").Parse(`#!/bin/bash -e

-exec 5<>/tmp/forgejo-runner-lxc.lock ; flock --timeout 21600 5
-
 LXC_CONTAINER_CONFIG="{{.Config}}"
 LXC_CONTAINER_RELEASE="{{.Release}}"

@@ -248,6 +246,8 @@ var stopTemplate = template.Must(template.New("stop").Parse(`#!/bin/bash
 source $(dirname $0)/lxc-helpers-lib.sh

 lxc_container_destroy "{{.Name}}"
+lxc_maybe_sudo
+$LXC_SUDO rm -fr "{{ .Root }}"
 `))

 func (rc *RunContext) stopHostEnvironment(ctx context.Context) error {
@@ -314,11 +314,8 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
        ToolCache: rc.getToolCache(ctx),
        Workdir:   rc.Config.Workdir,
        ActPath:   actPath,
-       CleanUp: func() {
-           os.RemoveAll(miscpath)
-       },
        StdOut: logWriter,
        LXC:    rc.IsLXCHostEnv(ctx),
    }
    rc.cleanUpJobContainer = func(ctx context.Context) error {
        if err := rc.stopHostEnvironment(ctx); err != nil {
@@ -949,7 +946,7 @@ func (rc *RunContext) Executor() (common.Executor, error) {
            return err
        }
        if res {
-           timeoutctx, cancelTimeOut := evaluateTimeout(ctx, rc.ExprEval, rc.Run.Job().TimeoutMinutes)
+           timeoutctx, cancelTimeOut := evaluateTimeout(ctx, "job", rc.ExprEval, rc.Run.Job().TimeoutMinutes)
            defer cancelTimeOut()

            return executor(timeoutctx)
@@ -177,7 +177,7 @@ func runStepExecutor(step step, stage stepStage, executor common.Executor) commo
            Mode: 0o666,
        })(ctx)

-       timeoutctx, cancelTimeOut := evaluateTimeout(ctx, rc.ExprEval, stepModel.TimeoutMinutes)
+       timeoutctx, cancelTimeOut := evaluateTimeout(ctx, "step", rc.ExprEval, stepModel.TimeoutMinutes)
        defer cancelTimeOut()
        err = executor(timeoutctx)

@@ -213,12 +213,12 @@ func runStepExecutor(step step, stage stepStage, executor common.Executor) commo
    }
 }

-func evaluateTimeout(ctx context.Context, exprEval ExpressionEvaluator, timeoutMinutes string) (context.Context, context.CancelFunc) {
+func evaluateTimeout(ctx context.Context, contextType string, exprEval ExpressionEvaluator, timeoutMinutes string) (context.Context, context.CancelFunc) {
    timeout := exprEval.Interpolate(ctx, timeoutMinutes)
    if timeout != "" {
        timeOutMinutes, err := strconv.ParseInt(timeout, 10, 64)
        if err == nil {
-           common.Logger(ctx).Debugf("the step will stop in timeout-minutes %s", timeout)
+           common.Logger(ctx).Debugf("the %s will stop in timeout-minutes %s", contextType, timeout)
            return context.WithTimeout(ctx, time.Duration(timeOutMinutes)*time.Minute)
        }
        common.Logger(ctx).Errorf("timeout-minutes %s cannot be parsed and will be ignored: %w", timeout, err)
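evaluateTimeout now takes a contextType label ("job" or "step") purely so the log line says which kind of timeout fired; the timeout mechanics are unchanged. A self-contained sketch of the underlying pattern (the names here are illustrative, not the runner's API):

package main

import (
    "context"
    "fmt"
    "strconv"
    "time"
)

// withTimeoutMinutes mimics evaluateTimeout: parse a timeout-minutes string
// and derive a child context; on a parse failure no deadline is applied.
func withTimeoutMinutes(ctx context.Context, kind, minutes string) (context.Context, context.CancelFunc) {
    if minutes != "" {
        if m, err := strconv.ParseInt(minutes, 10, 64); err == nil {
            fmt.Printf("the %s will stop in timeout-minutes %s\n", kind, minutes)
            return context.WithTimeout(ctx, time.Duration(m)*time.Minute)
        }
    }
    return context.WithCancel(ctx)
}

func main() {
    ctx, cancel := withTimeoutMinutes(context.Background(), "step", "30")
    defer cancel()
    deadline, ok := ctx.Deadline()
    fmt.Println(deadline, ok)
}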
@@ -51,7 +51,7 @@ services:
       - 8080:3000

   runner-register:
-    image: code.forgejo.org/forgejo/runner:11.1.1
+    image: code.forgejo.org/forgejo/runner:11.1.2
     links:
       - docker-in-docker
       - forgejo
@@ -77,7 +77,7 @@ services:
     '

   runner-daemon:
-    image: code.forgejo.org/forgejo/runner:11.1.1
+    image: code.forgejo.org/forgejo/runner:11.1.2
     links:
       - docker-in-docker
       - forgejo
@@ -20,9 +20,9 @@ trap "rm -fr $TMPDIR" EXIT
 : ${INPUTS_TOKEN:=}
 : ${INPUTS_FORGEJO:=https://code.forgejo.org}
 : ${INPUTS_LIFETIME:=7d}
-DEFAULT_LXC_HELPERS_VERSION=1.1.0 # renovate: datasource=forgejo-tags depName=forgejo/lxc-helpers
+DEFAULT_LXC_HELPERS_VERSION=1.1.1 # renovate: datasource=forgejo-tags depName=forgejo/lxc-helpers
 : ${INPUTS_LXC_HELPERS_VERSION:=$DEFAULT_LXC_HELPERS_VERSION}
-DEFAULT_RUNNER_VERSION=11.1.1 # renovate: datasource=forgejo-releases depName=forgejo/runner
+DEFAULT_RUNNER_VERSION=11.1.2 # renovate: datasource=forgejo-releases depName=forgejo/runner
 : ${INPUTS_RUNNER_VERSION:=$DEFAULT_RUNNER_VERSION}

 : ${KILL_AFTER:=21600} # 6h == 21600
go.mod: 8 lines changed
@@ -2,7 +2,7 @@ module code.forgejo.org/forgejo/runner/v11

 go 1.24.0

-toolchain go1.24.7
+toolchain go1.24.8

 require (
    code.forgejo.org/forgejo/actions-proto v0.5.2
@@ -13,11 +13,11 @@ require (
    github.com/containerd/errdefs v1.0.0
    github.com/creack/pty v1.1.24
    github.com/distribution/reference v0.6.0
-   github.com/docker/cli v28.4.0+incompatible
+   github.com/docker/cli v28.5.0+incompatible
    github.com/docker/docker v28.4.0+incompatible
    github.com/docker/go-connections v0.6.0
    github.com/go-git/go-billy/v5 v5.6.2
-   github.com/go-git/go-git/v5 v5.16.2
+   github.com/go-git/go-git/v5 v5.16.3
    github.com/gobwas/glob v0.2.3
    github.com/google/go-cmp v0.7.0
    github.com/google/uuid v1.6.0
@@ -39,7 +39,7 @@ require (
    go.yaml.in/yaml/v3 v3.0.4
    golang.org/x/term v0.35.0
    golang.org/x/time v0.13.0
-   google.golang.org/protobuf v1.36.9
+   google.golang.org/protobuf v1.36.10
    gotest.tools/v3 v3.5.2
 )
go.sum: 12 lines changed
@@ -43,8 +43,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY=
-github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v28.5.0+incompatible h1:crVqLrtKsrhC9c00ythRx435H8LiQnUKRtJLRR+Auxk=
+github.com/docker/cli v28.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk=
 github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
@@ -69,8 +69,8 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN
 github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
-github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
+github.com/go-git/go-git/v5 v5.16.3 h1:Z8BtvxZ09bYm/yYNgPKCzgWtaRqDTgIKRgIRHBfU6Z8=
+github.com/go-git/go-git/v5 v5.16.3/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -260,8 +260,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
 google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
 google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
-google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -90,10 +90,10 @@ func (p *poller) Shutdown(ctx context.Context) error {
        return nil

    case <-ctx.Done():
-       log.Trace("forcing the jobs to shutdown")
+       log.Info("forcing the jobs to shutdown")
        p.shutdownJobs()
        <-p.done
-       log.Trace("all jobs have been shutdown")
+       log.Info("all jobs have been shutdown")
        return ctx.Err()
    }
 }
@@ -123,7 +123,7 @@ func setupCache(cfg *config.Config, envs map[string]string) *cacheproxy.Handler
        log.StandardLogger().WithField("module", "cache_request"),
    )
    if err != nil {
-       log.Error("Could not start the cache server, cache will be disabled")
+       log.Errorf("Could not start the cache server, cache will be disabled: %v", err)
        return nil
    }
@@ -150,10 +150,10 @@ func setupCache(cfg *config.Config, envs map[string]string) *cacheproxy.Handler
    )
    if err != nil {
        log.Errorf("cannot init cache proxy, cache will be disabled: %v", err)
-   }
-
-   envs["ACTIONS_CACHE_URL"] = cacheProxy.ExternalURL()
+   } else {
+       envs["ACTIONS_CACHE_URL"] = cacheProxy.ExternalURL()
+   }

    return cacheProxy
 }
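The else-guard matters because a proxy that failed to start must not leave a stale ACTIONS_CACHE_URL for jobs to pick up; the integration test below verifies exactly that. A tiny sketch of the pattern (names and address are hypothetical):

package main

import "fmt"

type proxy struct{ url string }

// startProxy stands in for the cache proxy constructor; returning nil
// signals that the cache should be disabled rather than half-configured.
func startProxy(ok bool) *proxy {
    if !ok {
        return nil
    }
    return &proxy{url: "http://127.0.0.1:40716/"} // hypothetical address
}

func main() {
    envs := map[string]string{}
    if p := startProxy(false); p != nil {
        envs["ACTIONS_CACHE_URL"] = p.url // only advertise a cache that exists
    }
    fmt.Printf("ACTIONS_CACHE_URL=%q\n", envs["ACTIONS_CACHE_URL"])
}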
@@ -4,6 +4,7 @@ import (
    "context"
    "errors"
    "fmt"
+   "net"
    "os"
    "testing"
    "time"

@@ -421,6 +422,98 @@ jobs:
    })
 }

+func TestRunnerCacheStartupFailure(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping integration test")
+   }
+
+   testCases := []struct {
+       desc   string
+       listen string
+   }{
+       {
+           desc:   "disable cache server",
+           listen: "127.0.0.1:40715",
+       },
+       {
+           desc:   "disable cache proxy server",
+           listen: "127.0.0.1:40716",
+       },
+   }
+
+   for _, tc := range testCases {
+       t.Run(tc.desc, func(t *testing.T) {
+           forgejoClient := &forgejoClientMock{}
+
+           forgejoClient.On("Address").Return("https://127.0.0.1:8080") // not expected to be used in this test
+           forgejoClient.On("UpdateLog", mock.Anything, mock.Anything).Return(nil, nil)
+           forgejoClient.On("UpdateTask", mock.Anything, mock.Anything).
+               Return(connect.NewResponse(&runnerv1.UpdateTaskResponse{}), nil)
+
+           // We'll be listening on some network port in this test that will conflict with the cache configuration...
+           l, err := net.Listen("tcp4", tc.listen)
+           require.NoError(t, err)
+           defer l.Close()
+
+           runner := NewRunner(
+               &config.Config{
+                   Cache: config.Cache{
+                       Port:      40715,
+                       ProxyPort: 40716,
+                       Dir:       t.TempDir(),
+                   },
+                   Host: config.Host{
+                       WorkdirParent: t.TempDir(),
+                   },
+               },
+               &config.Registration{
+                   Labels: []string{"ubuntu-latest:docker://code.forgejo.org/oci/node:20-bookworm"},
+               },
+               forgejoClient)
+           require.NotNil(t, runner)
+
+           // Ensure that cacheProxy failed to start
+           assert.Nil(t, runner.cacheProxy)
+
+           runWorkflow := func(ctx context.Context, cancel context.CancelFunc, yamlContent string) {
+               task := &runnerv1.Task{
+                   WorkflowPayload: []byte(yamlContent),
+                   Context: &structpb.Struct{
+                       Fields: map[string]*structpb.Value{
+                           "token":                       structpb.NewStringValue("some token here"),
+                           "forgejo_default_actions_url": structpb.NewStringValue("https://data.forgejo.org"),
+                           "repository":                  structpb.NewStringValue("runner"),
+                           "event_name":                  structpb.NewStringValue("push"),
+                           "ref":                         structpb.NewStringValue("refs/heads/main"),
+                       },
+                   },
+               }
+
+               reporter := report.NewReporter(ctx, cancel, forgejoClient, task, time.Second)
+               err := runner.run(ctx, task, reporter)
+               reporter.Close(nil)
+               require.NoError(t, err)
+           }
+
+           ctx, cancel := context.WithCancel(t.Context())
+           defer cancel()
+
+           checkCacheYaml := `
+name: Verify No ACTIONS_CACHE_URL
+on:
+  push:
+jobs:
+  job-cache-check-1:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo $ACTIONS_CACHE_URL
+      - run: '[[ "$ACTIONS_CACHE_URL" = "" ]] || exit 1'
+`
+           runWorkflow(ctx, cancel, checkCacheYaml)
+       })
+   }
+}
+
 func TestRunnerLXC(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test")
@@ -491,7 +584,7 @@ jobs:
   job:
     runs-on: lxc
     steps:
-      - run: echo OK
+      - run: mkdir -p some/directory/owned/by/root
 `
        runWorkflow(ctx, cancel, workflow, "push", "refs/heads/main", "OK")
    })
@@ -110,25 +110,20 @@ cache:
   #
   external_server: ""
   #
-  #######################################################################
-  #
-  # Common to the internal and external cache server
-  #
-  #######################################################################
   #
   # The shared cache secret used to secure the communications between
   # the cache proxy and the cache server.
   #
   # If empty, it will be generated to a new secret automatically when
   # the server starts and it will stay the same until it restarts.
   #
   # Every time the secret is modified, all cache entries that were
   # created with it are invalidated. In order to ensure that the cache
   # content is reused when the runner restarts, this secret must be
   # set, for instance with the output of openssl rand -hex 40.
   #
   secret: ""
   #
   #######################################################################
   #
   # Common to the internal and external cache server
   #
   #######################################################################
   #
   # The IP or hostname (195.84.20.30 or example.com) to use when constructing
   # ACTIONS_CACHE_URL which is the URL of the cache proxy.
   #
@@ -181,10 +176,12 @@ container:
   # valid_volumes:
   #   - '**'
   valid_volumes: []
-  # overrides the docker client host with the specified one.
-  # If "-" or "", an available docker host will automatically be found.
+  # Overrides the docker host set by the DOCKER_HOST environment variable, and mounts on the job container.
+  # If "-" or "", no docker host will be mounted in the job container
+  # If "automount", an available docker host will automatically be found and mounted in the job container (e.g. /var/run/docker.sock).
+  # Otherwise the specified docker host will be used and an error will be returned if it doesn't work.
   # If it's a url, the specified docker host will be mounted in the job container
   # Example urls: unix:///run/docker.socket or ssh://user@host
   # The specified socket is mounted within the job container at /var/run/docker.sock
   docker_host: "-"
   # Pull docker image(s) even if already present
   force_pull: false
@@ -38,7 +38,7 @@ func (o *masker) add(secret string) {
        })
        // a multiline secret transformed into a single line by replacing
        // newlines with \ followed by n must also be redacted
-       secret = strings.Join(lines, "\\n")
+       o.lines = append(o.lines, strings.Join(lines, "\\n"))
    }

    o.lines = append(o.lines, secret)
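The masker fix registers both the raw multiline secret and its \n-escaped single-line form instead of overwriting one with the other, so a secret that a job flattens onto one line is still redacted; the test below exercises this. A standalone sketch of the idea (only the two-variant registration mirrors the diff; everything else is illustrative):

package main

import (
    "fmt"
    "strings"
)

// maskVariants returns every form of the secret that must be redacted:
// the raw value and the single-line form with literal "\n" separators.
func maskVariants(secret string) []string {
    variants := []string{secret}
    if strings.Contains(secret, "\n") {
        lines := strings.Split(secret, "\n")
        variants = append(variants, strings.Join(lines, "\\n"))
    }
    return variants
}

func main() {
    log := `BEFOREABC\nDEF\nGHIAFTER` // the secret flattened onto one line
    for _, v := range maskVariants("ABC\nDEF\nGHI") {
        log = strings.ReplaceAll(log, v, "***")
    }
    fmt.Println(log) // BEFORE***AFTER
}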
@@ -7,6 +7,8 @@ import (
    "fmt"
    "testing"

+   runnerv1 "code.forgejo.org/forgejo/actions-proto/runner/v1"
+
    "github.com/stretchr/testify/assert"
 )

@@ -267,4 +269,17 @@ SIX`
            assert.Equal(t, testCase.out, rowsToString(rows))
        })
    }
+
+   t.Run("MultilineSecretInSingleRow", func(t *testing.T) {
+       secret := "ABC\nDEF\nGHI"
+       m := newMasker()
+       m.add(secret)
+       rows := []*runnerv1.LogRow{
+           {Content: fmt.Sprintf("BEFORE%sAFTER", secret)},
+       }
+       noMore := false
+       needMore := m.replace(rows, noMore)
+       assert.False(t, needMore)
+       assert.Equal(t, "BEFORE***AFTER\n", rowsToString(rows))
+   })
 }
@@ -48,6 +48,7 @@ type Reporter struct {

    debugOutputEnabled  bool
    stopCommandEndToken string
+   issuedLocalCancel   bool
 }

 func NewReporter(ctx context.Context, cancel context.CancelFunc, c client.Client, task *runnerv1.Task, reportInterval time.Duration) *Reporter {
@@ -192,11 +193,19 @@ func (r *Reporter) RunDaemon() {
        return
    }
+   if r.ctx.Err() != nil {
+       // This shouldn't happen because DaemonContext is used for `r.ctx` which should outlive any running job.
+       log.Warnf("Terminating RunDaemon on an active job due to error: %v", r.ctx.Err())
+       return
+   }

-   _ = r.ReportLog(false)
-   _ = r.ReportState()
+   err := r.ReportLog(false)
+   if err != nil {
+       log.Warnf("ReportLog error: %v", err)
+   }
+   err = r.ReportState()
+   if err != nil {
+       log.Warnf("ReportState error: %v", err)
+   }

    time.AfterFunc(r.reportInterval, r.RunDaemon)
 }
@@ -391,8 +400,17 @@ func (r *Reporter) ReportState() error {
        r.outputs.Store(k, struct{}{})
    }

-   switch resp.Msg.GetState().GetResult() {
+   localResultState := state.GetResult()
+   remoteResultState := resp.Msg.GetState().GetResult()
+   switch remoteResultState {
    case runnerv1.Result_RESULT_CANCELLED, runnerv1.Result_RESULT_FAILURE:
+       // issuedLocalCancel is just used to deduplicate this log message if our local state doesn't catch up with our
+       // remote state as quickly as the report-interval, which would cause this message to repeat in the logs.
+       if !r.issuedLocalCancel && remoteResultState != localResultState {
+           log.Infof("UpdateTask returned task result %v for a task that was in local state %v - beginning local task termination",
+               remoteResultState, localResultState)
+           r.issuedLocalCancel = true
+       }
        r.cancel()
    }