2025-05-20 09:40:15 +00:00
|
|
|
// Copyright 2025 Cloudbase Solutions SRL
|
|
|
|
|
//
|
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
|
|
|
// not use this file except in compliance with the License. You may obtain
|
|
|
|
|
// a copy of the License at
|
|
|
|
|
//
|
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
//
|
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
|
// License for the specific language governing permissions and limitations
|
|
|
|
|
// under the License.
|
|
|
|
|
|
2023-06-30 00:57:45 +03:00
|
|
|
package pool
|
|
|
|
|
|
2023-06-27 11:50:04 +00:00
|
|
|
import (
|
|
|
|
|
"sort"
|
|
|
|
|
"strings"
|
|
|
|
|
"sync"
|
|
|
|
|
"sync/atomic"
|
2025-05-14 21:09:02 +00:00
|
|
|
"time"
|
2023-06-27 11:50:04 +00:00
|
|
|
|
2025-06-17 21:03:46 +00:00
|
|
|
"github.com/google/go-github/v72/github"
|
2024-04-01 14:48:31 +00:00
|
|
|
|
2023-07-22 22:26:47 +00:00
|
|
|
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
|
2024-04-01 14:48:31 +00:00
|
|
|
commonParams "github.com/cloudbase/garm-provider-common/params"
|
2025-05-14 21:09:02 +00:00
|
|
|
"github.com/cloudbase/garm/cache"
|
2024-06-20 15:28:56 +00:00
|
|
|
dbCommon "github.com/cloudbase/garm/database/common"
|
|
|
|
|
"github.com/cloudbase/garm/database/watcher"
|
2023-06-27 11:50:04 +00:00
|
|
|
"github.com/cloudbase/garm/params"
|
|
|
|
|
)
|
|
|
|
|
|
Add pool balancing strategy
This change adds the ability to specify the pool balancing strategy to
use when processing queued jobs. Before this change, GARM would round-robin
through all pools that matched the set of tags requested by queued jobs.
When round-robin (default) is used for an entity (repo, org or enterprise)
and you have 2 pools defined for that entity with a common set of tags that
match 10 jobs (for example), then those jobs would trigger the creation of
a new runner in each of the two pools in turn. Job 1 would go to pool 1,
job 2 would go to pool 2, job 3 to pool 1, job 4 to pool 2 and so on.
When "stack" is used, those same 10 jobs would trigger the creation of a
new runner in the pool with the highest priority, every time.
In both cases, if a pool is full, the next one would be tried automatically.
For the stack case, this would mean that if pool 2 had a priority of 10 and
pool 1 would have a priority of 5, pool 2 would be saturated first, then
pool 1.
Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
2024-03-14 20:04:34 +00:00
|
|
|
// poolCacheStore describes a cache of pools that can be iterated over
// when deciding which pool should service a queued job.
type poolCacheStore interface {
	// Next returns the next pool to try, or an error when no pools
	// are available.
	Next() (params.Pool, error)
	// Reset rewinds the iterator to the first pool in the cache.
	Reset()
	// Len returns the number of pools held by the cache.
	Len() int
}
|
|
|
|
|
|
2023-06-27 11:50:04 +00:00
|
|
|
// poolRoundRobin hands out pools from a fixed list, one at a time,
// cycling back to the start after the last pool.
type poolRoundRobin struct {
	// pools holds the candidate pools. When populated via
	// poolsForTags.Add, they are sorted by priority (descending).
	pools []params.Pool
	// next is a monotonically increasing counter used to pick the next
	// pool; it is accessed only through sync/atomic operations.
	next uint32
}
|
|
|
|
|
|
|
|
|
|
func (p *poolRoundRobin) Next() (params.Pool, error) {
|
|
|
|
|
if len(p.pools) == 0 {
|
|
|
|
|
return params.Pool{}, runnerErrors.ErrNoPoolsAvailable
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
n := atomic.AddUint32(&p.next, 1)
|
|
|
|
|
return p.pools[(int(n)-1)%len(p.pools)], nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Len returns the number of pools in the round robin.
func (p *poolRoundRobin) Len() int {
	return len(p.pools)
}
|
|
|
|
|
|
|
|
|
|
// Reset rewinds the round robin so the next call to Next() starts again
// from the first (highest priority) pool.
func (p *poolRoundRobin) Reset() {
	atomic.StoreUint32(&p.next, 0)
}
|
|
|
|
|
|
|
|
|
|
// poolsForTags caches, per tag set, the list of pools that match that
// set of tags, so repeated lookups for the same job labels are cheap.
type poolsForTags struct {
	// pools maps a cache key (the sorted tags joined with "^") to a
	// *poolRoundRobin holding the matching pools.
	pools sync.Map
	// poolCacheType selects the balancing strategy. For the "pack"
	// strategy, Get() resets the round robin on every call so the
	// highest priority pool is always tried first.
	poolCacheType params.PoolBalancerType
}
|
|
|
|
|
|
Add pool balancing strategy
This change adds the ability to specify the pool balancing strategy to
use when processing queued jobs. Before this change, GARM would round-robin
through all pools that matched the set of tags requested by queued jobs.
When round-robin (default) is used for an entity (repo, org or enterprise)
and you have 2 pools defined for that entity with a common set of tags that
match 10 jobs (for example), then those jobs would trigger the creation of
a new runner in each of the two pools in turn. Job 1 would go to pool 1,
job 2 would go to pool 2, job 3 to pool 1, job 4 to pool 2 and so on.
When "stack" is used, those same 10 jobs would trigger the creation of a
new runner in the pool with the highest priority, every time.
In both cases, if a pool is full, the next one would be tried automatically.
For the stack case, this would mean that if pool 2 had a priority of 10 and
pool 1 would have a priority of 5, pool 2 would be saturated first, then
pool 1.
Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
2024-03-14 20:04:34 +00:00
|
|
|
func (p *poolsForTags) Get(tags []string) (poolCacheStore, bool) {
|
2023-06-27 11:50:04 +00:00
|
|
|
sort.Strings(tags)
|
|
|
|
|
key := strings.Join(tags, "^")
|
|
|
|
|
|
|
|
|
|
v, ok := p.pools.Load(key)
|
|
|
|
|
if !ok {
|
|
|
|
|
return nil, false
|
|
|
|
|
}
|
Add pool balancing strategy
This change adds the ability to specify the pool balancing strategy to
use when processing queued jobs. Before this change, GARM would round-robin
through all pools that matched the set of tags requested by queued jobs.
When round-robin (default) is used for an entity (repo, org or enterprise)
and you have 2 pools defined for that entity with a common set of tags that
match 10 jobs (for example), then those jobs would trigger the creation of
a new runner in each of the two pools in turn. Job 1 would go to pool 1,
job 2 would go to pool 2, job 3 to pool 1, job 4 to pool 2 and so on.
When "stack" is used, those same 10 jobs would trigger the creation of a
new runner in the pool with the highest priority, every time.
In both cases, if a pool is full, the next one would be tried automatically.
For the stack case, this would mean that if pool 2 had a priority of 10 and
pool 1 would have a priority of 5, pool 2 would be saturated first, then
pool 1.
Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
2024-03-14 20:04:34 +00:00
|
|
|
poolCache := v.(*poolRoundRobin)
|
2024-03-15 07:26:04 +00:00
|
|
|
if p.poolCacheType == params.PoolBalancerTypePack {
|
Add pool balancing strategy
This change adds the ability to specify the pool balancing strategy to
use when processing queued jobs. Before this change, GARM would round-robin
through all pools that matched the set of tags requested by queued jobs.
When round-robin (default) is used for an entity (repo, org or enterprise)
and you have 2 pools defined for that entity with a common set of tags that
match 10 jobs (for example), then those jobs would trigger the creation of
a new runner in each of the two pools in turn. Job 1 would go to pool 1,
job 2 would go to pool 2, job 3 to pool 1, job 4 to pool 2 and so on.
When "stack" is used, those same 10 jobs would trigger the creation of a
new runner in the pool with the highest priority, every time.
In both cases, if a pool is full, the next one would be tried automatically.
For the stack case, this would mean that if pool 2 had a priority of 10 and
pool 1 would have a priority of 5, pool 2 would be saturated first, then
pool 1.
Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
2024-03-14 20:04:34 +00:00
|
|
|
// When we service a list of jobs, we want to try each pool in turn
|
|
|
|
|
// for each job. Pools are sorted by priority so we always start from the
|
|
|
|
|
// highest priority pool and move on to the next if the first one is full.
|
|
|
|
|
poolCache.Reset()
|
|
|
|
|
}
|
|
|
|
|
return poolCache, true
|
2023-06-27 11:50:04 +00:00
|
|
|
}
|
|
|
|
|
|
2024-03-15 14:35:05 +00:00
|
|
|
func (p *poolsForTags) Add(tags []string, pools []params.Pool) poolCacheStore {
|
Add pool balancing strategy
This change adds the ability to specify the pool balancing strategy to
use when processing queued jobs. Before this change, GARM would round-robin
through all pools that matched the set of tags requested by queued jobs.
When round-robin (default) is used for an entity (repo, org or enterprise)
and you have 2 pools defined for that entity with a common set of tags that
match 10 jobs (for example), then those jobs would trigger the creation of
a new runner in each of the two pools in turn. Job 1 would go to pool 1,
job 2 would go to pool 2, job 3 to pool 1, job 4 to pool 2 and so on.
When "stack" is used, those same 10 jobs would trigger the creation of a
new runner in the pool with the highest priority, every time.
In both cases, if a pool is full, the next one would be tried automatically.
For the stack case, this would mean that if pool 2 had a priority of 10 and
pool 1 would have a priority of 5, pool 2 would be saturated first, then
pool 1.
Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
2024-03-14 20:04:34 +00:00
|
|
|
sort.Slice(pools, func(i, j int) bool {
|
|
|
|
|
return pools[i].Priority > pools[j].Priority
|
|
|
|
|
})
|
|
|
|
|
|
2023-06-27 11:50:04 +00:00
|
|
|
sort.Strings(tags)
|
|
|
|
|
key := strings.Join(tags, "^")
|
|
|
|
|
|
|
|
|
|
poolRR := &poolRoundRobin{pools: pools}
|
|
|
|
|
v, _ := p.pools.LoadOrStore(key, poolRR)
|
|
|
|
|
return v.(*poolRoundRobin)
|
|
|
|
|
}
|
2024-04-01 14:48:31 +00:00
|
|
|
|
|
|
|
|
func instanceInList(instanceName string, instances []commonParams.ProviderInstance) (commonParams.ProviderInstance, bool) {
|
|
|
|
|
for _, val := range instances {
|
|
|
|
|
if val.Name == instanceName {
|
|
|
|
|
return val, true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return commonParams.ProviderInstance{}, false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func controllerIDFromLabels(labels []string) string {
|
|
|
|
|
for _, lbl := range labels {
|
|
|
|
|
if strings.HasPrefix(lbl, controllerLabelPrefix) {
|
2025-05-14 21:09:02 +00:00
|
|
|
trimLength := min(len(controllerLabelPrefix)+1, len(lbl))
|
|
|
|
|
return lbl[trimLength:]
|
2024-04-01 14:48:31 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return ""
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func labelsFromRunner(runner *github.Runner) []string {
|
|
|
|
|
if runner == nil || runner.Labels == nil {
|
|
|
|
|
return []string{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
var labels []string
|
|
|
|
|
for _, val := range runner.Labels {
|
|
|
|
|
if val == nil {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
labels = append(labels, val.GetName())
|
|
|
|
|
}
|
|
|
|
|
return labels
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// isManagedRunner returns true if labels indicate the runner belongs to a pool
|
|
|
|
|
// this manager is responsible for.
|
|
|
|
|
func isManagedRunner(labels []string, controllerID string) bool {
|
|
|
|
|
runnerControllerID := controllerIDFromLabels(labels)
|
|
|
|
|
return runnerControllerID == controllerID
|
|
|
|
|
}
|
2024-06-20 15:28:56 +00:00
|
|
|
|
2025-05-12 21:47:13 +00:00
|
|
|
func composeWatcherFilters(entity params.ForgeEntity) dbCommon.PayloadFilterFunc {
|
2024-06-20 15:28:56 +00:00
|
|
|
// We want to watch for changes in either the controller or the
|
|
|
|
|
// entity itself.
|
|
|
|
|
return watcher.WithAny(
|
|
|
|
|
watcher.WithAll(
|
|
|
|
|
// Updates to the controller
|
|
|
|
|
watcher.WithEntityTypeFilter(dbCommon.ControllerEntityType),
|
|
|
|
|
watcher.WithOperationTypeFilter(dbCommon.UpdateOperation),
|
|
|
|
|
),
|
|
|
|
|
// Any operation on the entity we're managing the pool for.
|
|
|
|
|
watcher.WithEntityFilter(entity),
|
|
|
|
|
// Watch for changes to the github credentials
|
2025-05-12 21:47:13 +00:00
|
|
|
watcher.WithForgeCredentialsFilter(entity.Credentials),
|
2024-06-20 15:28:56 +00:00
|
|
|
)
|
|
|
|
|
}
|
2025-05-14 21:09:02 +00:00
|
|
|
|
|
|
|
|
func (r *basePoolManager) waitForToolsOrCancel() (hasTools, stopped bool) {
|
|
|
|
|
ticker := time.NewTicker(1 * time.Second)
|
|
|
|
|
defer ticker.Stop()
|
|
|
|
|
select {
|
|
|
|
|
case <-ticker.C:
|
|
|
|
|
if _, err := cache.GetGithubToolsCache(r.entity.ID); err != nil {
|
|
|
|
|
return false, false
|
|
|
|
|
}
|
|
|
|
|
return true, false
|
|
|
|
|
case <-r.quit:
|
|
|
|
|
return false, true
|
|
|
|
|
case <-r.ctx.Done():
|
|
|
|
|
return false, true
|
|
|
|
|
}
|
|
|
|
|
}
|