From 51b3bc816c322d73765d0c2a815cb01be2470ebc Mon Sep 17 00:00:00 2001
From: Gabriel Adrian Samfira
Date: Thu, 12 May 2022 21:32:36 +0000
Subject: [PATCH] Add some docs (WiP)

---
 README.md                 |   4 +-
 contrib/garm.service      |  13 ++
 doc/external_provider.md  | 325 ++++++++++++++++++++++++++++++++++++++
 doc/github_credentials.md |  31 ++++
 doc/install.md            |  73 +++++++++
 doc/providers.md          | 140 ++++++++++++++++
 doc/webhooks.md           |  33 ++++
 testdata/config.toml      |  12 +-
 8 files changed, 624 insertions(+), 7 deletions(-)
 create mode 100644 contrib/garm.service
 create mode 100644 doc/external_provider.md
 create mode 100644 doc/github_credentials.md
 create mode 100644 doc/install.md
 create mode 100644 doc/providers.md
 create mode 100644 doc/webhooks.md

diff --git a/README.md b/README.md
index 69de2760..87a8c414 100644
--- a/README.md
+++ b/README.md
@@ -197,7 +197,9 @@ You can choose either one of these. For most cases, ```SQLite3``` should do, but
 
 ### Provider configuration
 
-Garm was designed to be extensible. The database layer as well as the providers are defined as interfaces. Currently the only implementation of a provider is for [LXD](https://linuxcontainers.org/lxd/introduction/), but will be extended to include more providers in the future. LXD is the simplest cloud-like system you can easily set up on any GNU/Linux machine, which allows you to create both containers and Virtual Machines.
+Garm was designed to be extensible. The database layer as well as the providers are defined as interfaces. Currently there are two providers:
+ * [LXD](https://linuxcontainers.org/lxd/introduction/)
+ * External
 
 Garm leverages the virtual machines feature of LXD to create the runners, and the provider itself allows you to separate those machines from the rest of your LXD workloads, by using LXD projects.
 
 Here is a sample config section for an LXD provider:
diff --git a/contrib/garm.service b/contrib/garm.service
new file mode 100644
index 00000000..d0ead1f5
--- /dev/null
+++ b/contrib/garm.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=GitHub Actions Runner Manager (garm)
+After=multi-user.target
+
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/garm -config /etc/garm/config.toml
+Restart=always
+RestartSec=5s
+User=garm
+
+[Install]
+WantedBy=multi-user.target
diff --git a/doc/external_provider.md b/doc/external_provider.md
new file mode 100644
index 00000000..06ebfb47
--- /dev/null
+++ b/doc/external_provider.md
@@ -0,0 +1,325 @@
+# Writing an external provider
+
+The external provider enables you to write a fully functional provider using any scripting or programming language. Garm will then call your executable to manage the lifecycle of the instances hosting the runners. This document describes the API that an executable needs to implement to be usable by ```garm```.
+
+## Environment variables
+
+When ```garm``` calls your executable, a number of environment variables are set, depending on the operation. Two environment variables will always be set, regardless of the operation:
+
+ * ```GARM_COMMAND```
+ * ```GARM_PROVIDER_CONFIG_FILE```
+
+The following variables are specific to some operations:
+
+ * ```GARM_CONTROLLER_ID```
+ * ```GARM_POOL_ID```
+ * ```GARM_INSTANCE_ID```
+
+### The GARM_COMMAND variable
+
+The ```GARM_COMMAND``` environment variable will be set to one of the operations defined in the interface. When your executable is called, you'll need to look at this variable to know which operation you need to execute.
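+
+For example, a provider script might fail fast if the variable is missing before dispatching on it. This is just a minimal sketch of such a check, not part of the required interface:
+
+```bash
+# Bail out early if garm did not tell us which operation to run.
+if [ -z "${GARM_COMMAND:-}" ]; then
+    echo "GARM_COMMAND is not set" >&2
+    exit 1
+fi
+```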
+
+### The GARM_PROVIDER_CONFIG_FILE variable
+
+The ```GARM_PROVIDER_CONFIG_FILE``` variable will contain a path on disk to a file that can contain whatever configuration your executable needs. For example, in the case of the OpenStack external provider, this file contains variables that you would normally find in a ```keystonerc``` file, used to access an OpenStack cloud. But you can use it to add any extra configuration you need.
+
+In your executable, you could implement something like this:
+
+```bash
+if [ -f "${GARM_PROVIDER_CONFIG_FILE}" ];then
+    source "${GARM_PROVIDER_CONFIG_FILE}"
+fi
+```
+
+Which would make the contents of that config available to you. Then you could implement the needed operations:
+
+```bash
+case "${GARM_COMMAND}" in
+    "CreateInstance")
+        # Run the create instance code
+        ;;
+    "DeleteInstance")
+        # Run the delete instance code
+        ;;
+    # .... the rest of the operations detailed in next sections ....
+    *)
+        # handle unknown command
+        echo "unknown command ${GARM_COMMAND}"
+        exit 1
+        ;;
+esac
+```
+
+### The GARM_CONTROLLER_ID variable
+
+The ```GARM_CONTROLLER_ID``` variable is set for two operations:
+
+ * CreateInstance
+ * RemoveAllInstances
+
+This variable contains the ```UUID4``` identifying a ```garm``` installation. Whenever you start up ```garm``` for the first time, a new ```UUID4``` is generated and saved in ```garm's``` database. This ID is meant to be used to track all resources created by ```garm``` within a provider. That way, if you decide to tear it all down, you have a way of identifying what was created by one particular installation of ```garm```.
+
+This is useful if various teams from your company use the same credentials to access a cloud. You won't accidentally clobber someone else's resources.
+
+In most clouds you can attach ```tags``` to resources. You can use the controller ID as one of the tags during the ```CreateInstance``` operation.
+
+### The GARM_POOL_ID variable
+
+The ```GARM_POOL_ID``` environment variable is a ```UUID4``` describing the pool in which a runner is created. This variable is set in two operations:
+
+ * CreateInstance
+ * ListInstances
+
+As with ```GARM_CONTROLLER_ID```, this ID can also be attached as a tag in most clouds.
+
+### The GARM_INSTANCE_ID variable
+
+The ```GARM_INSTANCE_ID``` environment variable is used in four operations:
+
+ * GetInstance
+ * DeleteInstance
+ * Start
+ * Stop
+
+It contains the ```provider_id``` of the instance. The ```provider_id``` is a unique identifier, specific to the IaaS in which the compute resource was created. In OpenStack, it's a ```UUID4```, while in LXD, it's the virtual machine's name.
+
+We need this ID whenever we need to execute an operation that targets one specific runner.
+
+## Operations
+
+The operations that a provider must implement are described in the ```Provider``` interface available [here](https://github.com/cloudbase/garm/blob/main/runner/common/provider.go#L22-L39). The external provider implements this interface, and delegates each operation to your external executable. These operations are:
+
+ * CreateInstance
+ * DeleteInstance
+ * GetInstance
+ * ListInstances
+ * RemoveAllInstances
+ * Stop
+ * Start
+
+The ```AsParams()``` function does not need to be implemented by the external executable.
+
+## CreateInstance
+
+The ```CreateInstance``` command has the most moving parts. The ideal external provider is one that will create all required resources for a fully functional instance and will start the instance.
+Waiting for the instance to start is not necessary. If the instance can reach the ```callback_url``` configured in ```garm```, it will update its own status when it boots.
+
+But aside from creating resources, the ideal external provider is also idempotent, and will clean up after itself in case of failure. If for any reason the executable fails to create the instance, any dependency that it has created up to the point of failure should be cleaned up before returning an error code.
+
+At the very least, it must be able to clean up those resources if it is called with the ```DeleteInstance``` command by ```garm```. Garm will retry creating a failed instance. Before it tries again, it will attempt to run a ```DeleteInstance``` using the ```provider_id``` returned by your executable.
+
+If your executable failed before a ```provider_id``` could be supplied, ```garm``` will send the name of the instance in the ```GARM_INSTANCE_ID``` environment variable.
+
+Your external provider will need to be able to handle both. The instance name generated by ```garm``` will be unique (it contains a UUID4), so it's fairly safe to use when deleting instances.
+
+### CreateInstance inputs
+
+The ```CreateInstance``` command is the only command that receives information using both environment variables and standard input. The available environment variables are:
+
+ * GARM_PROVIDER_CONFIG_FILE - Config file specific to your executable
+ * GARM_COMMAND - the command we need to run
+ * GARM_CONTROLLER_ID - The unique ID of the ```garm``` installation
+ * GARM_POOL_ID - The unique ID of the pool this node is a part of
+
+The information sent in via standard input is a ```json``` serialized instance of the [BootstrapInstance structure](https://github.com/cloudbase/garm/blob/main/params/params.go#L80-L103).
+
+Here is a sample of that:
+
+```json
+{
+    "name": "garm-fc7b3174-9695-460e-b9c7-ae75ee217b53",
+    "tools": [
+        {
+            "os": "osx",
+            "architecture": "x64",
+            "download_url": "https://github.com/actions/runner/releases/download/v2.291.1/actions-runner-osx-x64-2.291.1.tar.gz",
+            "filename": "actions-runner-osx-x64-2.291.1.tar.gz",
+            "sha256_checksum": "1ed51d6f35af946e97bb1e10f1272197ded20dd55186ae463563cd2f58f476dc"
+        },
+        {
+            "os": "linux",
+            "architecture": "x64",
+            "download_url": "https://github.com/actions/runner/releases/download/v2.291.1/actions-runner-linux-x64-2.291.1.tar.gz",
+            "filename": "actions-runner-linux-x64-2.291.1.tar.gz",
+            "sha256_checksum": "1bde3f2baf514adda5f8cf2ce531edd2f6be52ed84b9b6733bf43006d36dcd4c"
+        },
+        {
+            "os": "win",
+            "architecture": "x64",
+            "download_url": "https://github.com/actions/runner/releases/download/v2.291.1/actions-runner-win-x64-2.291.1.zip",
+            "filename": "actions-runner-win-x64-2.291.1.zip",
+            "sha256_checksum": "2a504f852b0ab0362d08a36a84984753c2ac159ef17e5d1cd93f661ecd367cbd"
+        },
+        {
+            "os": "linux",
+            "architecture": "arm",
+            "download_url": "https://github.com/actions/runner/releases/download/v2.291.1/actions-runner-linux-arm-2.291.1.tar.gz",
+            "filename": "actions-runner-linux-arm-2.291.1.tar.gz",
+            "sha256_checksum": "a78e86ba6428a28733730bdff3a807480f9eeb843f4c64bd1bbc45de13e61348"
+        },
+        {
+            "os": "linux",
+            "architecture": "arm64",
+            "download_url": "https://github.com/actions/runner/releases/download/v2.291.1/actions-runner-linux-arm64-2.291.1.tar.gz",
+            "filename": "actions-runner-linux-arm64-2.291.1.tar.gz",
+            "sha256_checksum": "c4823bd8322f80cb24a311ef49273f0677ff938530248242de7df33800a22900"
+        }
+    ],
+    "repo_url": "https://github.com/gabriel-samfira/scripts",
+    "github_runner_access_token": "super secret token",
+    "callback-url": "https://garm.example.com/api/v1/callbacks/status",
+    "instance-token": "super secret JWT token",
+    "ssh-keys": null,
+    "arch": "amd64",
+    "flavor": "m1.small",
+    "image": "050f1e00-7eab-4f47-b10b-796df34d2e6b",
+    "labels": [
+        "ubuntu",
+        "simple-runner",
+        "repo-runner",
+        "self-hosted",
+        "x64",
+        "linux",
+        "runner-controller-id:f9286791-1589-4f39-a106-5b68c2a18af4",
+        "runner-pool-id:fb25f308-7ad2-4769-988e-6ec2935f642a"
+    ],
+    "pool_id": "fb25f308-7ad2-4769-988e-6ec2935f642a"
+}
+```
+
+In your executable you can read in this blob by using something like this:
+
+```bash
+# Test if the stdin file descriptor is opened
+if [ ! -t 0 ]
+then
+    # Read in the information from standard in
+    INPUT=$(cat -)
+fi
+```
+
+Then you can easily parse it. If you're using ```bash```, you can use the amazing [jq json processor](https://stedolan.github.io/jq/). Other programming languages have suitable libraries that can handle ```json```.
+
+You will have to parse the bootstrap params, verify that the requested image exists, gather operating system and CPU architecture information, and using that information, select the appropriate tools for the arch/OS combination you are deploying.
+
+Refer to the OpenStack or Azure providers available in the [providers.d](../contrib/providers.d/) folder.
+
+### CreateInstance outputs
+
+On success, your executable is expected to print to standard output a json that can be deserialized into an ```Instance{}``` structure [defined here](https://github.com/cloudbase/garm/blob/main/params/params.go#L43-L78).
+
+Not all fields are expected to be populated by the provider. The ones that should be set are:
+
+```json
+{
+    "provider_id": "88818ff3-1fca-4cb5-9b37-84bfc3511ea6",
+    "name": "garm-0542a982-4a0d-4aca-aef0-d736c96f61ca",
+    "os_type": "linux",
+    "os_name": "ubuntu",
+    "os_version": "20.04",
+    "os_arch": "x86_64",
+    "status": "running",
+    "pool_id": "41c4a43a-acee-493a-965b-cf340b2c775d",
+    "provider_fault": ""
+}
+```
+
+In case of error, ```garm``` expects at the very least to see a non-zero exit code. If possible, your executable should return as much information as possible via the above ```json```, with the ```status``` field set to ```error``` and the ```provider_fault``` field set to a meaningful error message describing what has happened. That information will be visible when doing a:
+
+```bash
+garm-cli runner show
+```
+
+## DeleteInstance
+
+The ```DeleteInstance``` command will permanently remove an instance from the cloud provider.
+
+Available environment variables:
+
+ * GARM_COMMAND
+ * GARM_PROVIDER_CONFIG_FILE
+ * GARM_INSTANCE_ID
+
+This command is not expected to output anything. On success it should simply ```exit 0```.
+
+If the target instance does not exist in the provider, this command is expected to be a noop.
+
+## GetInstance
+
+NOTE: This operation is currently not used by ```garm```, but should be implemented.
+
+The ```GetInstance``` command will return details about the instance, as seen by the provider.
+
+Available environment variables:
+
+ * GARM_COMMAND
+ * GARM_PROVIDER_CONFIG_FILE
+ * GARM_INSTANCE_ID
+
+On success, this command is expected to return a valid ```json``` that can be deserialized into an ```Instance{}``` structure (see CreateInstance). If possible, IP addresses allocated to the VM should be returned in addition to the sample ```json``` printed above.
+
+On failure, this command is expected to return a non-zero exit code.
+
+## ListInstances
+
+NOTE: This operation is currently not used by ```garm```, but should be implemented.
+
+The ```ListInstances``` command will print to standard output a json that can be deserialized into an **array** of ```Instance{}```.
+
+Available environment variables:
+
+ * GARM_COMMAND
+ * GARM_PROVIDER_CONFIG_FILE
+ * GARM_POOL_ID
+
+This command must list all instances that have been tagged with the value in ```GARM_POOL_ID```.
+
+On success, a ```json``` is expected on standard output.
+
+On failure, a non-zero exit code is expected.
+
+## RemoveAllInstances
+
+NOTE: This operation is currently not used by ```garm```, but should be implemented.
+
+The ```RemoveAllInstances``` operation will remove all resources created in a cloud that have been tagged with the ```GARM_CONTROLLER_ID```.
+
+Available environment variables:
+
+ * GARM_COMMAND
+ * GARM_PROVIDER_CONFIG_FILE
+ * GARM_CONTROLLER_ID
+
+On success, no output is expected.
+
+On failure, a non-zero exit code is expected.
+
+## Start
+
+The ```Start``` operation will start the virtual machine in the selected cloud.
+
+Available environment variables:
+
+ * GARM_COMMAND
+ * GARM_PROVIDER_CONFIG_FILE
+ * GARM_INSTANCE_ID
+
+On success, no output is expected.
+
+On failure, a non-zero exit code is expected.
+
+## Stop
+
+NOTE: This operation is currently not used by ```garm```, but should be implemented.
+
+The ```Stop``` operation will stop the virtual machine in the selected cloud.
+
+Available environment variables:
+
+ * GARM_COMMAND
+ * GARM_PROVIDER_CONFIG_FILE
+ * GARM_INSTANCE_ID
+
+On success, no output is expected.
+
+On failure, a non-zero exit code is expected.
diff --git a/doc/github_credentials.md b/doc/github_credentials.md
new file mode 100644
index 00000000..d1b854ed
--- /dev/null
+++ b/doc/github_credentials.md
@@ -0,0 +1,31 @@
+# Configuring github credentials
+
+Garm needs a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) to create runner registration tokens, list current self-hosted runners and potentially remove them if they become orphaned (the VM was manually removed on the provider).
+
+From the list of scopes, you will need to select:
+
+ * ```public_repo``` - for access to a repository
+ * ```repo``` - for access to a private repository
+ * ```admin:org``` - if you plan on using this with an organization to which you have access
+
+The resulting token must be configured in the ```[[github]]``` section of the config. Sample as follows:
+
+```toml
+# This is a list of credentials that you can define as part of the repository
+# or organization definitions. They are not saved inside the database, as there
+# is no Vault integration (yet). This will change in the future.
+# Credentials defined here can be listed using the API. Obviously, only the name
+# and descriptions are returned.
+[[github]]
+  name = "gabriel"
+  description = "github token for user gabriel"
+  # This is a personal token with access to the repositories and organizations
+  # you plan on adding to garm. The "workflow" option needs to be selected in order
+  # to work with repositories, and the admin:org needs to be set if you plan on
+  # adding an organization.
+  oauth2_token = "super secret token"
+```
+
+The double parentheses mean that this is an array.
+You can specify the ```[[github]]``` section multiple times, with different tokens from different users, or with different access levels. You will then be able to list the available credentials using the API, and reference these credentials when adding repositories or organizations.
+
+The API will only ever return the name and description to the API consumer.
diff --git a/doc/install.md b/doc/install.md
new file mode 100644
index 00000000..4fe584fb
--- /dev/null
+++ b/doc/install.md
@@ -0,0 +1,73 @@
+# Install
+
+## Build from source
+
+You need to have Go installed, then run:
+
+```bash
+git clone https://github.com/cloudbase/garm
+cd garm
+go install ./...
+```
+
+You should now have both ```garm``` and ```garm-cli``` in your ```$GOPATH/bin``` folder.
+
+## Install the service
+
+Add a new system user:
+
+```bash
+useradd --shell /usr/bin/false \
+    --system \
+    --groups lxd \
+    --no-create-home garm
+```
+
+Copy the binary from your ```$GOPATH``` to somewhere in the system ```$PATH```:
+
+```bash
+sudo cp $(go env GOPATH)/bin/garm /usr/local/bin/garm
+```
+
+Create the config folder:
+
+```bash
+sudo mkdir -p /etc/garm
+```
+
+Copy the config template:
+
+```bash
+sudo cp ./testdata/config.toml /etc/garm/
+```
+
+Copy the external provider (optional):
+
+```bash
+sudo cp -a ./contrib/providers.d /etc/garm/
+```
+
+Copy the systemd service file:
+
+```bash
+sudo cp ./contrib/garm.service /etc/systemd/system/
+```
+
+Change permissions on the config folder:
+
+```bash
+sudo chown -R garm:garm /etc/garm
+sudo chmod -R 750 /etc/garm
+```
+
+Enable the service:
+
+```bash
+sudo systemctl enable garm
+```
+
+Customize the config in ```/etc/garm/config.toml```, and start the service:
+
+```bash
+sudo systemctl start garm
+```
\ No newline at end of file
diff --git a/doc/providers.md b/doc/providers.md
new file mode 100644
index 00000000..f41106df
--- /dev/null
+++ b/doc/providers.md
@@ -0,0 +1,140 @@
+# Provider configuration
+
+Garm was designed to be extensible. The database layer as well as the providers are defined as interfaces. Currently there are two providers:
+ * [LXD](https://linuxcontainers.org/lxd/introduction/)
+ * External
+
+LXD is the simplest cloud-like system you can easily set up on any GNU/Linux machine, which enables you to create both containers and Virtual Machines. The ```external``` provider is a special type of provider, which delegates functionality to external executables.
+
+## The LXD provider
+
+Garm leverages the virtual machines feature of LXD to create the runners. Here is a sample config section for an LXD provider:
+
+```toml
+# Currently, providers are defined statically in the config. This is due to the fact
+# that we have not yet added support for storing secrets in something like Barbican
+# or Vault. This will change in the future. However, for now, it's important to remember
+# that once you create a pool using one of the providers defined here, the name of that
+# provider must not be changed, or the pool will no longer work. Make sure you remove any
+# pools before removing or changing a provider.
+[[provider]]
+  # An arbitrary string describing this provider.
+  name = "lxd_local"
+  # Provider type. Garm is designed to allow creating providers which are used to spin
+  # up compute resources, which in turn will run the github runner software.
+  # Currently, LXD is the only supported provider, but more will be written in the future.
+  provider_type = "lxd"
+  # A short description of this provider. The name, description and provider types will
+  # be included in the information returned by the API when listing available providers.
+  description = "Local LXD installation"
+  [provider.lxd]
+    # the path to the unix socket that LXD is listening on. This works if garm and LXD
+    # are on the same system, and this option takes precedence over the "url" option,
+    # which connects over the network.
+    unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket"
+    # When defining a pool for a repository or an organization, you have an option to
+    # specify a "flavor". In LXD terms, this translates to "profiles". Profiles allow
+    # you to customize your instances (memory, cpu, disks, nics, etc).
+    # This option allows you to inject the "default" profile along with the profile selected
+    # by the flavor.
+    include_default_profile = false
+    # enable/disable secure boot. If the image you select for the pool does not have a
+    # signed bootloader, set this to false, otherwise your instances won't boot.
+    secure_boot = false
+    # Project name to use. You can create a separate project in LXD for runners.
+    project_name = "default"
+    # URL is the address on which LXD listens for connections (ex: https://example.com:8443)
+    url = ""
+    # garm supports certificate authentication for LXD remote connections. The easiest way
+    # to get the needed certificates is to install the lxc client and add a remote. The
+    # client_certificate, client_key and tls_server_certificate can then be fetched from
+    # $HOME/snap/lxd/common/config.
+    client_certificate = ""
+    client_key = ""
+    tls_server_certificate = ""
+    [provider.lxd.image_remotes]
+      # Image remotes are important. These are the default remotes used by lxc. The names
+      # of these remotes are important. When specifying an "image" for the pool, that image
+      # can be a hash of an existing image on your local LXD installation or it can be a
+      # remote image from one of these remotes. You can specify the images as follows:
+      # Example:
+      #
+      # * ubuntu:20.04
+      # * ubuntu_daily:20.04
+      # * images:centos/8/cloud
+      #
+      # Ubuntu images come pre-installed with cloud-init which we use to set up the runner
+      # automatically and customize the runner. For non-Ubuntu images, you need to use the
+      # variant that has "/cloud" in the name. Those images come with cloud-init.
+      [provider.lxd.image_remotes.ubuntu]
+        addr = "https://cloud-images.ubuntu.com/releases"
+        public = true
+        protocol = "simplestreams"
+        skip_verify = false
+      [provider.lxd.image_remotes.ubuntu_daily]
+        addr = "https://cloud-images.ubuntu.com/daily"
+        public = true
+        protocol = "simplestreams"
+        skip_verify = false
+      [provider.lxd.image_remotes.images]
+        addr = "https://images.linuxcontainers.org"
+        public = true
+        protocol = "simplestreams"
+        skip_verify = false
+```
+
+You can choose to connect to a local LXD server by using the ```unix_socket_path``` option, or you can connect to a remote LXD cluster/server by using the ```url``` option. If both are specified, the unix socket takes precedence. The config file is fairly well commented, but I will add a note about remotes.
+
+### LXD remotes
+
+By default, garm does not load any image remotes. You get to choose which remotes you add (if any). An image remote is a repository of images that LXD uses to create new instances, either virtual machines or containers. In the absence of any remote, garm will attempt to find the image you configure for a pool of runners on the LXD server we're connecting to.
+If one is present, it will be used, otherwise it will fail and you will need to configure a remote.
+
+The sample config file in this repository has the usual default ```LXD``` remotes:
+
+ * https://cloud-images.ubuntu.com/releases (ubuntu) - Official Ubuntu images
+ * https://cloud-images.ubuntu.com/daily (ubuntu_daily) - Official Ubuntu images, daily build
+ * https://images.linuxcontainers.org (images) - Community maintained images for various operating systems
+
+When creating a new pool, you'll be able to specify which image you want to use. The images are referenced by ```remote_name:image_tag```. For example, if you want to launch a runner on Ubuntu 20.04, the image name would be ```ubuntu:20.04```. For a daily image it would be ```ubuntu_daily:20.04```. And for one of the unofficial images it would be ```images:centos/8-Stream/cloud```. Note, for unofficial images you need to use the tags that have ```/cloud``` in the name. These images come pre-installed with ```cloud-init```, which we need to set up the runners automatically.
+
+You can also create your own image remote, where you can host your own custom images. If you want to build your own images, have a look at [distrobuilder](https://github.com/lxc/distrobuilder).
+
+Image remotes in the ```garm``` config are a map of strings to remote settings. The name of the remote is the last bit of string in the section header. For example, the section ```[provider.lxd.image_remotes.ubuntu_daily]``` defines the image remote named **ubuntu_daily**. Use this name to reference images inside that remote.
+
+## The External provider
+
+The external provider is a special kind of provider. It delegates the functionality needed to create the runners to external executables. These executables can be either binaries or scripts. As long as they adhere to the needed interface, they can be used to create runners in any target IaaS. This is identical to what ```containerd``` does with ```CNIs```.
+
+There is currently one external provider for [OpenStack](https://www.openstack.org/) available in the [contrib folder of this repository](../contrib/providers.d/openstack). The provider is written in ```bash``` and it is just a sample. A production-ready provider would need more error checking and idempotency, but it serves as an example of what can be done. As it stands, it is functional.
+
+The configuration for an external provider is quite simple:
+
+```toml
+# This is an example external provider. External providers are executables that
+# implement the needed interface to create/delete/list compute systems that are used
+# by garm to create runners.
+[[provider]]
+name = "openstack_external"
+description = "external openstack provider"
+provider_type = "external"
+  [provider.external]
+  # config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable.
+  # This file needs to exist, and it must contain valid access credentials to your openstack
+  # cloud.
+  config_file = "/etc/garm/providers.d/openstack/keystonerc"
+  # path on disk to a folder that contains a "garm-external-provider" executable. The executable
+  # can be anything (bash, a binary, python, etc)
+  provider_dir = "/etc/garm/providers.d/openstack"
+```
+
+The external provider has two options that are important:
+
+ * ```provider_dir```
+ * ```config_file```
+
+The ```provider_dir``` option is the location on disk where the executable that implements the business logic of the provider resides.
+The external provider will look for an executable called ```garm-external-provider``` in this folder. If it finds it, it will delegate all operations to it.
+
+The ```config_file``` option is a path on disk to an arbitrary file that is passed to the external executable via the environment variable ```GARM_PROVIDER_CONFIG_FILE```. This file is relevant to the external binary only. In the case of the OpenStack provider, this file contains access information for an OpenStack cloud (what you would typically find in a ```keystonerc``` file) as well as some provider specific options, like whether or not to boot from volume and which tenant network to use. You can check out the [sample config file](../contrib/providers.d/openstack/keystonerc) in this repository.
+
+If you want to implement an external provider, you can use this file for anything you need to pass into the binary when ```garm``` calls it to execute a particular operation.
\ No newline at end of file
diff --git a/doc/webhooks.md b/doc/webhooks.md
new file mode 100644
index 00000000..28ef6eb2
--- /dev/null
+++ b/doc/webhooks.md
@@ -0,0 +1,33 @@
+# Webhooks
+
+Garm is designed to auto-scale github runners based on a few simple rules:
+
+ * A minimum idle runner count can be set for a pool. Garm will attempt to maintain that minimum of idle runners, ready to be used by your workflows.
+ * A maximum number of runners for a pool. This is a hard limit of runners a pool will create, regardless of minimum idle runners.
+ * When a runner is scheduled by github, ```garm``` will automatically spin up a new runner to replace it, obeying the maximum hard limit defined.
+
+To achieve this, ```garm``` relies on [GitHub Webhooks](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). Webhooks allow ```garm``` to react to workflow events from your repository or organization.
+
+In your repository or organization, navigate to ```Settings --> Webhooks```. In the ```Payload URL``` field, enter the URL to the ```garm``` webhook endpoint. The ```garm``` API endpoint for webhooks is:
+
+```
+POST /webhooks
+```
+
+If ```garm``` is running on a server under the domain ```garm.example.com```, then that field should be set to ```https://garm.example.com/webhooks```.
+
+In the webhook configuration page, under ```Content type``` you will need to select ```application/json```, set the proper webhook URL and, really important, **make sure you configure a webhook secret**. Garm will authenticate the payloads to make sure they are coming from GitHub.
+
+The webhook secret must be secure. Use something like this to generate one:
+
+```bash
+gabriel@rossak:~$ function generate_secret () {
+    tr -dc 'a-zA-Z0-9!@#$%^&*()_+?><~\`;' < /dev/urandom | head -c 64;
+    echo ''
+}
+
+gabriel@rossak:~$ generate_secret
+9Q*nsr*S54g0imK64(!2$Ns6C!~VsH(p)cFj+AMLug%LM!R%FOQ
+```
+
+Next, you can choose which events GitHub should send to ```garm``` via webhooks. Click on ```Let me select individual events``` and select ```Workflow jobs``` (should be at the bottom). You can send everything if you want, but any events ```garm``` doesn't care about will simply be ignored.
\ No newline at end of file
diff --git a/testdata/config.toml b/testdata/config.toml
index fcd5ec92..268aa8d4 100644
--- a/testdata/config.toml
+++ b/testdata/config.toml
@@ -6,7 +6,7 @@ callback_url = "https://garm.example.com/api/v1/callbacks/status"
 
 # This folder is defined here for future use. Right now, we create a SSH
 # public/private key-pair.
-config_dir = "/home/runner/garm"
+config_dir = "/etc/garm"
 
 # Uncomment this line if you'd like to log to a file instead of standard output.
 # log_file = "/tmp/runner-manager.log"
@@ -62,7 +62,7 @@ time_to_live = "8760h"
 database = ""
   [database.sqlite3]
     # Path on disk to the sqlite3 database file.
-    db_file = "/home/runner/file.db"
+    db_file = "/etc/garm/garm.db"
 
 
 # Currently, providers are defined statically in the config. This is due to the fact
@@ -145,10 +145,10 @@ description = "external openstack provider"
 provider_type = "external"
   [provider.external]
   # config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
-  config_file = "/home/ubuntu/garm/providers.d/openstack/keystonerc"
+  config_file = "/etc/garm/providers.d/openstack/keystonerc"
   # path on disk to a folder that contains a "garm-external-provider" executable. The executable
   # can be anything (bash, a binary, python, etc)
-  provider_dir = "/home/ubuntu/garm/providers.d/openstack"
+  provider_dir = "/etc/garm/providers.d/openstack"
 
 [[provider]]
 name = "azure_external"
@@ -156,10 +156,10 @@ description = "external azure provider"
 provider_type = "external"
   [provider.external]
   # config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
-  config_file = "/home/ubuntu/garm/providers.d/azure/config.sh"
+  config_file = "/etc/garm/providers.d/azure/config.sh"
   # path on disk to a folder that contains a "garm-external-provider" executable. The executable
   # can be anything (bash, a binary, python, etc)
-  provider_dir = "/home/ubuntu/garm/providers.d/azure"
+  provider_dir = "/etc/garm/providers.d/azure"
 
 # This is a list of credentials that you can define as part of the repository
 # or organization definitions. They are not saved inside the database, as there