Compare commits
1 commit: main...workshop-a
Commit: efe8286e5a
@@ -1,22 +0,0 @@
.github
.gitignore
.vscode
.devbox
.devcontainer
node_modules
public
resources
tmp
*.md
!content/**/*.md
TESTING.md
Taskfile.yml
devbox.json
devbox.lock
.hugo_build.lock
.htmltest.yml
.htmlvalidate.json
.markdownlint.json

# Ensure package-lock.json is included for npm ci
!package-lock.json
@@ -1,9 +0,0 @@
# Tool versions for development and CI/CD
# These versions are used in:
# - devbox.json (pinned versions)
# - Dockerfile (build arguments)
# - .github/workflows/ci.yaml (CI/CD pipeline)

NODE_VERSION=24.10.0
GO_VERSION=1.25.1
HUGO_VERSION=0.151.0
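Both the CI workflow further down and the Taskfile's `build:oci-image` task consume this file by sourcing it into the shell environment; a minimal local sketch of the same pattern (the image tag below is illustrative only):

```bash
# Minimal sketch: export the pinned versions and reuse them as Docker build args.
set -a                # auto-export every variable assigned while sourcing
source .env.versions
set +a
echo "Node: ${NODE_VERSION}, Go: ${GO_VERSION}, Hugo: ${HUGO_VERSION}"

# Hypothetical local build reusing the pins (tag name is an example only):
docker build \
  --build-arg NODE_VERSION="${NODE_VERSION}" \
  --build-arg GO_VERSION="${GO_VERSION}" \
  --build-arg HUGO_VERSION="${HUGO_VERSION}" \
  -t website-and-documentation:local .
```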
79
.github/workflows/ci.yaml
vendored
|
|
@ -1,79 +0,0 @@
|
|||
name: ci
|
||||
|
||||
on:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Load versions from .env.versions
|
||||
id: versions
|
||||
run: |
|
||||
# Source the versions file
|
||||
set -a
|
||||
source .env.versions
|
||||
set +a
|
||||
|
||||
echo "node_version=${NODE_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "hugo_version=${HUGO_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
echo "Node: ${NODE_VERSION}"
|
||||
echo "Go: ${GO_VERSION}"
|
||||
echo "Hugo: ${HUGO_VERSION}"
|
||||
|
||||
- name: Repository meta
|
||||
id: repository
|
||||
run: |
|
||||
registry=${{ github.server_url }}
|
||||
registry=${registry##http*://}
|
||||
echo "registry=${registry}" >> "$GITHUB_OUTPUT"
|
||||
echo "registry=${registry}"
|
||||
repository="$(echo "${{ github.repository }}" | tr '[:upper:]' '[:lower:]')"
|
||||
echo "repository=${repository}" >> "$GITHUB_OUTPUT"
|
||||
echo "repository=${repository}"
|
||||
|
||||
- name: Docker meta
|
||||
uses: docker/metadata-action@v5
|
||||
id: docker
|
||||
with:
|
||||
images: ${{ steps.repository.outputs.registry }}/${{ steps.repository.outputs.repository }}
|
||||
tags: |
|
||||
type=sha,prefix=
|
||||
type=ref,event=tag
|
||||
-
|
||||
name: Login to registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ steps.repository.outputs.registry }}
|
||||
username: "${{ secrets.PACKAGES_USER }}"
|
||||
password: "${{ secrets.PACKAGES_TOKEN }}"
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
buildkitd-flags: '--allow-insecure-entitlement network.host'
|
||||
driver-opts: network=host
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
allow: network.host
|
||||
network: host
|
||||
tags: ${{ steps.docker.outputs.tags }}
|
||||
labels: ${{ steps.docker.outputs.labels }}
|
||||
build-args: |
|
||||
NODE_VERSION=${{ steps.versions.outputs.node_version }}
|
||||
GO_VERSION=${{ steps.versions.outputs.go_version }}
|
||||
HUGO_VERSION=${{ steps.versions.outputs.hugo_version }}
|
||||
32
.github/workflows/delete-edge.yaml
vendored
|
|
@ -1,32 +0,0 @@
|
|||
name: delete-edge
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: [build]
|
||||
types:
|
||||
- completed
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Replace Image Version
|
||||
run: |
|
||||
sha="${{ github.sha }}"
|
||||
shortSha="${sha:0:7}"
|
||||
echo "Setting image version to: edp.buildth.ing/devfw-cicd/website-and-documentation:${shortSha}"
|
||||
sed -i "s@###IMAGETAG###@edp.buildth.ing/devfw-cicd/website-and-documentation:${shortSha}@g" ./k8s-deployment.yaml
|
||||
|
||||
- name: Delete action
|
||||
uses: https://edp.buildth.ing/DevFW-CICD/edge-connect-delete-action@main
|
||||
id: delete
|
||||
with:
|
||||
configFile: ./edgeconnectdeployment.yaml
|
||||
baseUrl: https://hub.apps.edge.platform.mg3.mdb.osc.live
|
||||
username: ${{ secrets.EDGEXR_PLATFORM_USERNAME }}
|
||||
password: ${{ secrets.EDGEXR_PLATFORM_PASSWORD }}
|
||||
32
.github/workflows/deploy-edge.yaml
vendored
|
|
@ -1,32 +0,0 @@
|
|||
name: deploy-edge
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: [build]
|
||||
types:
|
||||
- completed
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Replace Image Version
|
||||
run: |
|
||||
sha="${{ github.sha }}"
|
||||
shortSha="${sha:0:7}"
|
||||
echo "Setting image version to: edp.buildth.ing/devfw-cicd/website-and-documentation:${shortSha}"
|
||||
sed -i "s@###IMAGETAG###@edp.buildth.ing/devfw-cicd/website-and-documentation:${shortSha}@g" ./k8s-deployment.yaml
|
||||
|
||||
- name: Deploy action
|
||||
uses: https://edp.buildth.ing/DevFW-CICD/edge-connect-deploy-action@main
|
||||
id: deploy
|
||||
with:
|
||||
configFile: ./edgeconnectdeployment.yaml
|
||||
baseUrl: https://hub.apps.edge.platform.mg3.mdb.osc.live
|
||||
username: ${{ secrets.EDGEXR_PLATFORM_USERNAME }}
|
||||
password: ${{ secrets.EDGEXR_PLATFORM_PASSWORD }}
|
||||
149
.github/workflows/release.yaml
vendored
|
|
@ -1,149 +0,0 @@
|
|||
name: release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*.*.*' # Triggers on semantic versioning tags (v1.0.0, v2.1.3, etc.)
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Load versions from .env.versions
|
||||
id: versions
|
||||
run: |
|
||||
set -a
|
||||
source .env.versions
|
||||
set +a
|
||||
|
||||
echo "node_version=${NODE_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "hugo_version=${HUGO_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
echo "Node: ${NODE_VERSION}"
|
||||
echo "Go: ${GO_VERSION}"
|
||||
echo "Hugo: ${HUGO_VERSION}"
|
||||
|
||||
- name: Extract version from tag
|
||||
id: version
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/tags/v}
|
||||
echo "version=${VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "Version: ${VERSION}"
|
||||
|
||||
- name: Repository meta
|
||||
id: repository
|
||||
run: |
|
||||
registry=${{ github.server_url }}
|
||||
registry=${registry##http*://}
|
||||
echo "registry=${registry}" >> "$GITHUB_OUTPUT"
|
||||
echo "registry=${registry}"
|
||||
repository="$(echo "${{ github.repository }}" | tr '[:upper:]' '[:lower:]')"
|
||||
echo "repository=${repository}" >> "$GITHUB_OUTPUT"
|
||||
echo "repository=${repository}"
|
||||
|
||||
- name: Docker meta
|
||||
uses: docker/metadata-action@v5
|
||||
id: docker
|
||||
with:
|
||||
images: ${{ steps.repository.outputs.registry }}/${{ steps.repository.outputs.repository }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=semver,pattern={{major}}
|
||||
type=raw,value=latest
|
||||
|
||||
- name: Login to registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ steps.repository.outputs.registry }}
|
||||
username: "${{ secrets.PACKAGES_USER }}"
|
||||
password: "${{ secrets.PACKAGES_TOKEN }}"
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
buildkitd-flags: '--allow-insecure-entitlement network.host'
|
||||
driver-opts: network=host
|
||||
|
||||
- name: Build and push release images
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
allow: network.host
|
||||
network: host
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.docker.outputs.tags }}
|
||||
labels: ${{ steps.docker.outputs.labels }}
|
||||
build-args: |
|
||||
NODE_VERSION=${{ steps.versions.outputs.node_version }}
|
||||
GO_VERSION=${{ steps.versions.outputs.go_version }}
|
||||
HUGO_VERSION=${{ steps.versions.outputs.hugo_version }}
|
||||
|
||||
- name: Generate changelog
|
||||
id: changelog
|
||||
run: |
|
||||
# Find the previous tag
|
||||
PREVIOUS_TAG=$(git describe --abbrev=0 --tags ${GITHUB_REF}^ 2>/dev/null || echo "")
|
||||
|
||||
if [ -z "$PREVIOUS_TAG" ]; then
|
||||
echo "Erster Release - Changelog von Anfang an"
|
||||
CHANGELOG=$(git log --pretty=format:"- %s (%h)" --no-merges)
|
||||
else
|
||||
echo "Changelog seit ${PREVIOUS_TAG}"
|
||||
CHANGELOG=$(git log ${PREVIOUS_TAG}..${GITHUB_REF} --pretty=format:"- %s (%h)" --no-merges)
|
||||
fi
|
||||
|
||||
# Write to the output file (multiline)
|
||||
{
|
||||
echo 'changelog<<EOF'
|
||||
echo "$CHANGELOG"
|
||||
echo 'EOF'
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Create Forgejo/Gitea Release
|
||||
uses: actions/forgejo-release@v2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
direction: upload
|
||||
release-dir: .
|
||||
title: "Release ${{ steps.version.outputs.version }}"
|
||||
tag: ${{ github.ref_name }}
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
release-notes: |
|
||||
# Release ${{ steps.version.outputs.version }}
|
||||
|
||||
## Docker Images
|
||||
|
||||
Multi-platform images (linux/amd64, linux/arm64) are available:
|
||||
|
||||
```bash
|
||||
docker pull ${{ steps.repository.outputs.registry }}/${{ steps.repository.outputs.repository }}:${{ steps.version.outputs.version }}
|
||||
docker pull ${{ steps.repository.outputs.registry }}/${{ steps.repository.outputs.repository }}:latest
|
||||
```
|
||||
|
||||
## Build Versions
|
||||
|
||||
- Node.js: ${{ steps.versions.outputs.node_version }}
|
||||
- Go: ${{ steps.versions.outputs.go_version }}
|
||||
- Hugo: ${{ steps.versions.outputs.hugo_version }}
|
||||
|
||||
## Changes
|
||||
|
||||
${{ steps.changelog.outputs.changelog }}
|
||||
51
.github/workflows/test.yml
vendored
|
|
@ -1,51 +0,0 @@
|
|||
name: Hugo Site Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Hugo
|
||||
uses: peaceiris/actions-hugo@v3
|
||||
with:
|
||||
hugo-version: 'latest'
|
||||
extended: true
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '24'
|
||||
cache: 'npm'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
npm ci
|
||||
go install github.com/wjdp/htmltest@latest
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
npm run test:build
|
||||
npm run test:markdown
|
||||
npm run test:html
|
||||
|
||||
- name: Run link checker
|
||||
run: htmltest
|
||||
continue-on-error: true
|
||||
|
||||
- name: Upload htmltest results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: htmltest-report
|
||||
path: tmp/.htmltest/
|
||||
38 .gitignore (vendored)
@@ -1,37 +1,7 @@
# Hugo
.hugo_build.lock
public/
resources/_gen/
public

# Node.js / NPM
node_modules/
# for npm devcontainer cli
package-lock.json
package.json

# Test outputs
tmp/
.htmltest/

# devbox
.devbox/

# Task cache
.task/

# Generated build data
data/

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db

# Logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
@@ -1,24 +0,0 @@
DirectoryPath: "public"
CheckExternal: true
CheckInternalHash: true
IgnoreURLs:
  - "^https://example\\.docsy\\.dev"
  - "^https://example\\.com"
  - "^http://localhost"
  - "^/livereload\\.js"
  - "^https://cnoe\\.localtest\\.me"
  - "^https://technologyconversations\\.com"
  - "^https://developers\\.redhat\\.com"
  - "^https://platformengineering\\.org"
  - "^https://cnoe\\.io"
  - "^https://console\\.otc\\.t-systems\\.com"
IgnoreInternalURLs:
  - "/docs-old/"
  - "/blog/"
  - "/docs/v1/"
  - "/docs/architecture/"
  - "/docs/documentation/"
IgnoreInternalEmptyHashes: true
IgnoreDirectoryMissingTrailingSlash: true
IgnoreAltMissing: true
CheckDoctype: true
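A minimal sketch of exercising this configuration locally, assuming htmltest is installed the same way the test workflow installs it and that it reads `.htmltest.yml` from the working directory (its documented default):

```bash
# Install htmltest as the test workflow does, build the site, then check links.
go install github.com/wjdp/htmltest@latest
hugo --gc --minify   # produces the public/ directory referenced by DirectoryPath
htmltest             # reads .htmltest.yml from the current directory by default
```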
@@ -1,27 +0,0 @@
{
  "extends": ["html-validate:recommended"],
  "rules": {
    "no-inline-style": "off",
    "require-sri": "off",
    "no-trailing-whitespace": "off",
    "void-style": "off",
    "wcag/h30": "off",
    "wcag/h32": "off",
    "wcag/h37": "off",
    "no-redundant-role": "off",
    "unique-landmark": "off",
    "no-multiple-main": "off",
    "no-dup-id": "off",
    "element-permitted-content": "off",
    "attr-quotes": "off",
    "empty-heading": "off",
    "element-required-content": "off",
    "long-title": "off",
    "no-raw-characters": "off",
    "valid-id": "off",
    "doctype-style": "off"
  },
  "elements": [
    "html5"
  ]
}
@@ -1,8 +0,0 @@
{
  "default": true,
  "MD013": false,
  "MD033": false,
  "MD041": false,
  "MD024": { "siblings_only": true },
  "MD025": { "front_matter_title": "" }
}
@@ -1,4 +0,0 @@
# Ignore v1 documentation (legacy content with pre-existing lint issues)
content/en/docs/v1/**
content/en/blog/**
content/en/docs-old/**
22 .vscode/settings.json (vendored)
@@ -1,22 +0,0 @@
{
  "peacock.remoteColor": "#61dafb",
  "workbench.colorCustomizations": {
    "activityBar.activeBackground": "#93e6fc",
    "activityBar.background": "#93e6fc",
    "activityBar.foreground": "#15202b",
    "activityBar.inactiveForeground": "#15202b99",
    "activityBarBadge.background": "#fa45d4",
    "activityBarBadge.foreground": "#15202b",
    "commandCenter.border": "#15202b99",
    "sash.hoverBorder": "#93e6fc",
    "statusBar.background": "#61dafb",
    "statusBar.foreground": "#15202b",
    "statusBarItem.hoverBackground": "#2fcefa",
    "statusBarItem.remoteBackground": "#61dafb",
    "statusBarItem.remoteForeground": "#15202b",
    "titleBar.activeBackground": "#61dafb",
    "titleBar.activeForeground": "#15202b",
    "titleBar.inactiveBackground": "#61dafb99",
    "titleBar.inactiveForeground": "#15202b99"
  }
}
69
Dockerfile
|
|
@ -1,69 +0,0 @@
|
|||
# Build arguments for version pinning (matching devbox.json)
|
||||
ARG NODE_VERSION=24.10.0
|
||||
ARG GO_VERSION=1.25.1
|
||||
ARG HUGO_VERSION=0.151.0
|
||||
|
||||
# Build stage - use same versions as local devbox environment
|
||||
FROM node:${NODE_VERSION}-bookworm AS builder
|
||||
|
||||
# Get target architecture for multi-platform builds
|
||||
ARG TARGETARCH
|
||||
|
||||
# Install Git (needed for Hugo's enableGitInfo)
|
||||
RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Go (map TARGETARCH: amd64->amd64, arm64->arm64)
|
||||
ARG GO_VERSION
|
||||
RUN wget -q https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz && \
|
||||
tar -C /usr/local -xzf go${GO_VERSION}.linux-${TARGETARCH}.tar.gz && \
|
||||
rm go${GO_VERSION}.linux-${TARGETARCH}.tar.gz
|
||||
|
||||
ENV PATH="/usr/local/go/bin:${PATH}"
|
||||
ENV GOPATH="/go"
|
||||
ENV PATH="${GOPATH}/bin:${PATH}"
|
||||
|
||||
# Install Hugo extended (map TARGETARCH: amd64->amd64, arm64->arm64)
|
||||
ARG HUGO_VERSION
|
||||
RUN wget -q https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_linux-${TARGETARCH}.tar.gz && \
|
||||
tar -xzf hugo_extended_${HUGO_VERSION}_linux-${TARGETARCH}.tar.gz && \
|
||||
mv hugo /usr/local/bin/ && \
|
||||
rm hugo_extended_${HUGO_VERSION}_linux-${TARGETARCH}.tar.gz && \
|
||||
hugo version
|
||||
|
||||
WORKDIR /src
|
||||
|
||||
# Copy package files and install npm dependencies
|
||||
COPY package*.json ./
|
||||
RUN npm ci
|
||||
|
||||
# Copy all source files
|
||||
COPY . .
|
||||
|
||||
# Build Hugo site (Git info is taken from the current build context, not from .git)
# Hugo looks for .git, does not find it, and automatically skips the Git info
|
||||
RUN hugo --gc --minify
|
||||
|
||||
# Runtime stage - nginx to serve static content
|
||||
FROM nginx:1.27-alpine
|
||||
|
||||
# Copy built site from builder
|
||||
COPY --from=builder /src/public /usr/share/nginx/html
|
||||
|
||||
# Copy custom nginx config
|
||||
RUN echo 'server {' > /etc/nginx/conf.d/default.conf && \
|
||||
echo ' listen 80;' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo ' server_name _;' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo ' root /usr/share/nginx/html;' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo ' index index.html;' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo '' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo ' location / {' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo ' try_files $uri $uri/ /index.html;' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo ' }' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo '' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo ' gzip on;' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo ' gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;' >> /etc/nginx/conf.d/default.conf && \
|
||||
echo '}' >> /etc/nginx/conf.d/default.conf
|
||||
|
||||
EXPOSE 80
|
||||
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
||||
97
README.md
|
|
@ -1,33 +1,88 @@
|
|||
# EDP - Edge Developer Platform
|
||||
# IPCEICIS-DeveloperFramework Documentation
|
||||
|
||||
Documentation for the edgeDeveloperFramework (eDF) project and the resulting Edge Developer Platform (EDP) product suite.
|
||||
This repo contains business and architectural design and documentation of the DeveloperFramework subproject of IPCEI-CIS.
|
||||
|
||||
## Quick Start
|
||||
## How to read and contribute to this documentation locally
|
||||
|
||||
```bash
|
||||
# Install dependencies
|
||||
task deps
|
||||
The documentation is done in [Hugo-format](https://gohugo.io).
|
||||
|
||||
# Start local development server
|
||||
task serve
|
||||
Hugo is a static site generator, so to get the documentation site rendered you need a running Hugo processor. Therefore there is
|
||||
|
||||
# Run tests
|
||||
task test
|
||||
* either a Hugo [`.devcontainer` definition](https://containers.dev/) - just run a devcontainer-aware IDE or CLI, e.g. Visual Studio Code
|
||||
* or a Hugo [`Devbox`-definition](https://www.jetify.com/devbox/) - in this case just run a devbox shell
|
||||
|
||||
# Build production site
|
||||
task build
|
||||
```
|
||||
## Local installation of the Hugo documentation system
|
||||
|
||||
## Documentation
|
||||
We describe two possible ways (one with devcontainer, one with devbox) to get the Hugo-documentation system locally running.
|
||||
|
||||
* [Developer Guide](doc/README-developer.md)
|
||||
* [Technical Writer Guide](doc/README-technical-writer.md)
|
||||
* [Release Notes](doc/RELEASE.md)
|
||||
For both prepare the following three steps:
|
||||
|
||||
## Project
|
||||
1. open a terminal on your local box
|
||||
2. clone this repo: `git clone https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW/website-and-documentation`
|
||||
3. change to the repo working dir: `cd website-and-documentation`
|
||||
|
||||
This is a Hugo-based documentation site for the Edge Developer Platform, built as part of the IPCEI-CIS project.
|
||||
### Possibility 1: Hugo in a devcontainer
|
||||
|
||||
**Website:** Access the documentation at the deployed URL or run locally with `task serve`
|
||||
[`devcontainers`](https://containers.dev/) are containers that run as virtual systems on your local box. The definition is in the `.devcontainer` folder.
|
||||
As a prerequisite you therefore need a running container daemon, e.g. Docker.
|
||||
|
||||
For detailed information, see the documentation in the `doc/` folder.
|
||||
There are several options to create and run the devcontainer - we present here two:
|
||||
|
||||
#### Option 1: Run the container triggered by and connected to an IDE, e.g. VS Code
|
||||
|
||||
1. open the repo in a [Devcontainer-aware tool/IDE](https://containers.dev/supporting) (e.g. `code .`)
|
||||
1. start the `devcontainer` (in VS Code it's `F1 + Reopen in Devcontainer`)
|
||||
1. when the container is up & running just open your browser with `http://localhost:1313/`
|
||||
|
||||
#### Option 2: Run the container natively
|
||||
|
||||
An alternative way to get the container image is the [devcontainer CLI](https://github.com/devcontainers/cli), which lets you run the devcontainer without VS Code.
As a prerequisite you need to follow the install steps of the devcontainer CLI.
|
||||
|
||||
1. start the devcontainer by running: `devcontainer up --workspace-folder .`
|
||||
1. find out the IP address of the devcontainer by using `docker ps` and `docker inspect <id of container>` (see the sketch below)
|
||||
1. when the container is up & running just open your browser with `http://<DOCKER IP>:1313/`
|
||||
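A minimal sketch of steps 2 and 3 above; the container ID is purely illustrative and will differ on your machine:

```bash
# Hypothetical session: locate the devcontainer and read its IP address.
docker ps                                    # note the ID of the running devcontainer
CONTAINER_ID=abc123def456                    # example ID, replace with your own
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$CONTAINER_ID"
# then browse to http://<printed IP>:1313/
```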
|
||||
### Possibility 2: Hugo in a devbox
|
||||
|
||||
[`Devboxes`](https://www.jetify.com/devbox/) are locally isolated environments, managed by the [Nix package manager](https://nix.dev/). So first [prepare the devbox](https://www.jetify.com/docs/devbox/installing_devbox/).
|
||||
|
||||
Then
|
||||
|
||||
1. ```devbox shell```
|
||||
1. In the shell: ```hugo serve``` (a full session is sketched below)
|
||||
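A minimal sketch of such a session, assuming devbox is already installed and you start from the cloned repository root:

```bash
# Hypothetical devbox session (tool versions are resolved from devbox.json / devbox.lock):
cd website-and-documentation   # the working dir from the clone step above
devbox shell                   # enter the Nix-managed environment with Hugo available
hugo serve                     # then browse to http://localhost:1313/
```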
|
||||
|
||||
## Editing
|
||||
|
||||
### Documentation language
|
||||
|
||||
The documentation uses the [Docsy theme](https://www.docsy.dev/).
|
||||
|
||||
So for editing content just go to the `content` folder and edit content according to the [Docsy documentation](https://www.docsy.dev/docs/adding-content/), for example as sketched below.
|
||||
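As a minimal sketch, a new page could be added like this; the section name and front matter values are examples only:

```bash
# Hypothetical example: scaffold a new docs page in the English content tree and preview it.
mkdir -p content/en/docs/my-new-topic
cat > content/en/docs/my-new-topic/_index.md <<'EOF'
---
title: "My New Topic"
weight: 10
---

Describe the new topic here, following the Docsy content conventions.
EOF
hugo serve   # review at http://localhost:1313/
```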
|
||||
## Committing
|
||||
|
||||
After finishing a unit of work, commit and push.
|
||||
|
||||
# Annex
|
||||
|
||||
## Installation steps illustrated
|
||||
|
||||
When you run the above installation, the outputs could typically look like this:
|
||||
|
||||
### In Visual Studio Code
|
||||
|
||||
#### Reopen in Container
|
||||
|
||||

|
||||
|
||||
#### Hugo server is running and (typically) listens to localhost:1313
|
||||
|
||||
After some installation time you have:
|
||||
|
||||

|
||||
|
||||
### Final result in a web browser
|
||||
|
||||

|
||||
|
|
|
|||
225
Taskfile.yml
|
|
@ -1,225 +0,0 @@
|
|||
version: '3'
|
||||
|
||||
vars:
|
||||
HUGO_CMD: hugo
|
||||
NPM_CMD: npm
|
||||
|
||||
tasks:
|
||||
default:
|
||||
desc: Show available tasks
|
||||
cmds:
|
||||
- task --list
|
||||
|
||||
# Build tasks
|
||||
build:
|
||||
desc: Build Hugo site
|
||||
deps:
|
||||
- deps:ensure-npm
|
||||
- build:generate-info
|
||||
cmds:
|
||||
- "{{.HUGO_CMD}} --gc --minify"
|
||||
|
||||
build:dev:
|
||||
desc: Build Hugo site for development
|
||||
deps:
|
||||
- deps:ensure-npm
|
||||
- build:generate-info
|
||||
cmds:
|
||||
- "{{.HUGO_CMD}}"
|
||||
|
||||
build:generate-info:
|
||||
desc: Generate build information (git commit, version, etc.)
|
||||
sources:
|
||||
- .git/HEAD
|
||||
- .git/refs/**/*
|
||||
generates:
|
||||
- data/build_info.json
|
||||
cmds:
|
||||
- ./scripts/generate-build-info.sh
|
||||
|
||||
serve:
|
||||
desc: Start Hugo dev server
|
||||
deps:
|
||||
- deps:ensure-npm
|
||||
- build:generate-info
|
||||
cmds:
|
||||
- "{{.HUGO_CMD}} server"
|
||||
|
||||
clean:
|
||||
desc: Clean build artifacts
|
||||
cmds:
|
||||
- rm -rf public resources/_gen .hugo_build.lock
|
||||
|
||||
# Test tasks
|
||||
test:
|
||||
desc: Run all tests
|
||||
deps:
|
||||
- test:build
|
||||
- test:markdown
|
||||
- test:html
|
||||
- likec4:validate
|
||||
|
||||
test:hugo:
|
||||
desc: Run Hugo-only tests (markdown, HTML, build)
|
||||
deps:
|
||||
- test:build
|
||||
- test:markdown
|
||||
- test:html
|
||||
|
||||
test:full:
|
||||
desc: Run all tests including link check (may have errors in legacy content)
|
||||
deps:
|
||||
- test:build
|
||||
- test:markdown
|
||||
- test:html
|
||||
- test:links
|
||||
- likec4:validate
|
||||
|
||||
test:quick:
|
||||
desc: Run quick tests (without link check)
|
||||
deps:
|
||||
- test:build
|
||||
- test:markdown
|
||||
- likec4:validate
|
||||
|
||||
test:build:
|
||||
desc: Test Hugo build
|
||||
deps:
|
||||
- deps:ensure-npm
|
||||
- build:generate-info
|
||||
cmds:
|
||||
- "{{.HUGO_CMD}} --gc --minify --logLevel info"
|
||||
|
||||
test:markdown:
|
||||
desc: Lint markdown files
|
||||
deps:
|
||||
- deps:ensure-npm
|
||||
cmds:
|
||||
- "{{.NPM_CMD}} run test:markdown"
|
||||
|
||||
test:html:
|
||||
desc: Validate HTML
|
||||
deps:
|
||||
- deps:ensure-npm
|
||||
cmds:
|
||||
- "{{.NPM_CMD}} run test:html"
|
||||
|
||||
test:links:
|
||||
desc: Check links (skips legacy content)
|
||||
cmds:
|
||||
- |
|
||||
# Move legacy dirs outside public temporarily
|
||||
mkdir -p /tmp/htmltest-backup-$$
|
||||
if [ -d "public/docs-old" ]; then mv public/docs-old /tmp/htmltest-backup-$$/; fi
|
||||
if [ -d "public/blog" ]; then mv public/blog /tmp/htmltest-backup-$$/; fi
|
||||
if [ -d "public/_print/docs-old" ]; then mv public/_print/docs-old /tmp/htmltest-backup-$$/docs-old-print; fi
|
||||
|
||||
# Run htmltest
|
||||
htmltest || EXIT_CODE=$?
|
||||
|
||||
# Restore directories
|
||||
if [ -d "/tmp/htmltest-backup-$$/docs-old" ]; then mv /tmp/htmltest-backup-$$/docs-old public/; fi
|
||||
if [ -d "/tmp/htmltest-backup-$$/blog" ]; then mv /tmp/htmltest-backup-$$/blog public/; fi
|
||||
if [ -d "/tmp/htmltest-backup-$$/docs-old-print" ]; then mv /tmp/htmltest-backup-$$/docs-old-print public/_print/docs-old; fi
|
||||
rm -rf /tmp/htmltest-backup-$$
|
||||
|
||||
# Exit with the original exit code
|
||||
exit ${EXIT_CODE:-0}
|
||||
|
||||
# LikeC4 tasks
|
||||
likec4:generate:
|
||||
desc: Generate LikeC4 webcomponent (includes all architecture projects)
|
||||
cmds:
|
||||
- npx likec4 codegen webcomponent --webcomponent-prefix likec4 --outfile static/js/likec4-webcomponent.js resources/edp-likec4 resources/doc-likec4
|
||||
|
||||
likec4:validate:
|
||||
desc: Validate LikeC4 models
|
||||
cmds:
|
||||
- echo "Validating EDP architecture models..."
|
||||
- npx likec4 validate --ignore-layout resources/edp-likec4
|
||||
- echo "Validating Documentation platform models..."
|
||||
- npx likec4 validate --ignore-layout resources/doc-likec4
|
||||
- echo "✓ All LikeC4 models validated successfully"
|
||||
|
||||
likec4:validate:layout:
|
||||
desc: Validate LikeC4 models including layout
|
||||
cmds:
|
||||
- echo "Validating EDP architecture models (including layout)..."
|
||||
- npx likec4 validate resources/edp-likec4
|
||||
- echo "Validating Documentation platform models (including layout)..."
|
||||
- npx likec4 validate resources/doc-likec4
|
||||
- echo "✓ All LikeC4 models and layouts validated successfully"
|
||||
|
||||
likec4:update:
|
||||
desc: Update LikeC4 to latest version
|
||||
cmds:
|
||||
- npm update likec4 --prefix resources/edp-likec4
|
||||
- npm update likec4 --prefix resources/doc-likec4
|
||||
- echo "✓ LikeC4 updated in both projects"
|
||||
|
||||
# Development tasks
|
||||
deps:ensure-npm:
|
||||
desc: Ensure npm dependencies are installed
|
||||
sources:
|
||||
- package.json
|
||||
- package-lock.json
|
||||
generates:
|
||||
- node_modules/.package-lock.json
|
||||
cmds:
|
||||
- "{{.NPM_CMD}} install"
|
||||
status:
|
||||
- test -d node_modules
|
||||
|
||||
deps:install:
|
||||
desc: Install all dependencies
|
||||
cmds:
|
||||
- "{{.NPM_CMD}} install"
|
||||
- "{{.HUGO_CMD}} mod get -u"
|
||||
- "{{.HUGO_CMD}} mod tidy"
|
||||
|
||||
deps:update:
|
||||
desc: Update dependencies
|
||||
cmds:
|
||||
- devbox update
|
||||
- "{{.NPM_CMD}} update"
|
||||
- "{{.HUGO_CMD}} mod get -u"
|
||||
|
||||
# CI/CD
|
||||
ci:
|
||||
desc: Run CI pipeline locally
|
||||
deps:
|
||||
- test
|
||||
|
||||
build:oci-image:
|
||||
desc: Build OCI/Docker image with versions from .env.versions
|
||||
cmds:
|
||||
- |
|
||||
set -a
|
||||
source .env.versions
|
||||
set +a
|
||||
echo "Building OCI image with versions:"
|
||||
echo " NODE_VERSION=${NODE_VERSION}"
|
||||
echo " GO_VERSION=${GO_VERSION}"
|
||||
echo " HUGO_VERSION=${HUGO_VERSION}"
|
||||
docker build --network=host \
|
||||
--build-arg NODE_VERSION=${NODE_VERSION} \
|
||||
--build-arg GO_VERSION=${GO_VERSION} \
|
||||
--build-arg HUGO_VERSION=${HUGO_VERSION} \
|
||||
-t ipceicis-developerframework:latest \
|
||||
-t ipceicis-developerframework:$(git rev-parse --short HEAD) \
|
||||
.
|
||||
|
||||
test:oci-image:
|
||||
desc: Test the built OCI image
|
||||
deps:
|
||||
- build:oci-image
|
||||
cmds:
|
||||
- |
|
||||
echo "Starting container on port 8080..."
|
||||
docker run -d -p 8080:80 --name hugo-test ipceicis-developerframework:latest
|
||||
sleep 2
|
||||
echo "Testing endpoint..."
|
||||
curl -f http://localhost:8080 > /dev/null && echo "✓ Container is running and responding" || echo "✗ Container test failed"
|
||||
echo "Cleaning up..."
|
||||
docker stop hugo-test
|
||||
docker rm hugo-test
|
||||
|
|
@ -1,524 +1,6 @@
|
|||
/*
|
||||
* Telekom-inspired Theme Variables
|
||||
* Based on https://edp.buildth.ing Telekom Design System
|
||||
*/
|
||||
|
||||
// Bootstrap/Docsy Variable Overrides (must be before imports)
|
||||
$primary: #E20074 !default;
|
||||
$secondary: #B6B6B6 !default;
|
||||
$success: #00b367 !default;
|
||||
$info: #0070ad !default;
|
||||
$warning: #ffcc00 !default;
|
||||
$danger: #d52b1e !default;
|
||||
$dark: #000000 !default;
|
||||
$light: #f9fafb !default;
|
||||
Add styles or override variables from the theme here.
|
||||
|
||||
// Link colors
|
||||
$link-color: #E20074 !default;
|
||||
$link-hover-color: #C2005E !default;
|
||||
|
||||
// Body
|
||||
$body-bg: #ffffff !default;
|
||||
$body-color: #000000 !default;
|
||||
|
||||
// Navbar
|
||||
$navbar-light-color: #000000 !default;
|
||||
$navbar-light-hover-color: #E20074 !default;
|
||||
$navbar-light-active-color: #E20074 !default;
|
||||
|
||||
// Fonts
|
||||
$font-family-sans-serif: 'TeleNeo', -apple-system, "Segoe UI", system-ui, Roboto, "Helvetica Neue", Arial, sans-serif !default;
|
||||
$font-family-base: $font-family-sans-serif !default;
|
||||
|
||||
// Telekom TeleNeo Fonts
|
||||
@font-face {
|
||||
font-family: 'TeleNeo';
|
||||
src: url('../fonts/TeleNeoOffice-Thin.53627df9.ttf') format('truetype');
|
||||
font-weight: 300;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'TeleNeo';
|
||||
src: url('../fonts/TeleNeoOffice-Regular.b0a2cff1.ttf') format('truetype');
|
||||
font-weight: 400;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'TeleNeo';
|
||||
src: url('../fonts/TeleNeoOffice-Medium.79fb426d.ttf') format('truetype');
|
||||
font-weight: 500;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'TeleNeo';
|
||||
src: url('../fonts/TeleNeoOffice-Bold.a7bb592b.ttf') format('truetype');
|
||||
font-weight: 600;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
font-family: 'TeleNeo';
|
||||
src: url('../fonts/TeleNeoOffice-ExtraBold.fbe9fe42.ttf') format('truetype');
|
||||
font-weight: 700;
|
||||
font-style: normal;
|
||||
}
|
||||
|
||||
// Primary Colors - Telekom Magenta
|
||||
:root {
|
||||
// Telekom Primary Color (Magenta)
|
||||
--color-primary: #E20074;
|
||||
--color-primary-contrast: #ffffff;
|
||||
--color-primary-dark-1: #C2005E;
|
||||
--color-primary-dark-2: #A5004D;
|
||||
--color-primary-dark-3: #87003D;
|
||||
--color-primary-light-1: #E7338A;
|
||||
--color-primary-light-2: #EC66A1;
|
||||
--color-primary-light-3: #F299B8;
|
||||
--color-primary-light-4: #F7CCCF;
|
||||
--color-primary-light-5: #FCEFF6;
|
||||
--color-primary-light-6: #FFF5FA;
|
||||
|
||||
// Secondary Colors
|
||||
--color-secondary: #B6B6B6;
|
||||
--color-secondary-dark: #6a7178;
|
||||
--color-secondary-light: #f9fafb;
|
||||
|
||||
// Semantic Colors
|
||||
--color-success: #00b367;
|
||||
--color-warning: #ffcc00;
|
||||
--color-error: #d52b1e;
|
||||
--color-info: #0070ad;
|
||||
|
||||
// Text Colors
|
||||
--color-text: #000000;
|
||||
--color-text-light: #666666;
|
||||
--color-text-dark: #000000;
|
||||
|
||||
// Background Colors
|
||||
--color-body: #ffffff;
|
||||
--color-card: #F1F1F1;
|
||||
--color-hover: #F1F1F1;
|
||||
--color-active: #F1F1F1;
|
||||
|
||||
// Navigation
|
||||
--color-nav-bg: #ffffff;
|
||||
--color-nav-text: #000000;
|
||||
--nav-border-color: #B6B6B6;
|
||||
|
||||
// UI Elements
|
||||
--color-input-background: #ffffff;
|
||||
--color-input-border: #cccccc;
|
||||
--color-input-text: #000000;
|
||||
--color-box-body: #f2f2f2;
|
||||
--color-box-header: #e6e6e6;
|
||||
|
||||
// Shadows & Overlays
|
||||
--color-shadow: rgba(0, 0, 0, 0.15);
|
||||
--color-overlay-backdrop: rgba(0, 0, 0, 0.5);
|
||||
|
||||
// Font Settings
|
||||
--font-family-base: 'TeleNeo', -apple-system, "Segoe UI", system-ui, Roboto, "Helvetica Neue", Arial, sans-serif;
|
||||
--nav-text-font-weight: 600;
|
||||
}
|
||||
|
||||
// Apply TeleNeo font globally
|
||||
body {
|
||||
font-family: var(--font-family-base);
|
||||
}
|
||||
|
||||
// Dark Mode Support
|
||||
@media (prefers-color-scheme: dark) {
|
||||
:root {
|
||||
// Primary Colors remain same
|
||||
--color-primary: #E20074;
|
||||
--color-primary-contrast: #000000;
|
||||
|
||||
// Dark Mode Adjustments
|
||||
--color-primary-dark-1: #E7338A;
|
||||
--color-primary-dark-2: #EC66A1;
|
||||
--color-primary-light-1: #C2005E;
|
||||
--color-primary-light-2: #A5004D;
|
||||
|
||||
// Secondary Colors for Dark Mode
|
||||
--color-secondary: #1c1c1e;
|
||||
--color-secondary-dark: #4D4D4D;
|
||||
--color-secondary-light: #0D0D0D;
|
||||
|
||||
// Text Colors
|
||||
--color-text: #FFFFFF;
|
||||
--color-text-light: #CCCCCC;
|
||||
--color-text-dark: #FFFFFF;
|
||||
|
||||
// Background Colors
|
||||
--color-body: #000000;
|
||||
--color-card: #1c1c1e;
|
||||
--color-hover: #1c1c1e;
|
||||
--color-active: #0D0D0D;
|
||||
|
||||
// Navigation
|
||||
--color-nav-bg: #000000;
|
||||
--color-nav-text: #FFFFFF;
|
||||
|
||||
// UI Elements
|
||||
--color-input-background: #1c1c1e;
|
||||
--color-input-border: #4D4D4D;
|
||||
--color-input-text: #FFFFFF;
|
||||
--color-box-body: #000000;
|
||||
--color-box-header: #1A1A1A;
|
||||
|
||||
// Semantic Colors for Dark Mode
|
||||
--color-success: #00A94F;
|
||||
--color-warning: #FFCC00;
|
||||
--color-error: #D52B1E;
|
||||
--color-info: #0070AD;
|
||||
|
||||
// Shadows
|
||||
--color-shadow: rgba(0, 0, 0, 0.35);
|
||||
}
|
||||
}
|
||||
|
||||
// Telekom-inspired Component Styling
|
||||
.td-navbar {
|
||||
background-color: var(--color-nav-bg) !important;
|
||||
border-bottom: 1px solid var(--nav-border-color);
|
||||
|
||||
.navbar-brand,
|
||||
.nav-link {
|
||||
color: var(--color-nav-text) !important;
|
||||
font-weight: var(--nav-text-font-weight);
|
||||
}
|
||||
|
||||
.nav-link:hover,
|
||||
.nav-link.active {
|
||||
color: var(--color-primary) !important;
|
||||
background: transparent !important;
|
||||
}
|
||||
}
|
||||
|
||||
// Primary Buttons - Telekom Magenta
|
||||
.btn-primary {
|
||||
background-color: var(--color-primary) !important;
|
||||
border-color: var(--color-primary) !important;
|
||||
color: var(--color-primary-contrast) !important;
|
||||
|
||||
&:hover {
|
||||
background-color: var(--color-primary-dark-1) !important;
|
||||
border-color: var(--color-primary-dark-1) !important;
|
||||
}
|
||||
|
||||
&:active,
|
||||
&:focus {
|
||||
background-color: var(--color-primary-dark-2) !important;
|
||||
border-color: var(--color-primary-dark-2) !important;
|
||||
}
|
||||
}
|
||||
|
||||
// Links
|
||||
a {
|
||||
color: var(--color-primary);
|
||||
|
||||
&:hover {
|
||||
color: var(--color-primary-dark-1);
|
||||
}
|
||||
}
|
||||
|
||||
// Cards with Telekom Style
|
||||
.card {
|
||||
background-color: var(--color-card);
|
||||
border: 1px solid var(--nav-border-color);
|
||||
|
||||
&:hover {
|
||||
background-color: var(--color-hover);
|
||||
}
|
||||
}
|
||||
|
||||
// Active/Selected States - REMOVED harsh black backgrounds
|
||||
// Now using soft Telekom colors instead
|
||||
|
||||
// Sidebar Navigation
|
||||
.td-sidebar-nav {
|
||||
.td-sidebar-link {
|
||||
&:hover {
|
||||
background-color: var(--color-primary-light-5) !important;
|
||||
color: var(--color-primary) !important;
|
||||
}
|
||||
|
||||
&.active {
|
||||
background-color: var(--color-primary-light-6);
|
||||
color: var(--color-primary);
|
||||
font-weight: 500;
|
||||
border-left: 3px solid var(--color-primary);
|
||||
}
|
||||
}
|
||||
|
||||
// All list items in sidebar
|
||||
li a {
|
||||
&:hover {
|
||||
background-color: var(--color-primary-light-5) !important;
|
||||
color: var(--color-primary) !important;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Main navigation tabs
|
||||
.td-sidebar {
|
||||
.td-sidebar-nav__section {
|
||||
.ul-1 > li > a {
|
||||
&.active,
|
||||
&.td-sidebar-link--active {
|
||||
background-color: var(--color-primary-light-6) !important;
|
||||
color: var(--color-primary) !important;
|
||||
font-weight: 500;
|
||||
border-left: 3px solid var(--color-primary);
|
||||
}
|
||||
|
||||
&:hover {
|
||||
background-color: var(--color-primary-light-5) !important;
|
||||
color: var(--color-primary) !important;
|
||||
}
|
||||
}
|
||||
|
||||
// All nested levels
|
||||
li a:hover {
|
||||
background-color: var(--color-primary-light-5) !important;
|
||||
color: var(--color-primary) !important;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Top navigation breadcrumb area
|
||||
.td-sidebar__inner {
|
||||
.td-sidebar-nav__section-title {
|
||||
&.active {
|
||||
background-color: var(--color-primary-light-5) !important;
|
||||
color: var(--color-primary) !important;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Breadcrumb navigation in header
|
||||
.breadcrumb {
|
||||
.active {
|
||||
color: var(--color-primary) !important;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
color: var(--color-primary-dark-1) !important;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove harsh black backgrounds globally
|
||||
.active,
|
||||
.selected {
|
||||
background-color: var(--color-primary-light-6) !important;
|
||||
color: var(--color-primary) !important;
|
||||
}
|
||||
|
||||
// Softer hover states
|
||||
*:hover {
|
||||
transition: all 0.2s ease-in-out;
|
||||
}
|
||||
|
||||
// Override any dark/black hover backgrounds in navigation
|
||||
nav, .td-sidebar, .td-sidebar-nav {
|
||||
a:hover,
|
||||
li:hover > a,
|
||||
.nav-link:hover {
|
||||
background-color: var(--color-primary-light-5) !important;
|
||||
color: var(--color-primary) !important;
|
||||
}
|
||||
}
|
||||
|
||||
// Code Blocks
|
||||
pre,
|
||||
code {
|
||||
background-color: var(--color-box-body);
|
||||
border: 1px solid var(--color-input-border);
|
||||
}
|
||||
|
||||
// Inline code (backticks in text)
|
||||
code {
|
||||
background-color: var(--color-primary-light-6);
|
||||
color: var(--color-primary-dark-2);
|
||||
padding: 2px 6px;
|
||||
border-radius: 3px;
|
||||
border: 1px solid var(--color-primary-light-3);
|
||||
font-size: 0.9em;
|
||||
}
|
||||
|
||||
// Code blocks (fenced code)
|
||||
pre {
|
||||
background-color: var(--color-box-body);
|
||||
border: 1px solid var(--color-input-border);
|
||||
padding: 1rem;
|
||||
border-radius: 4px;
|
||||
|
||||
code {
|
||||
background-color: transparent;
|
||||
border: none;
|
||||
padding: 0;
|
||||
color: inherit;
|
||||
}
|
||||
}
|
||||
|
||||
// Tables
|
||||
table {
|
||||
thead {
|
||||
background-color: var(--color-box-header);
|
||||
}
|
||||
|
||||
tbody tr:hover {
|
||||
background-color: var(--color-hover);
|
||||
}
|
||||
}
|
||||
|
||||
// Alerts/Notifications
|
||||
.alert-success {
|
||||
background-color: var(--color-success);
|
||||
border-color: var(--color-success);
|
||||
}
|
||||
|
||||
.alert-warning {
|
||||
background-color: var(--color-warning);
|
||||
border-color: var(--color-warning);
|
||||
color: #000000;
|
||||
}
|
||||
|
||||
.alert-danger {
|
||||
background-color: var(--color-error);
|
||||
border-color: var(--color-error);
|
||||
}
|
||||
|
||||
.alert-info {
|
||||
background-color: var(--color-info);
|
||||
border-color: var(--color-info);
|
||||
}
|
||||
|
||||
// Docsy Homepage Components
|
||||
.td-cover-block {
|
||||
background-color: var(--color-primary) !important;
|
||||
|
||||
h1, h2, h3, h4, h5, h6, p {
|
||||
color: var(--color-primary-contrast) !important;
|
||||
}
|
||||
}
|
||||
|
||||
// Lead blocks with primary color background
|
||||
.td-block--primary,
|
||||
section[class*="bg-primary"],
|
||||
section[class*="color-primary"] {
|
||||
background-color: var(--color-primary) !important;
|
||||
|
||||
* {
|
||||
color: #FFFFFF !important;
|
||||
}
|
||||
|
||||
h1, h2, h3, h4, h5, h6, p, a, .lead {
|
||||
color: #FFFFFF !important;
|
||||
text-shadow: 0 1px 3px rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
// Prevent white background on hover
|
||||
&:hover,
|
||||
*:hover {
|
||||
background-color: transparent !important;
|
||||
color: #FFFFFF !important;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
color: #FFFFFF !important;
|
||||
text-decoration: underline;
|
||||
}
|
||||
}
|
||||
|
||||
.td-box {
|
||||
background-color: var(--color-card);
|
||||
border: 1px solid var(--nav-border-color);
|
||||
|
||||
&:hover {
|
||||
background-color: var(--color-hover);
|
||||
border-color: var(--color-primary);
|
||||
}
|
||||
|
||||
&--primary {
|
||||
background-color: var(--color-primary);
|
||||
border-color: var(--color-primary);
|
||||
color: var(--color-primary-contrast);
|
||||
}
|
||||
|
||||
&--secondary {
|
||||
background-color: var(--color-secondary);
|
||||
border-color: var(--color-secondary);
|
||||
}
|
||||
}
|
||||
|
||||
// Hero/Cover sections
|
||||
.td-cover {
|
||||
background: linear-gradient(135deg, var(--color-primary) 0%, var(--color-primary-dark-2) 100%);
|
||||
|
||||
.display-1, .display-2, .display-3, .display-4 {
|
||||
color: var(--color-primary-contrast) !important;
|
||||
}
|
||||
}
|
||||
|
||||
// Section backgrounds
|
||||
.td-section {
|
||||
&--primary {
|
||||
background-color: var(--color-primary-light-6);
|
||||
}
|
||||
|
||||
&--secondary {
|
||||
background-color: var(--color-secondary-light);
|
||||
}
|
||||
}
|
||||
|
||||
// Feature boxes
|
||||
.td-feature {
|
||||
border: 1px solid var(--nav-border-color);
|
||||
background-color: var(--color-card);
|
||||
|
||||
&:hover {
|
||||
border-color: var(--color-primary);
|
||||
box-shadow: 0 4px 12px var(--color-shadow);
|
||||
}
|
||||
}
|
||||
|
||||
// Feature blocks on homepage (blocks/feature)
|
||||
.td-box--dark,
|
||||
.td-box--colored,
|
||||
section[class*="bg-dark"] .td-box,
|
||||
section[class*="color-dark"] .td-box {
|
||||
.h2, .h3, .h4, .h5, h2, h3, h4, h5, p, a {
|
||||
color: #FFFFFF !important;
|
||||
}
|
||||
|
||||
&:hover {
|
||||
background-color: rgba(0, 0, 0, 0.8) !important;
|
||||
|
||||
.h2, .h3, .h4, .h5, h2, h3, h4, h5, p, a {
|
||||
color: #FFFFFF !important;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure text stays visible in dark sections
|
||||
section[class*="bg-dark"],
|
||||
section[class*="color-dark"] {
|
||||
* {
|
||||
color: #FFFFFF !important;
|
||||
}
|
||||
|
||||
.td-box, .card {
|
||||
&:hover {
|
||||
background-color: rgba(0, 0, 0, 0.8) !important;
|
||||
|
||||
* {
|
||||
color: #FFFFFF !important;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
|
|
|
|||
|
|
@ -3,84 +3,5 @@ title: IPCEI-CIS Developer Framework
|
|||
---
|
||||
|
||||
{{< blocks/cover title="IPCEI-CIS Developer Framework" image_anchor="top" height="full" >}}
|
||||
<p class="lead mt-5">
|
||||
A comprehensive enterprise development platform enabling teams to build, deploy, and operate cloud-native applications with ease.
|
||||
</p>
|
||||
{{< blocks/link-down color="info" >}}
|
||||
|
||||
{{< /blocks/cover >}}
|
||||
|
||||
{{% blocks/lead color="primary" %}}
|
||||
The IPCEI-CIS Developer Framework provides everything you need to deliver modern applications at scale.
|
||||
Built on open standards and battle-tested technologies.
|
||||
{{% /blocks/lead %}}
|
||||
|
||||
{{% blocks/section color="dark" type="row" %}}
|
||||
|
||||
{{% blocks/feature icon="fa-solid fa-diagram-project" title="Architecture Documentation" url="/docs/architecture/" %}}
|
||||
Explore the platform's architecture with interactive C4 diagrams. Understand the system design, components, and deployment topology.
|
||||
|
||||
**Dive into the architecture →**
|
||||
{{% /blocks/feature %}}
|
||||
|
||||
{{% blocks/feature icon="fa-solid fa-book-open" title="Technical Writer Guide" url="/docs/documentation/" %}}
|
||||
Learn how to contribute to this documentation. Write content, test locally, and understand the CI/CD pipeline.
|
||||
|
||||
**Start documenting →**
|
||||
{{% /blocks/feature %}}
|
||||
|
||||
{{% blocks/feature icon="fa-solid fa-archive" title="Legacy Documentation (v1)" url="/docs/v1/" %}}
|
||||
Access the previous version of our documentation including historical project information and early architecture decisions.
|
||||
|
||||
**Browse v1 docs →**
|
||||
{{% /blocks/feature %}}
|
||||
|
||||
{{% /blocks/section %}}
|
||||
|
||||
{{% blocks/section color="white" %}}
|
||||
|
||||
## What's in the Platform?
|
||||
|
||||
<div class="row">
|
||||
<div class="col-lg-4 mb-4">
|
||||
|
||||
### 🚀 Developer Experience
|
||||
|
||||
* **Backstage Portal** - Self-service platform
|
||||
* **GitOps Workflows** - Automated deployments
|
||||
* **Golden Paths** - Best practices built-in
|
||||
|
||||
</div>
|
||||
<div class="col-lg-4 mb-4">
|
||||
|
||||
### 🛠️ Infrastructure as Code
|
||||
|
||||
* **Crossplane** - Cloud resource provisioning
|
||||
* **ArgoCD** - Declarative GitOps
|
||||
* **Terraform** - Infrastructure automation
|
||||
|
||||
</div>
|
||||
<div class="col-lg-4 mb-4">
|
||||
|
||||
### 📊 Observability
|
||||
|
||||
* **Prometheus & Grafana** - Metrics & dashboards
|
||||
* **Loki** - Log aggregation
|
||||
* **OpenTelemetry** - Distributed tracing
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{% /blocks/section %}}
|
||||
|
||||
{{% blocks/section color="light" %}}
|
||||
|
||||
## Get Started
|
||||
|
||||
Whether you're a **platform engineer**, **application developer**, or **technical writer**, we have resources for you:
|
||||
|
||||
* 📖 Read the [Documentation](/docs/) to understand the platform
|
||||
* 🏗️ Explore [Platform Components](/docs/components/) and their usage
|
||||
* ✍️ Learn [How to Document](/docs/DOCUMENTATION-GUIDE/) and contribute
|
||||
* 🔍 Browse [Legacy Documentation](/docs-old/) for historical context
|
||||
|
||||
{{% /blocks/section %}}
|
||||
|
|
|
|||
|
|
@@ -1,84 +0,0 @@
# Review

1) 09h35 Marco
business plan
issue: value of software, depreciation
FTE: around 100 overall, 3 full teams of developers
tax discussion

10h04 Discussions

2) 10h10 Julius

3) 10h27 Sebastiano - DevDay until 10h40

use larger fonts for votes - questions should be readable!

"devops is dead" .... claim

4) Stephan until 10h55

5) Christopher 10h58

6) Robert 11:11
* app
* devops-pipelines
* edp deployed in osc

7) Michal has nothing to show

8) Evgenii wants to finish -- 11:30

9) Patrick 11:32

====

project management meeting

workshops, external teams

customer episodes

who-what-where principles

Roles, personas

keep capturing the user's perspective, a developer's inner drive, my own expectations of the EDP
(can we pull that off, would I want to work with it)

climb to level 2

hold workshops

bring in senior people

level1: source code structure, building artifacts, revision control, branching model, e.g. pull requesting, testing the software, local debugging
level2: automation of the artifact builds, version management, milestones, tickets, issues, security compliance
level3: deployment to stages, feedback on pipeline behaviour
level4: feedback on app behaviour (logs, metrics, alerts) + development loop
level5: 3rd level support in production

level1: coding
source code structure, building artifacts, revision control, branching model, e.g. pull requesting, testing the software, local debugging

level2: reaching the outside world with output
automation of the artifact builds, version management, milestones, tickets, issues, security compliance

level3: run the app anywhere
deployment to stages, feedback on pipeline behaviour

level4: monitoring the app
feedback on app behaviour (logs, metrics, alerts) + development loop

level5: support
3rd level support in production (or any outer stage)

sprint 4
leveraging pillar
own-app pillar
chore pillar
@@ -1,6 +0,0 @@
---
title: important links
weight: 20
---

* Gardener login to Edge and orca cluster: IPCEICIS-6222
@@ -1,23 +0,0 @@
---
title: Legacy Documentation
linkTitle: Docs (Old)
menu:
  main:
    weight: 50
weight: 50
cascade:
  - type: docs
---

# Legacy Documentation

This section contains the previous version of the documentation for reference purposes.

**Note**: This documentation is archived and may be outdated. Please refer to the main [Documentation](../docs/) section for current information.

## Available Sections

* [Architecture](architecture/) - System architecture and diagrams
* [Documentation](documentation/) - Meta documentation about the documentation system
* [Platform Overview](platform-overview/) - Overview document
* [v1 (Legacy)](v1/) - Original v1 documentation
@@ -1,9 +0,0 @@
---
title: "Architecture"
linkTitle: "Architecture"
weight: 3
description: >
  System architecture documentation and interactive diagrams
---

This section contains architecture documentation for the IPCEI-CIS Developer Framework, including interactive C4 architecture diagrams.
@ -1,79 +0,0 @@
|
|||
---
|
||||
title: "High Level Architecture"
|
||||
linkTitle: "High Level Architecture"
|
||||
weight: 1
|
||||
description: >
|
||||
Interactive high-level architecture overview of the Enterprise Development Platform
|
||||
---
|
||||
|
||||
This document describes the high-level architecture of our Enterprise Development Platform (EDP) system.
|
||||
|
||||
## Interactive Architecture Diagram
|
||||
|
||||
{{< likec4-view view="otc-faas" project="architecture" title="Enterprise Development Platform - OTC FaaS Deployment Architecture" >}}
|
||||
|
||||
{{< alert title="Interactive Diagram" >}}
|
||||
The diagram above is interactive when viewed in a compatible browser.
|
||||
You can click on components to explore the architecture details.
|
||||
|
||||
**Note:** The interactive diagram requires the LikeC4 webcomponent to be generated.
|
||||
See the [setup instructions]({{< ref "/docs-old/architecture/setup" >}}) for details.
|
||||
{{< /alert >}}
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
The Enterprise Development Platform consists of several key components working together to provide a comprehensive development and deployment environment.
|
||||
|
||||
### Key Components
|
||||
|
||||
1. **OTC Foundry** - Central management and orchestration layer
|
||||
2. **Per-Tenant EDP** - Isolated development environments for each tenant
|
||||
3. **FaaS Environment** - Function-as-a-Service deployment targets on Open Telekom Cloud
|
||||
4. **Cloud Services** - Managed services including databases, storage, and monitoring
|
||||
|
||||
### Deployment Environments
|
||||
|
||||
- **Development Environment** (`*.t09.de`) - For platform team development and testing
|
||||
- **Production Environment** (`*.buildth.ing`) - For production workloads and tenant services
|
||||
|
||||
## Component Details
|
||||
|
||||
The interactive diagram above shows the relationships between different components and how they interact within the system architecture. You can explore the diagram by clicking on different elements to see more details.
|
||||
|
||||
### Infrastructure Components
|
||||
|
||||
- **Kubernetes Clusters** - Container orchestration using OTC CCE (Cloud Container Engine)
|
||||
- **ArgoCD** - GitOps continuous deployment and application lifecycle management
|
||||
- **Forgejo** - Git repository management and CI/CD pipelines
|
||||
- **Observability Stack** - Monitoring (Prometheus, Grafana), logging (Loki), and alerting
|
||||
|
||||
### Security and Management
|
||||
|
||||
- **Keycloak** - Identity and access management (IAM)
|
||||
- **OpenBao** - Secrets management (Hashicorp Vault fork)
|
||||
- **External Secrets Operator** - Kubernetes secrets integration
|
||||
- **Crossplane** - Infrastructure as Code and cloud resource provisioning
|
||||
|
||||
### Developer Experience
|
||||
|
||||
- **Backstage** - Internal developer portal and service catalog
|
||||
- **Forgejo Actions** - CI/CD pipeline execution
|
||||
- **Development Workflows** - GitOps-based inner and outer loop workflows
|
||||
|
||||
## Setup and Maintenance
|
||||
|
||||
To update or modify the architecture diagrams:
|
||||
|
||||
1. Edit the `.c4` files in `resources/edp-likec4/`
|
||||
2. Regenerate the webcomponent:
|
||||
|
||||
```bash
|
||||
cd resources/edp-likec4
|
||||
npx likec4 codegen webcomponent \
|
||||
--webcomponent-prefix likec4 \
|
||||
--outfile ../../static/js/likec4-webcomponent.js
|
||||
```
|
||||
|
||||
3. Commit both the model changes and the regenerated JavaScript file
|
||||
|
||||
For more information, see the [LikeC4 Integration Guide]({{< ref "/docs-old/architecture/setup" >}}).
|
||||
|
|
@ -1,297 +0,0 @@
|
|||
---
|
||||
title: "LikeC4 Setup Guide"
|
||||
linkTitle: "Setup"
|
||||
weight: 10
|
||||
description: >
|
||||
How to set up and use LikeC4 interactive architecture diagrams
|
||||
---
|
||||
|
||||
This guide explains how to set up and use LikeC4 interactive architecture diagrams in this documentation.
|
||||
|
||||
## Overview
|
||||
|
||||
LikeC4 enables you to create interactive C4 architecture diagrams as code. The diagrams are defined in `.c4` files and compiled into a web component that can be embedded in any HTML page.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Node.js (v18 or later)
|
||||
- npm or yarn
|
||||
|
||||
## Initial Setup
|
||||
|
||||
### 1. Install Dependencies
|
||||
|
||||
Navigate to the LikeC4 directory and install dependencies:
|
||||
|
||||
```bash
|
||||
cd resources/edp-likec4
|
||||
npm install
|
||||
```
|
||||
|
||||
### 2. Generate the Web Component
|
||||
|
||||
Create the web component that Hugo will load:
|
||||
|
||||
```bash
|
||||
npx likec4 codegen webcomponent \
|
||||
--webcomponent-prefix likec4 \
|
||||
--outfile ../../static/js/likec4-webcomponent.js
|
||||
```
|
||||
|
||||
This command:
|
||||
|
||||
- Reads all `.c4` files from `models/` and `views/`
|
||||
- Generates a single JavaScript file with all architecture views
|
||||
- Outputs to `static/js/likec4-webcomponent.js`
|
||||
|
||||
### 3. Verify Integration
|
||||
|
||||
The integration should already be configured in:
|
||||
|
||||
- `hugo.toml` - Contains `params.likec4.enable = true`
|
||||
- `layouts/partials/hooks/head-end.html` - Loads CSS and loader script
|
||||
- `static/css/likec4-styles.css` - Diagram styling
|
||||
- `static/js/likec4-loader.js` - Dynamic module loader
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```plaintext
|
||||
resources/edp-likec4/
|
||||
├── models/ # C4 model definitions
|
||||
│ ├── components/ # Component models
|
||||
│ ├── containers/ # Container models
|
||||
│ ├── context/ # System context
|
||||
│ └── code/ # Code-level workflows
|
||||
├── views/ # View definitions
|
||||
│ ├── deployment/ # Deployment views
|
||||
│ ├── edp/ # EDP views
|
||||
│ ├── high-level-concept/ # Conceptual views
|
||||
│ └── dynamic/ # Process flows
|
||||
├── package.json # Dependencies
|
||||
└── INTEGRATION.md # Integration docs
|
||||
```
|
||||
|
||||
## Using in Documentation
|
||||
|
||||
### Basic Usage
|
||||
|
||||
Add this to any Markdown file:
|
||||
|
||||
```html
|
||||
<div class="likec4-container">
|
||||
<div class="likec4-header">
|
||||
Your Diagram Title
|
||||
</div>
|
||||
<likec4-view view-id="YOUR-VIEW-ID" browser="true"></likec4-view>
|
||||
<div class="likec4-loading" id="likec4-loading">
|
||||
Loading architecture diagram...
|
||||
</div>
|
||||
</div>
|
||||
```
|
||||
|
||||
### Available View IDs
|
||||
|
||||
To find available view IDs, search the `.c4` files:
|
||||
|
||||
```bash
|
||||
cd resources/edp-likec4
|
||||
grep -r "view\s\+\w" views/ models/ --include="*.c4"
|
||||
```
|
||||
|
||||
Common views:
|
||||
|
||||
- `otc-faas` - OTC FaaS deployment
|
||||
- `edp` - EDP overview
|
||||
- `landscape` - Developer landscape
|
||||
- `edpbuilderworkflow` - Builder workflow
|
||||
- `keycloak` - Keycloak component
|
||||
|
||||
### With Hugo Alert
|
||||
|
||||
Combine with Docsy alerts for better UX:
|
||||
|
||||
```markdown
|
||||
<div class="likec4-container">
|
||||
<div class="likec4-header">
|
||||
System Architecture
|
||||
</div>
|
||||
<likec4-view view-id="otc-faas" browser="true"></likec4-view>
|
||||
<div class="likec4-loading" id="likec4-loading">
|
||||
Loading...
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{</* alert title="Note" */>}}
|
||||
Click on components in the diagram to explore the architecture.
|
||||
{{</* /alert */>}}
|
||||
```
|
||||
|
||||
## Workflow for Changes
|
||||
|
||||
### 1. Modify Architecture Models
|
||||
|
||||
Edit the `.c4` files in `resources/edp-likec4/`:
|
||||
|
||||
```bash
|
||||
# Edit a model
|
||||
vi resources/edp-likec4/models/containers/argocd.c4
|
||||
|
||||
# Or edit a view
|
||||
vi resources/edp-likec4/views/deployment/otc/otc-faas.c4
|
||||
```
|
||||
|
||||
### 2. Preview Changes Locally
|
||||
|
||||
Use the LikeC4 CLI to preview:
|
||||
|
||||
```bash
|
||||
cd resources/edp-likec4
|
||||
|
||||
# Start preview server
|
||||
npx likec4 start
|
||||
|
||||
# Opens browser at http://localhost:5173
|
||||
```
|
||||
|
||||
### 3. Regenerate Web Component
|
||||
|
||||
After making changes:
|
||||
|
||||
```bash
|
||||
cd resources/edp-likec4
|
||||
npx likec4 codegen webcomponent \
|
||||
--webcomponent-prefix likec4 \
|
||||
--outfile ../../static/js/likec4-webcomponent.js
|
||||
```
|
||||
|
||||
### 4. Test in Hugo
|
||||
|
||||
Start the Hugo development server:
|
||||
|
||||
```bash
|
||||
# From repository root
|
||||
hugo server -D
|
||||
|
||||
# Open http://localhost:1313
|
||||
```
|
||||
|
||||
### 5. Commit Changes
|
||||
|
||||
Commit both the model files and the regenerated web component:
|
||||
|
||||
```bash
|
||||
git add resources/edp-likec4/
|
||||
git add static/js/likec4-webcomponent.js
|
||||
git commit -m "feat: update architecture diagrams"
|
||||
```
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
### Custom Styling
|
||||
|
||||
Modify `static/css/likec4-styles.css` to customize appearance:
|
||||
|
||||
```css
|
||||
.likec4-container {
|
||||
height: 800px; /* Adjust height */
|
||||
border-radius: 8px; /* Rounder corners */
|
||||
}
|
||||
```
|
||||
|
||||
### Multiple Diagrams Per Page
|
||||
|
||||
You can include multiple diagrams on a single page:
|
||||
|
||||
```html
|
||||
<!-- First diagram -->
|
||||
<div class="likec4-container">
|
||||
<div class="likec4-header">Deployment View</div>
|
||||
<likec4-view view-id="otc-faas" browser="true"></likec4-view>
|
||||
<div class="likec4-loading">Loading...</div>
|
||||
</div>
|
||||
|
||||
<!-- Second diagram -->
|
||||
<div class="likec4-container">
|
||||
<div class="likec4-header">Component View</div>
|
||||
<likec4-view view-id="edp" browser="true"></likec4-view>
|
||||
<div class="likec4-loading">Loading...</div>
|
||||
</div>
|
||||
```
|
||||
|
||||
### Disable for Specific Pages
|
||||
|
||||
Add to page front matter:
|
||||
|
||||
```yaml
|
||||
---
|
||||
title: "My Page"
|
||||
params:
|
||||
disable_likec4: true
|
||||
---
|
||||
```
|
||||
|
||||
Then update `layouts/partials/hooks/head-end.html`:
|
||||
|
||||
```html
|
||||
{{ if and .Site.Params.likec4.enable (not .Params.disable_likec4) }}
|
||||
<!-- LikeC4 scripts -->
|
||||
{{ end }}
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Diagram Not Loading
|
||||
|
||||
1. **Check browser console** (F12 → Console)
|
||||
2. **Verify webcomponent exists:**
|
||||
|
||||
```bash
|
||||
ls -lh static/js/likec4-webcomponent.js
|
||||
```
|
||||
|
||||
3. **Regenerate if missing:**
|
||||
|
||||
```bash
|
||||
cd resources/edp-likec4
|
||||
npm install
|
||||
npx likec4 codegen webcomponent \
|
||||
--webcomponent-prefix likec4 \
|
||||
--outfile ../../static/js/likec4-webcomponent.js
|
||||
```
|
||||
|
||||
### View Not Found
|
||||
|
||||
- Check view ID matches exactly (case-sensitive)
|
||||
- Search for the view in `.c4` files:
|
||||
|
||||
```bash
|
||||
grep -r "view otc-faas" resources/edp-likec4/
|
||||
```
|
||||
|
||||
### Styling Issues
|
||||
|
||||
- Clear browser cache (Ctrl+Shift+R)
|
||||
- Check `static/css/likec4-styles.css` is loaded in browser DevTools → Network
|
||||
|
||||
### Build Errors
|
||||
|
||||
If LikeC4 codegen fails:
|
||||
|
||||
```bash
|
||||
cd resources/edp-likec4
|
||||
rm -rf node_modules package-lock.json
|
||||
npm install
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
- [LikeC4 Documentation](https://likec4.dev/)
|
||||
- [C4 Model](https://c4model.com/)
|
||||
- [Web Components](https://developer.mozilla.org/en-US/docs/Web/Web_Components)
|
||||
- [Hugo Documentation](https://gohugo.io/documentation/)
|
||||
|
||||
## Migration Notes
|
||||
|
||||
This LikeC4 integration was migrated from the edp-doc repository. This repository (`ipceicis-developerframework`) is now the primary source for architecture models.
|
||||
|
||||
The edp-doc repository can reference these models via git submodule if needed.
# CI/CD pipeline tools for composable pipeline

## Context and Problem Statement

In order to build a composable pipeline that provides a golden path and reusable components, we need to define the tools that will be used to execute the pipeline.

ArgoCD is considered set in stone as the tool to manage the deployment of applications. However, the tools to compose and execute the pipeline are still up for debate.

> Note: The pipeline will use many other tools to perform certain actions such as testing, building, and deploying. This ADR is focused on the tools that will be used to compose and execute the pipeline itself.

In general, there are two decisions to make:

* What tools should we use to execute the pipeline?
* What tools should we use to compose the pipeline?

The following use-cases should be considered for this decision:

* **User who wants to manage their own runners (???)**
* User who only wants to use our golden path
* User who wants to use our golden path and add custom actions
* User who wants to use their own templates and import some of our actions
* User who wants to import an existing GitHub repository with a pipeline

## Considered Options

* Argo Workflows + Events
* Argo Workflows + Events + Additional Composition tool
* Forgejo Actions
* Forgejo Actions + Additional Composition tool
* Dagger (as Engine)
* Shuttle (as Engine)

## Decision Outcome

TBD

## Pros and Cons of the Options

### Argo Workflows + Events

#### Pro

* Integration with ArgoCD
* Ability to trigger additional workflows based on events
* Level of maturity and community support

#### Con

* Ability to self-host runners?
* The way pipeline composition works (based on Kubernetes CRDs)
* Templates must be available in the cluster where the pipelines are executed, so any imported templates must be applied to the cluster before the pipeline can be executed and cannot simply reference a repository
  * This makes it difficult to import existing templates from other repositories when using self-hosted runners
  * This also makes it difficult to use our golden path, or at least we will need to provide a way to import our golden path into the cluster
  * This also makes a split where every component has its own repository very difficult
* Additional UI to manage the pipeline
* Additional complexity

### Argo Workflows + Events + Additional Composition tool

#### Pro

* Composability can be offloaded to another tool

#### Con

* All cons of the previous option (except composability)
* Additional complexity by adding another tool

### Forgejo Actions

#### Pro

* Tight integration with GitHub Actions, providing a familiar interface for developers and a vast catalog of actions to choose from
* Ability to compose pipelines without relying on another tool
* Self-hosting of runners possible
* Every component can have its own repository and use different tools (e.g. written in Go, Bash, Python, etc.)

#### Con

* Level of maturity - will require additional investments to provide a production-grade system

### Forgejo Actions + Additional Tool

#### Pro

* May be possible to use GitHub Actions alongside another tool

#### Con

* Additional complexity by adding another tool

### Shuttle

#### Pro

* Possibility to clearly define interfaces for pipeline steps
* Relatively simple

#### Con

* Basically backed by only one company
* **Centralized templates**, so no mechanism for composing pipelines from multiple repositories

### Dagger

#### Pro

* Pipeline as code
* If it runs, it should run anywhere and produce the "same" / somewhat stable results
* Build environments are defined within containers / the Dagger config; Dagger is the only dependency one has to install on a machine
* DX is extremely nice, especially if you have to debug (image) builds; there is also type safety due to the ability to write your build in a strongly typed language
* Additional tooling, like Trivy, is added to a build pipeline with low effort thanks to containers and existing plugins/wrappers
* You can create complex test environments similar to Testcontainers and Docker Compose

#### Con

* Relies heavily on containers, which might not be available in some environments (due to policy etc.); this also has an effect on reproducibility and verifiability
* As a dev you need to properly understand containers
* The Dagger engine has to run privileged locally and/or in the cloud, which might be a blocker or at least a significant pain point

#### Suggestion Patrick

* Dagger is heavyweight and might not be as productive in a dev workflow as it seems (LSP setup etc.)
* It might be too opinionated to force on teams, especially since it is not nearly mainstream enough; the community might be too small
* It feels like Dagger gets you 95% of the way, but the remaining 5% are a real struggle
* If we like it, we should check its popularity in the dev community before considering it further, as it has a direct impact on teams and their preferences
# ADRs

Architecture Decision Records (ADRs) capture the important architectural decisions made during the development of a project. They document the context, the decision, and the consequences of the decision, and they make it easy to keep track of the architectural decisions made in a project and to communicate them to the team.

The [Markdown Architectural Decision Records](https://adr.github.io/madr/) (MADR) format is a simple and easy-to-use format for writing ADRs in Markdown.
<!-- we need to disable MD025, because we use a different heading ("ADR Template") on the homepage (see above) than the one foreseen in the template -->
<!-- markdownlint-disable-next-line MD025 -->
# {short title, representative of solved problem and found solution}

## Context and Problem Statement

{Describe the context and problem statement, e.g., in free form using two to three sentences or in the form of an illustrative story. You may want to articulate the problem in form of a question and add links to collaboration boards or issue management systems.}

<!-- This is an optional element. Feel free to remove. -->
## Decision Drivers

* {decision driver 1, e.g., a force, facing concern, …}
* {decision driver 2, e.g., a force, facing concern, …}
* … <!-- numbers of drivers can vary -->

## Considered Options

* {title of option 1}
* {title of option 2}
* {title of option 3}
* … <!-- numbers of options can vary -->

## Decision Outcome

Chosen option: "{title of option 1}", because {justification. e.g., only option, which meets k.o. criterion decision driver | which resolves force {force} | … | comes out best (see below)}.

<!-- This is an optional element. Feel free to remove. -->
### Consequences

* Good, because {positive consequence, e.g., improvement of one or more desired qualities, …}
* Bad, because {negative consequence, e.g., compromising one or more desired qualities, …}
* … <!-- numbers of consequences can vary -->

<!-- This is an optional element. Feel free to remove. -->
### Confirmation

{Describe how the implementation of/compliance with the ADR can/will be confirmed. Are the design that was decided for and its implementation in line with the decision made? E.g., a design/code review or a test with a library such as ArchUnit can help validate this. Note that although we classify this element as optional, it is included in many ADRs.}

<!-- This is an optional element. Feel free to remove. -->
## Pros and Cons of the Options

### {title of option 1}

<!-- This is an optional element. Feel free to remove. -->
{example | description | pointer to more information | …}

* Good, because {argument a}
* Good, because {argument b}
<!-- use "neutral" if the given argument weighs neither for good nor bad -->
* Neutral, because {argument c}
* Bad, because {argument d}
* … <!-- numbers of pros and cons can vary -->

### {title of other option}

{example | description | pointer to more information | …}

* Good, because {argument a}
* Good, because {argument b}
* Neutral, because {argument c}
* Bad, because {argument d}
* …

<!-- This is an optional element. Feel free to remove. -->
## More Information

{You might want to provide additional evidence/confidence for the decision outcome here and/or document the team agreement on the decision and/or define when/how this decision should be realized and if/when it should be re-visited. Links to other decisions and resources might appear here as well.}
---
title: "Documentation About Documentation"
linkTitle: "Documentation"
weight: 10
description: >
  Learn how to create, maintain, and publish documentation for the developer platform.
---

Welcome to the meta-documentation! This section explains how our documentation platform works and guides you through the technicalWriter role.

## What is a Technical Writer?

A **Technical Writer** is responsible for creating, maintaining, and publishing the developer platform documentation. This includes:

- Writing and updating content in Markdown
- Creating architecture diagrams with LikeC4
- Testing locally before committing
- Following the CI/CD pipeline to production

## Documentation Platform Architecture

Our documentation is built on a modern stack:

- **Hugo** with the **Docsy** theme for static site generation
- **LikeC4** for architecture visualization
- **Taskfile** for local development automation
- **GitHub Actions** for continuous testing
- **Edge deployment** for hosting

### System Overview

{{< likec4-view view="overview" project="documentation-platform" >}}

This high-level view shows all major components of the documentation platform.

## Getting Started

Continue to the next sections to learn about:

1. [Local Development](local-development/) - How to work on documentation locally
2. [Testing](testing/) - Quality assurance processes
3. [CI/CD Pipeline](cicd/) - Automated testing and deployment
4. [Publishing](publishing/) - How documentation reaches production
---
|
||||
title: "CI/CD Pipeline"
|
||||
linkTitle: "CI/CD"
|
||||
weight: 40
|
||||
description: >
|
||||
Automated testing and container build process.
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Our documentation uses a continuous integration and deployment pipeline to ensure quality and automate deployment.
|
||||
|
||||
{{< likec4-view view="cicdPipeline" project="documentation-platform" >}}
|
||||
|
||||
## GitHub Actions Workflow
|
||||
|
||||
The CI/CD pipeline is defined in `.github/workflows/test.yml` and runs on:
|
||||
|
||||
- **Pushes to `main` branch**
|
||||
- **Pull requests to `main` branch**
|
||||
|
||||
### Workflow Steps
|
||||
|
||||
#### 1. Checkout Code
|
||||
|
||||
```yaml
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
fetch-depth: 0
|
||||
```
|
||||
|
||||
- Clones repository with full history
|
||||
- Includes Git submodules (Hugo modules)
|
||||
|
||||
#### 2. Setup Hugo
|
||||
|
||||
```yaml
|
||||
- name: Setup Hugo
|
||||
uses: peaceiris/actions-hugo@v3
|
||||
with:
|
||||
hugo-version: 'latest'
|
||||
extended: true
|
||||
```
|
||||
|
||||
- Installs Hugo Extended
|
||||
- Uses latest stable version
|
||||
|
||||
#### 3. Setup Node.js
|
||||
|
||||
```yaml
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '24'
|
||||
cache: 'npm'
|
||||
```
|
||||
|
||||
- Installs Node.js v24
|
||||
- Caches npm dependencies for faster builds
|
||||
|
||||
#### 4. Install Dependencies
|
||||
|
||||
```bash
|
||||
npm ci
|
||||
go install github.com/wjdp/htmltest@latest
|
||||
```
|
||||
|
||||
- Installs npm packages (markdownlint, htmlvalidate)
|
||||
- Installs htmltest for link checking
|
||||
|
||||
#### 5. Run Tests
|
||||
|
||||
```bash
|
||||
npm run test:build
|
||||
npm run test:markdown
|
||||
npm run test:html
|
||||
```
|
||||
|
||||
- Validates Hugo build
|
||||
- Lints Markdown files
|
||||
- Validates HTML output
|
||||
|
||||
#### 6. Link Checking
|
||||
|
||||
```yaml
|
||||
- name: Run link checker
|
||||
run: htmltest
|
||||
continue-on-error: true
|
||||
```
|
||||
|
||||
- Checks all links
|
||||
- Continues even if links fail (soft requirement)
|
||||
|
||||
#### 7. Upload Results
|
||||
|
||||
```yaml
|
||||
- name: Upload htmltest results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: htmltest-report
|
||||
path: tmp/.htmltest/
|
||||
```
|
||||
|
||||
- Uploads link check report
|
||||
- Available for download from GitHub Actions
|
||||
|
||||
## Container Build Process
|
||||
|
||||
After tests pass, a container image is built:
|
||||
|
||||
```bash
|
||||
task build:oci-image
|
||||
```
|
||||
|
||||
### Build Process
|
||||
|
||||
1. **Reads version information** from `.env.versions`:
|
||||
- `NODE_VERSION`
|
||||
- `GO_VERSION`
|
||||
- `HUGO_VERSION`
|
||||
|
||||
2. **Builds Docker image** using `Dockerfile`:
|
||||
- Multi-stage build
|
||||
- Hugo generates static site
|
||||
- Nginx serves the content
|
||||
|
||||
3. **Tags image** with:
|
||||
- `latest`
|
||||
- Git commit SHA (short)
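
The Taskfile is the source of truth for this target, but a rough sketch of what `task build:oci-image` does with these inputs might look like the following (image name and tags are illustrative assumptions):

```bash
# Sketch only - the real logic lives in Taskfile.yml
set -a; source .env.versions; set +a   # load NODE_VERSION, GO_VERSION, HUGO_VERSION
docker build \
  --build-arg NODE_VERSION="${NODE_VERSION}" \
  --build-arg GO_VERSION="${GO_VERSION}" \
  --build-arg HUGO_VERSION="${HUGO_VERSION}" \
  -t edpdoc:latest \
  -t "edpdoc:$(git rev-parse --short HEAD)" .
```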
|
||||
|
||||
### Dockerfile Structure
|
||||
|
||||
```dockerfile
|
||||
# Build stage
|
||||
FROM node:${NODE_VERSION} as builder
|
||||
# Install Hugo, build dependencies
|
||||
# Run: hugo --gc --minify
|
||||
# Output: public/ directory
|
||||
|
||||
# Runtime stage
|
||||
FROM nginx:alpine
|
||||
# Copy public/ to /usr/share/nginx/html/
|
||||
# Configure Nginx
|
||||
```
|
||||
|
||||
### Testing the Container
|
||||
|
||||
```bash
|
||||
task test:oci-image
|
||||
```
|
||||
|
||||
This:
|
||||
|
||||
1. Builds the image
|
||||
2. Starts container on port 8080
|
||||
3. Tests HTTP endpoint
|
||||
4. Cleans up container
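
A hedged manual equivalent of these steps (container name and image tag are assumptions, not necessarily what the task uses):

```bash
# Manual approximation of `task test:oci-image`
docker run --rm -d --name edpdoc-test -p 8080:80 edpdoc:latest
curl -fsS http://localhost:8080/ > /dev/null && echo "site responds"
docker stop edpdoc-test
```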
|
||||
|
||||
## Package.json Scripts
|
||||
|
||||
The `package.json` defines test scripts:
|
||||
|
||||
```json
|
||||
{
|
||||
"scripts": {
|
||||
"test:build": "hugo --gc --minify --logLevel info",
|
||||
"test:markdown": "markdownlint 'content/**/*.md'",
|
||||
"test:html": "htmlvalidate 'public/**/*.html'"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Running CI Locally
|
||||
|
||||
Simulate the CI environment locally:
|
||||
|
||||
```bash
|
||||
task ci
|
||||
```
|
||||
|
||||
This runs the same tests as GitHub Actions.
|
||||
|
||||
## Monitoring CI Results
|
||||
|
||||
### Successful Build
|
||||
|
||||
✅ All tests pass → Ready to deploy
|
||||
|
||||
### Failed Build
|
||||
|
||||
❌ Tests fail:
|
||||
|
||||
1. Click on the failed workflow in GitHub Actions
|
||||
2. Expand the failed step
|
||||
3. Read the error message
|
||||
4. Fix locally: `task test:<specific-test>`
|
||||
5. Commit and push fix
|
||||
|
||||
### Viewing Artifacts
|
||||
|
||||
1. Go to GitHub Actions
|
||||
2. Click on workflow run
|
||||
3. Scroll to "Artifacts" section
|
||||
4. Download `htmltest-report`
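
If you have the GitHub CLI installed and authenticated, the same artifact can usually be fetched without the web UI:

```bash
# Prompts for the workflow run if no run ID is given
gh run download --name htmltest-report
```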
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Don't push to main directly** - Use feature branches and PRs
|
||||
2. **Wait for CI before merging** - Green checkmark required
|
||||
3. **Fix broken builds immediately** - Don't let main stay red
|
||||
4. **Review CI logs** - Understand why tests fail
|
||||
5. **Update dependencies** - Keep versions current in `.env.versions`
|
||||
|
||||
## Continuous Deployment
|
||||
|
||||
After successful CI:
|
||||
|
||||
1. Container image is built
|
||||
2. Image is pushed to registry
|
||||
3. Deployment process begins (see [Publishing](../publishing/))
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Tests pass locally but fail in CI
|
||||
|
||||
**Possible causes:**
|
||||
|
||||
- Different Hugo version
|
||||
- Different Node.js version
|
||||
- Missing dependencies
|
||||
- Environment-specific issues
|
||||
|
||||
**Solution:** Check versions in `.env.versions` and ensure local matches CI
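
A quick way to compare, for example:

```bash
# Compare the pinned versions with what is installed locally
cat .env.versions
hugo version
node --version
go version
```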
|
||||
|
||||
### Build timeouts
|
||||
|
||||
**Possible causes:**
|
||||
|
||||
- Link checker taking too long
|
||||
- Large number of external links
|
||||
|
||||
**Solution:**
|
||||
|
||||
- Use `continue-on-error: true` for link checks
|
||||
- Configure `.htmltest.yml` to skip slow checks
|
||||
|
||||
### Cache issues
|
||||
|
||||
**Solution:** Clear GitHub Actions cache:
|
||||
|
||||
```yaml
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.npm
|
||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||
```
|
||||
|
||||
Update the cache key to force refresh.
|
||||
|
||||
## Next Steps
|
||||
|
||||
Learn about [deployment to Edge environment](../publishing/).
---
|
||||
title: "Local Development"
|
||||
linkTitle: "Local Development"
|
||||
weight: 20
|
||||
description: >
|
||||
Set up your local environment and learn the technicalWriter workflow.
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you start, ensure you have:
|
||||
|
||||
- **Devbox** or the following tools installed:
|
||||
- Hugo Extended (latest version)
|
||||
- Node.js (v24+)
|
||||
- Go (for htmltest)
|
||||
- Git
|
||||
|
||||
## Installation
|
||||
|
||||
1. Clone the repository:
|
||||
|
||||
```bash
|
||||
git clone <repository-url>
|
||||
cd ipceicis-developerframework
|
||||
```
|
||||
|
||||
2. Install dependencies:
|
||||
|
||||
```bash
|
||||
task deps:install
|
||||
```
|
||||
|
||||
3. **If using Devbox**, enter the Devbox shell:
|
||||
|
||||
```bash
|
||||
devbox shell
|
||||
```
|
||||
|
||||
This ensures all tools (Hugo, Node.js, Go) are available in the correct versions.
|
||||
|
||||
## Local Development Workflow
|
||||
|
||||
{{< likec4-view view="localDevelopment" project="documentation-platform" >}}
|
||||
|
||||
### Starting the Development Server
|
||||
|
||||
The easiest way to work locally is to start the Hugo development server:
|
||||
|
||||
```bash
|
||||
task serve
|
||||
```
|
||||
|
||||
This will:
|
||||
|
||||
- Generate build information (git commit, version)
|
||||
- Start Hugo server on `http://localhost:1313`
|
||||
- Enable hot reload - changes appear instantly in the browser
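
If you ever need to bypass Task, a roughly equivalent direct Hugo invocation (assuming default settings; build info generation is skipped) is:

```bash
hugo server -D --port 1313
```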
|
||||
|
||||
### Content Structure
|
||||
|
||||
```text
|
||||
content/
|
||||
└── en/ # English content
|
||||
├── _index.md # Homepage
|
||||
├── blog/ # Blog posts
|
||||
└── docs/ # Documentation
|
||||
├── architecture/ # Architecture docs
|
||||
├── decisions/ # ADRs
|
||||
└── v1/ # Version-specific docs
|
||||
```
|
||||
|
||||
### Creating Content
|
||||
|
||||
1. **Add a new documentation page:**
|
||||
|
||||
```bash
|
||||
# Create a new markdown file
|
||||
vim content/en/docs/your-topic/_index.md
|
||||
```
|
||||
|
||||
2. **Add frontmatter:**
|
||||
|
||||
```yaml
|
||||
---
|
||||
title: "Your Topic"
|
||||
linkTitle: "Your Topic"
|
||||
weight: 10
|
||||
description: >
|
||||
Brief description of your topic.
|
||||
---
|
||||
```
|
||||
|
||||
3. **Write your content** in Markdown
|
||||
|
||||
4. **Preview changes** - they appear immediately if `task serve` is running
|
||||
|
||||
### Creating Architecture Diagrams
|
||||
|
||||
Architecture diagrams are created with LikeC4:
|
||||
|
||||
1. **Navigate to the appropriate LikeC4 project:**
|
||||
- `resources/edp-likec4/` - Platform architecture
|
||||
- `resources/doc-likec4/` - Documentation platform architecture
|
||||
|
||||
2. **Edit or create `.c4` files** with your model
|
||||
|
||||
Example: Create a simple view in `resources/edp-likec4/views/my-view.c4`:
|
||||
|
||||
```likec4
|
||||
specification {
|
||||
element myperson
|
||||
element mysystem
|
||||
}
|
||||
|
||||
model {
|
||||
customer = myperson 'Customer' {
|
||||
description 'End user of the platform'
|
||||
}
|
||||
|
||||
mySystem = mysystem 'My System' {
|
||||
description 'Example system component'
|
||||
}
|
||||
|
||||
customer -> mySystem 'uses'
|
||||
}
|
||||
|
||||
views {
|
||||
view myCustomView {
|
||||
title "My Custom Architecture View"
|
||||
|
||||
include customer
|
||||
include mySystem
|
||||
|
||||
autoLayout TopBottom
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. **Regenerate webcomponents:**
|
||||
|
||||
```bash
|
||||
task likec4:generate
|
||||
```
|
||||
|
||||
4. **Embed diagrams in Markdown:**
|
||||
|
||||
```markdown
|
||||
{{</* likec4-view view="myCustomView" project="architecture" title="My Custom Architecture View" */>}}
|
||||
```
|
||||
|
||||
**Finding available view IDs:**
|
||||
- Open the `.c4` files in your project directory
|
||||
- Look for `view <viewId> {` declarations
|
||||
- The `<viewId>` is what you use in the `view` parameter
|
||||
- Or use: `grep -r "^view " resources/edp-likec4/ --include="*.c4"`
|
||||
|
||||
## Available Tasks
|
||||
|
||||
View all available tasks:
|
||||
|
||||
```bash
|
||||
task --list
|
||||
```
|
||||
|
||||
### Common Development Tasks
|
||||
|
||||
| Task | Description |
|
||||
|------|-------------|
|
||||
| `task serve` | Start development server with hot reload |
|
||||
| `task build` | Build production-ready site |
|
||||
| `task build:dev` | Build development version |
|
||||
| `task clean` | Remove build artifacts |
|
||||
| `task test` | Run all tests |
|
||||
| `task test:quick` | Run tests without link checking |
|
||||
|
||||
## Quick Testing
|
||||
|
||||
Before committing, run quick tests:
|
||||
|
||||
```bash
|
||||
task test:quick
|
||||
```
|
||||
|
||||
This validates:
|
||||
|
||||
- Hugo build succeeds
|
||||
- Markdown syntax is correct
|
||||
|
||||
For comprehensive testing, including link checking:
|
||||
|
||||
```bash
|
||||
task test
|
||||
```
|
||||
|
||||
## Tips for Technical Writers
|
||||
|
||||
1. **Write in present tense** - "The system processes..." not "The system will process..."
|
||||
2. **Use code blocks** with syntax highlighting
|
||||
3. **Include diagrams** for complex concepts
|
||||
4. **Test locally** before pushing
|
||||
5. **Keep it concise** - readers appreciate brevity
|
||||
6. **Update regularly** - stale docs are worse than no docs
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Port 1313 already in use
|
||||
|
||||
```bash
|
||||
# Find and kill the process
|
||||
lsof -ti:1313 | xargs kill -9
|
||||
```
|
||||
|
||||
### Build errors
|
||||
|
||||
```bash
|
||||
# Clean and rebuild
|
||||
task clean
|
||||
task build:dev
|
||||
```
|
||||
|
||||
### Missing dependencies
|
||||
|
||||
```bash
|
||||
# Reinstall all dependencies
|
||||
task deps:install
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
Now that you can develop locally, learn about:
|
||||
|
||||
- [Testing processes](../testing/)
|
||||
- [CI/CD pipeline](../cicd/)
---
|
||||
title: "Publishing to Edge"
|
||||
linkTitle: "Publishing"
|
||||
weight: 50
|
||||
description: >
|
||||
How documentation is deployed to the edge environment.
|
||||
---
|
||||
|
||||
## Deployment Overview
|
||||
|
||||
After successful CI/CD, the documentation is deployed to an edge computing environment.
|
||||
|
||||
{{< likec4-view view="deploymentFlow" project="documentation-platform" >}}
|
||||
|
||||
## Deployment Architecture
|
||||
|
||||
### Edge Connect Platform
|
||||
|
||||
Our documentation is deployed using **Edge Connect**, which orchestrates deployments to edge cloudlets.
|
||||
|
||||
Configuration: `edgeconnectdeployment.yaml`
|
||||
|
||||
```yaml
|
||||
kind: edgeconnect-deployment
|
||||
metadata:
|
||||
name: "edpdoc"
|
||||
appVersion: "1.0.0"
|
||||
organization: "edp2"
|
||||
spec:
|
||||
k8sApp:
|
||||
manifestFile: "./k8s-deployment.yaml"
|
||||
infraTemplate:
|
||||
- region: "EU"
|
||||
cloudletOrg: "TelekomOP"
|
||||
cloudletName: "Munich"
|
||||
flavorName: "EU.small"
|
||||
```
|
||||
|
||||
**Key settings:**
|
||||
|
||||
- **Deployment name:** `edpdoc`
|
||||
- **Region:** EU (Munich)
|
||||
- **Cloudlet:** TelekomOP Munich
|
||||
- **Flavor:** EU.small (resource allocation)
|
||||
|
||||
### Kubernetes Deployment
|
||||
|
||||
The application runs on Kubernetes: `k8s-deployment.yaml`
|
||||
|
||||
#### Service Definition
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: edpdoc
|
||||
labels:
|
||||
run: edpdoc
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
- name: tcp80
|
||||
protocol: TCP
|
||||
port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
run: edpdoc
|
||||
```
|
||||
|
||||
- **Type:** LoadBalancer (external access)
|
||||
- **Port:** 80 (HTTP)
|
||||
- **Selector:** Routes traffic to pods with label `run: edpdoc`
|
||||
|
||||
#### Deployment Configuration
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: edpdoc
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
run: edpdoc
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
run: edpdoc
|
||||
mexDeployGen: kubernetes-basic
|
||||
spec:
|
||||
containers:
|
||||
- name: edpdoc
|
||||
image: ###IMAGETAG###
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
```
|
||||
|
||||
- **Replicas:** 1 (single instance)
|
||||
- **Image:** Injected by deployment pipeline (`###IMAGETAG###` placeholder)
|
||||
- **Pull policy:** Always (ensures latest version)
|
||||
|
||||
### Network Configuration
|
||||
|
||||
Outbound connections are configured in `edgeconnectdeployment.yaml`:
|
||||
|
||||
```yaml
|
||||
network:
|
||||
outboundConnections:
|
||||
- protocol: "tcp"
|
||||
portRangeMin: 80
|
||||
portRangeMax: 80
|
||||
remoteCIDR: "0.0.0.0/0"
|
||||
- protocol: "tcp"
|
||||
portRangeMin: 443
|
||||
portRangeMax: 443
|
||||
remoteCIDR: "0.0.0.0/0"
|
||||
```
|
||||
|
||||
- **Port 80:** HTTP outbound
|
||||
- **Port 443:** HTTPS outbound
|
||||
- **CIDR:** `0.0.0.0/0` (all destinations)
|
||||
|
||||
## Deployment Process
|
||||
|
||||
### 1. Container Image Ready
|
||||
|
||||
After CI passes:
|
||||
|
||||
- Docker image built with `task build:oci-image`
|
||||
- Tagged with git commit SHA
|
||||
- Pushed to container registry
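
For reference, a hedged manual equivalent of the tag-and-push step (registry path and image name are assumptions; CI does this automatically):

```bash
IMAGE="<registry>/<organization>/edpdoc"
TAG="$(git rev-parse --short HEAD)"
docker tag edpdoc:latest "${IMAGE}:${TAG}"
docker push "${IMAGE}:${TAG}"
```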
|
||||
|
||||
### 2. Edge Connect Orchestration
|
||||
|
||||
Edge Connect:
|
||||
|
||||
1. Pulls container image
|
||||
2. Reads `edgeconnectdeployment.yaml`
|
||||
3. Provisions resources on Munich cloudlet
|
||||
4. Applies Kubernetes manifests
|
||||
|
||||
### 3. Kubernetes Deployment
|
||||
|
||||
Kubernetes:
|
||||
|
||||
1. Creates deployment with 1 replica
|
||||
2. Pulls container image (`imagePullPolicy: Always`)
|
||||
3. Starts pod running Nginx + static Hugo site
|
||||
4. Creates LoadBalancer service
|
||||
5. Assigns external IP
|
||||
|
||||
### 4. Service Available
|
||||
|
||||
Documentation is now accessible:
|
||||
|
||||
- **Protocol:** HTTP
|
||||
- **Port:** 80
|
||||
- **IP:** Assigned by LoadBalancer
|
||||
|
||||
## Complete Workflow
|
||||
|
||||
{{< likec4-view view="fullWorkflow" project="documentation-platform" >}}
|
||||
|
||||
### End-to-End Process
|
||||
|
||||
1. **Technical Writer writes content** (Markdown, LikeC4 models)
|
||||
2. **Local testing** with `task serve` and `task test`
|
||||
3. **Commit and push** to Git repository
|
||||
4. **GitHub Actions triggered** on push to main
|
||||
5. **CI tests run** (build, markdown, HTML, links)
|
||||
6. **Container image built** if tests pass
|
||||
7. **Image pushed** to registry
|
||||
8. **Edge deployment triggered**
|
||||
9. **Kubernetes applies** manifests
|
||||
10. **Service available** on edge cloudlet
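
Condensed to the commands a technicalWriter actually types (steps 1-3; the branch name is only an example):

```bash
task serve                            # step 1: write and preview locally
task test                             # step 2: run the full test suite
git checkout -b docs/my-change        # step 3: commit and push via a feature branch
git add content/ resources/
git commit -m "docs: update content"
git push origin docs/my-change        # open a PR to main; CI and deployment take over
```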
|
||||
|
||||
## Monitoring Deployment
|
||||
|
||||
### Check Deployment Status
|
||||
|
||||
```bash
|
||||
kubectl get deployments -n <namespace>
|
||||
kubectl get pods -n <namespace>
|
||||
kubectl get services -n <namespace>
|
||||
```
|
||||
|
||||
### View Logs
|
||||
|
||||
```bash
|
||||
kubectl logs deployment/edpdoc -n <namespace>
|
||||
```
|
||||
|
||||
### Access Documentation
|
||||
|
||||
Find the LoadBalancer external IP:
|
||||
|
||||
```bash
|
||||
kubectl get service edpdoc -n <namespace>
|
||||
```
|
||||
|
||||
Access via: `http://<EXTERNAL-IP>`
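
If you want to script this check, one possible approach (assuming the service exposes an IP rather than a hostname) is:

```bash
EXTERNAL_IP=$(kubectl get service edpdoc -n <namespace> \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl -I "http://${EXTERNAL_IP}"
```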
|
||||
|
||||
## Rollback
|
||||
|
||||
If issues occur after deployment:
|
||||
|
||||
### Option 1: Revert Commit
|
||||
|
||||
```bash
|
||||
git revert <bad-commit>
|
||||
git push origin main
|
||||
```
|
||||
|
||||
CI will rebuild and redeploy.
|
||||
|
||||
### Option 2: Manual Rollback
|
||||
|
||||
```bash
|
||||
kubectl rollout undo deployment/edpdoc -n <namespace>
|
||||
```
|
||||
|
||||
Returns to previous deployment version.
|
||||
|
||||
### Option 3: Deploy Specific Version
|
||||
|
||||
Update image tag in deployment:
|
||||
|
||||
```bash
|
||||
kubectl set image deployment/edpdoc edpdoc=<registry>/<image>:<tag> -n <namespace>
|
||||
```
|
||||
|
||||
## Scaling
|
||||
|
||||
Currently: **1 replica**
|
||||
|
||||
To scale for higher traffic:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
replicas: 3
|
||||
```
|
||||
|
||||
Then apply:
|
||||
|
||||
```bash
|
||||
kubectl apply -f k8s-deployment.yaml
|
||||
```
|
||||
|
||||
Or scale dynamically:
|
||||
|
||||
```bash
|
||||
kubectl scale deployment/edpdoc --replicas=3 -n <namespace>
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **Image scanning** - Scan container images for vulnerabilities
|
||||
2. **Resource limits** - Set CPU/memory limits in deployment
|
||||
3. **Network policies** - Restrict pod-to-pod communication
|
||||
4. **HTTPS** - Consider adding TLS termination (Ingress)
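
As an example for the resource limits point above, limits can be added without editing the manifest; the values here are placeholders, not tested recommendations:

```bash
kubectl set resources deployment/edpdoc -n <namespace> \
  --requests=cpu=100m,memory=128Mi \
  --limits=cpu=500m,memory=256Mi
```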
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
1. **CDN** - Add CDN in front of LoadBalancer
|
||||
2. **Caching** - Configure Nginx caching headers
|
||||
3. **Compression** - Enable gzip in Nginx
|
||||
4. **Image optimization** - Compress images in documentation
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Pod not starting
|
||||
|
||||
```bash
|
||||
kubectl describe pod <pod-name> -n <namespace>
|
||||
```
|
||||
|
||||
Check:
|
||||
|
||||
- Image pull errors
|
||||
- Resource constraints
|
||||
- Configuration errors
|
||||
|
||||
### Service unreachable
|
||||
|
||||
```bash
|
||||
kubectl describe service edpdoc -n <namespace>
|
||||
```
|
||||
|
||||
Check:
|
||||
|
||||
- LoadBalancer IP assigned
|
||||
- Port configuration
|
||||
- Network policies
|
||||
|
||||
### Old content served
|
||||
|
||||
Check:
|
||||
|
||||
- `imagePullPolicy: Always` in deployment
|
||||
- Image tag is updated
|
||||
- Pod has restarted
|
||||
|
||||
Force pod restart:
|
||||
|
||||
```bash
|
||||
kubectl rollout restart deployment/edpdoc -n <namespace>
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Test before deploying** - Always run `task test` locally
|
||||
2. **Use feature branches** - Don't deploy directly from local
|
||||
3. **Monitor after deployment** - Check logs and access
|
||||
4. **Document changes** - Update RELEASE.md
|
||||
5. **Version control** - Tag releases in Git
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
Potential improvements:
|
||||
|
||||
- **Blue-green deployment** - Zero-downtime updates
|
||||
- **Canary releases** - Gradual rollout to subset of users
|
||||
- **Auto-scaling** - HorizontalPodAutoscaler based on traffic
|
||||
- **Multi-region** - Deploy to multiple cloudlets
|
||||
- **HTTPS** - TLS certificates and Ingress controller
|
||||
|
||||
## Summary
|
||||
|
||||
The deployment process is automated and reliable:
|
||||
|
||||
✅ **CI ensures quality** - Tests prevent broken deployments
|
||||
✅ **Edge infrastructure** - Low-latency access from EU
|
||||
✅ **Kubernetes orchestration** - Reliable, scalable platform
|
||||
✅ **Simple rollback** - Easy to recover from issues
|
||||
|
||||
As a technicalWriter, focus on content quality. The platform handles deployment automatically! 🚀
---
|
||||
title: "Quick Reference"
|
||||
linkTitle: "Quick Reference"
|
||||
weight: 60
|
||||
description: >
|
||||
Cheat sheet for common technicalWriter tasks.
|
||||
---
|
||||
|
||||
## Common Commands
|
||||
|
||||
### Local Development
|
||||
|
||||
```bash
|
||||
# Start development server (with hot reload)
|
||||
task serve
|
||||
|
||||
# Build for production
|
||||
task build
|
||||
|
||||
# Build for development (faster, no minification)
|
||||
task build:dev
|
||||
|
||||
# Clean build artifacts
|
||||
task clean
|
||||
```
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
# Quick tests (build + markdown)
|
||||
task test:quick
|
||||
|
||||
# Full test suite
|
||||
task test
|
||||
|
||||
# Individual tests
|
||||
task test:build # Hugo build validation
|
||||
task test:markdown # Markdown linting
|
||||
task test:html # HTML validation
|
||||
task test:links # Link checking
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
```bash
|
||||
# Install dependencies
|
||||
task deps:install
|
||||
|
||||
# Update dependencies
|
||||
task deps:update
|
||||
|
||||
# Ensure npm dependencies (auto-installs if missing)
|
||||
task deps:ensure-npm
|
||||
```
|
||||
|
||||
### Container Operations
|
||||
|
||||
```bash
|
||||
# Build OCI/Docker image
|
||||
task build:oci-image
|
||||
|
||||
# Build and test container
|
||||
task test:oci-image
|
||||
```
|
||||
|
||||
## File Locations
|
||||
|
||||
### Content
|
||||
|
||||
| Path | Description |
|
||||
|------|-------------|
|
||||
| `content/en/docs/` | Main documentation |
|
||||
| `content/en/blog/` | Blog posts |
|
||||
| `content/en/_index.md` | Homepage |
|
||||
|
||||
### Architecture Models
|
||||
|
||||
| Path | Description |
|
||||
|------|-------------|
|
||||
| `resources/edp-likec4/` | Platform architecture models |
|
||||
| `resources/doc-likec4/` | Documentation platform models |
|
||||
|
||||
### Configuration
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `hugo.toml` | Hugo configuration |
|
||||
| `config.yaml` | Docsy theme config |
|
||||
| `Taskfile.yml` | Task definitions |
|
||||
| `package.json` | npm dependencies and scripts |
|
||||
| `.markdownlint.json` | Markdown linting rules |
|
||||
| `.htmlvalidate.json` | HTML validation rules |
|
||||
| `.htmltest.yml` | Link checker config |
|
||||
|
||||
### Build Output
|
||||
|
||||
| Path | Description |
|
||||
|------|-------------|
|
||||
| `public/` | Generated static site |
|
||||
| `resources/_gen/` | Generated resources (Hugo) |
|
||||
| `data/build_info.json` | Build metadata (git commit, version) |
|
||||
|
||||
## Markdown Frontmatter
|
||||
|
||||
### Standard Page
|
||||
|
||||
```yaml
|
||||
---
|
||||
title: "Page Title"
|
||||
linkTitle: "Short Title"
|
||||
weight: 10
|
||||
description: >
|
||||
Brief description for SEO and navigation.
|
||||
---
|
||||
```
|
||||
|
||||
### Blog Post
|
||||
|
||||
```yaml
|
||||
---
|
||||
title: "Post Title"
|
||||
date: 2025-01-15
|
||||
author: "Your Name"
|
||||
description: >
|
||||
Post summary.
|
||||
---
|
||||
```
|
||||
|
||||
## Embedding Architecture Diagrams
|
||||
|
||||
### Basic Embed
|
||||
|
||||
```markdown
|
||||
{{< likec4-view view="view-name" project="project-name" >}}
|
||||
```
|
||||
|
||||
### Parameters
|
||||
|
||||
- `view` (required) - The view ID from your LikeC4 model
|
||||
- `project` (optional, default: "architecture") - The LikeC4 project name
|
||||
- `title` (optional, default: "Architecture View: {view}") - Custom header text above the diagram
|
||||
|
||||
### Examples
|
||||
|
||||
```markdown
|
||||
{{< likec4-view view="overview" project="documentation-platform" >}}
|
||||
{{< likec4-view view="localDevelopment" project="documentation-platform" >}}
|
||||
{{< likec4-view view="cicdPipeline" project="documentation-platform" >}}
|
||||
{{< likec4-view view="otc-faas" project="architecture" title="OTC FaaS Deployment" >}}
|
||||
```
|
||||
|
||||
## LikeC4 Commands
|
||||
|
||||
### Regenerate Webcomponents
|
||||
|
||||
After modifying `.c4` files:
|
||||
|
||||
```bash
|
||||
task likec4:generate
|
||||
```
|
||||
|
||||
This regenerates both:
|
||||
|
||||
- `static/js/likec4-webcomponent.js` (EDP architecture)
|
||||
- `static/js/likec4-doc-webcomponent.js` (Documentation platform)
|
||||
|
||||
### Start Development Server
|
||||
|
||||
```bash
|
||||
cd resources/doc-likec4 # or resources/edp-likec4
|
||||
npm install
|
||||
npm start
|
||||
```
|
||||
|
||||
Opens LikeC4 IDE at `http://localhost:5173`
|
||||
|
||||
### Export Diagrams
|
||||
|
||||
```bash
|
||||
cd resources/doc-likec4
|
||||
npx likec4 export png -o ./images .
|
||||
```
|
||||
|
||||
## Git Workflow
|
||||
|
||||
### Feature Branch
|
||||
|
||||
```bash
|
||||
# Create feature branch
|
||||
git checkout -b feature/your-feature
|
||||
|
||||
# Make changes and test
|
||||
task serve
|
||||
task test:quick
|
||||
|
||||
# Commit
|
||||
git add .
|
||||
git commit -m "Description of changes"
|
||||
|
||||
# Push
|
||||
git push origin feature/your-feature
|
||||
|
||||
# Create pull request on GitHub
|
||||
```
|
||||
|
||||
### Update from Main
|
||||
|
||||
```bash
|
||||
git checkout main
|
||||
git pull origin main
|
||||
git checkout feature/your-feature
|
||||
git rebase main
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Port 1313 in use
|
||||
|
||||
```bash
|
||||
lsof -ti:1313 | xargs kill -9
|
||||
```
|
||||
|
||||
### Build errors
|
||||
|
||||
```bash
|
||||
task clean
|
||||
task build:dev
|
||||
```
|
||||
|
||||
### Missing dependencies
|
||||
|
||||
```bash
|
||||
task deps:install
|
||||
```
|
||||
|
||||
### Hugo module issues
|
||||
|
||||
```bash
|
||||
hugo mod clean
|
||||
hugo mod get -u
|
||||
hugo mod tidy
|
||||
```
|
||||
|
||||
### LikeC4 language server
|
||||
|
||||
In VS Code: `Ctrl+Shift+P` → "LikeC4: restart language server"
|
||||
|
||||
## URLs
|
||||
|
||||
### Local Development
|
||||
|
||||
- **Documentation:** <http://localhost:1313>
|
||||
- **LikeC4 IDE:** <http://localhost:5173> (when running `npm start` in likec4 folder)
|
||||
|
||||
### Production
|
||||
|
||||
Check `edgeconnectdeployment.yaml` for deployment URL or run:
|
||||
|
||||
```bash
|
||||
kubectl get service edpdoc -n <namespace>
|
||||
```
|
||||
|
||||
## Quick Checks Before Committing
|
||||
|
||||
1. ✅ `task test:quick` passes
|
||||
2. ✅ Preview looks correct in browser
|
||||
3. ✅ No broken links (visual check)
|
||||
4. ✅ Architecture diagrams render
|
||||
5. ✅ Frontmatter is correct
|
||||
|
||||
## Getting Help
|
||||
|
||||
- **Hugo docs:** <https://gohugo.io/documentation/>
|
||||
- **Docsy theme:** <https://www.docsy.dev/docs/>
|
||||
- **LikeC4:** <https://likec4.dev/>
|
||||
- **Task:** <https://taskfile.dev/>
|
||||
|
||||
## View Documentation Architecture
|
||||
|
||||
To understand how this documentation platform works:
|
||||
|
||||
→ Start here: [Documentation About Documentation](../)
---
|
||||
title: "Testing"
|
||||
linkTitle: "Testing"
|
||||
weight: 30
|
||||
description: >
|
||||
Quality assurance processes for documentation.
|
||||
---
|
||||
|
||||
## Testing Philosophy
|
||||
|
||||
Quality documentation requires testing. Our testing process validates:
|
||||
|
||||
- **Build integrity** - Hugo can generate the site
|
||||
- **Content quality** - Markdown follows best practices
|
||||
- **HTML validity** - Generated HTML is well-formed
|
||||
- **Link integrity** - No broken internal or external links
|
||||
|
||||
## Testing Capabilities
|
||||
|
||||
{{< likec4-view view="testingCapabilities" project="documentation-platform" >}}
|
||||
|
||||
## Local Testing
|
||||
|
||||
Before committing changes, run tests locally:
|
||||
|
||||
### Quick Tests
|
||||
|
||||
For rapid feedback during development:
|
||||
|
||||
```bash
|
||||
task test:quick
|
||||
```
|
||||
|
||||
This runs:
|
||||
|
||||
- `task test:build` - Hugo build validation
|
||||
- `task test:markdown` - Markdown linting
|
||||
|
||||
### Full Test Suite
|
||||
|
||||
Before creating a pull request:
|
||||
|
||||
```bash
|
||||
task test
|
||||
```
|
||||
|
||||
This runs all tests including:
|
||||
|
||||
- `task test:build` - Build validation
|
||||
- `task test:markdown` - Markdown linting
|
||||
- `task test:html` - HTML validation
|
||||
- `task test:links` - Link checking
|
||||
|
||||
## Individual Tests
|
||||
|
||||
You can run individual tests:
|
||||
|
||||
### Build Test
|
||||
|
||||
Validates that Hugo can build the site:
|
||||
|
||||
```bash
|
||||
task test:build
|
||||
```
|
||||
|
||||
This runs: `hugo --gc --minify --logLevel info`
|
||||
|
||||
**What it checks:**
|
||||
|
||||
- Hugo configuration is valid
|
||||
- Content files have correct frontmatter
|
||||
- Templates render without errors
|
||||
- No circular dependencies in content structure
|
||||
|
||||
### Markdown Lint
|
||||
|
||||
Checks Markdown syntax and style:
|
||||
|
||||
```bash
|
||||
task test:markdown
|
||||
```
|
||||
|
||||
This uses `markdownlint` with custom rules in `.markdownlint.json`.
|
||||
|
||||
**What it checks:**
|
||||
|
||||
- Consistent heading hierarchy
|
||||
- Proper list formatting
|
||||
- Code blocks have language tags
|
||||
- No trailing whitespace
|
||||
- Consistent line length (where applicable)
|
||||
|
||||
**Common issues:**
|
||||
|
||||
- Missing blank lines around code blocks
|
||||
- Inconsistent list markers
|
||||
- Heading levels skipped
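
Many of these findings can be fixed automatically. Assuming the markdownlint CLI from the npm dev dependencies, something like the following usually helps (review the diff afterwards):

```bash
npx markdownlint --fix 'content/**/*.md'
task test:markdown
```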
|
||||
|
||||
### HTML Validation
|
||||
|
||||
Validates generated HTML:
|
||||
|
||||
```bash
|
||||
task test:html
|
||||
```
|
||||
|
||||
This uses `htmlvalidate` with rules in `.htmlvalidate.json`.
|
||||
|
||||
**What it checks:**
|
||||
|
||||
- Well-formed HTML5
|
||||
- Proper nesting of elements
|
||||
- Valid attributes
|
||||
- Accessible markup
|
||||
|
||||
### Link Checking
|
||||
|
||||
Verifies all links are valid:
|
||||
|
||||
```bash
|
||||
task test:links
|
||||
```
|
||||
|
||||
This uses `htmltest` configured in `.htmltest.yml`.
|
||||
|
||||
**What it checks:**
|
||||
|
||||
- Internal links point to existing pages
|
||||
- External links are reachable
|
||||
- Anchor links target existing elements
|
||||
- No redirects (301/302)
|
||||
|
||||
**Note:** This test can be slow for large sites with many external links.
|
||||
|
||||
## CI Testing
|
||||
|
||||
All tests run automatically on:
|
||||
|
||||
- **Push to `main`** - Full test suite
|
||||
- **Pull requests** - Full test suite
|
||||
|
||||
View the GitHub Actions workflow: `.github/workflows/test.yml`
|
||||
|
||||
### CI Test Results
|
||||
|
||||
If tests fail in CI:
|
||||
|
||||
1. Check the GitHub Actions logs
|
||||
2. Look for specific test failures
|
||||
3. Run the same test locally: `task test:<name>`
|
||||
4. Fix the issue
|
||||
5. Commit and push
|
||||
|
||||
### Artifacts
|
||||
|
||||
CI uploads test artifacts:
|
||||
|
||||
- `htmltest-report/` - Link checking results
|
||||
|
||||
Download these from the GitHub Actions run to investigate failures.
|
||||
|
||||
## Test Configuration Files
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `.markdownlint.json` | Markdown linting rules |
|
||||
| `.htmlvalidate.json` | HTML validation rules |
|
||||
| `.htmltest.yml` | Link checker configuration |
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Test early, test often** - Run `task test:quick` frequently
|
||||
2. **Fix issues immediately** - Don't accumulate technical debt
|
||||
3. **Understand failures** - Read error messages carefully
|
||||
4. **Update tests** - If rules change, update config files
|
||||
5. **Document exceptions** - If you need to ignore a rule, document why
|
||||
|
||||
## Common Issues and Solutions
|
||||
|
||||
### Markdown: MD031 - Blank lines around fences
|
||||
|
||||
**Problem:** Missing blank line before/after code block
|
||||
|
||||
**Solution:** Add blank lines:
|
||||
|
||||
````markdown
Some text

```bash
command here
```

More text
````
|
||||
|
||||
### Markdown: MD032 - Blank lines around lists
|
||||
|
||||
**Problem:** Missing blank line before/after list
|
||||
|
||||
**Solution:** Add blank lines:
|
||||
|
||||
```markdown
|
||||
Text before
|
||||
|
||||
- List item 1
|
||||
- List item 2
|
||||
|
||||
Text after
|
||||
```
|
||||
|
||||
### HTML: Invalid nesting
|
||||
|
||||
**Problem:** Elements improperly nested
|
||||
|
||||
**Solution:** Check template files and shortcodes
|
||||
|
||||
### Link Check: 404 Not Found
|
||||
|
||||
**Problem:** Link points to non-existent page
|
||||
|
||||
**Solution:**
|
||||
|
||||
- Fix the link
|
||||
- Create the missing page
|
||||
- Remove the link if no longer relevant
|
||||
|
||||
## Next Steps
|
||||
|
||||
Learn about the automated [CI/CD pipeline](../cicd/).
---
title: "eDF Documentation Overview"
description: "Comprehensive guide for users and auditors to understand and use the eDF."
---

# Meta

## Guidelines

1. For users/developers/engineers we describe our output / outcome as a product
   * it is usable
   * there are links / lists to repos
2. We have and describe a 'product-structure-tree'
3. For auditors / governance we have a list / cross-reference to Jira tickets
   * R&D ?
   * mapping to the project phases; how does a team create a platform?
   * keywords: mobbing, mob programming
   * mapping to IPCEI-CIS deliverables ???, or mapping to epics?
   * project history, project dynamics, sub-projects of the eDF, sub-project dependencies, e.g. 'Platform'
   * friendly user phase
   * Forgejo community, OSS, PR handling
   * external stakeholders, user experience, think-ahead integration
   * technologies, technology focus areas, cutting-edge research
   * design/specification and evaluation of solution drafts (e.g. VictoriaMetrics, GARM, Terraform, ArgoCD, ...)
   * CI/CD, golden paths (note: covered in the high-level concept; deployment of apps by developers is missing)

# Introduction

- Purpose of the eDF
- Target audience (developers, engineers, auditors)
- High-level product structure overview
- High-level architecture overview

# eDF Components Overview

- List of all major components
- Vertical and horizontal layers explained
- Component maturity/status (fully integrated, partial, experimental)

# Getting Started

- Quickstart guide for developers
- Onboarding steps for eDF engineers
- Prerequisites and environment setup

# Component Details

For each component:

- Description and purpose
- Repository link
- README summary
- Architecture diagrams (link to Miro/Lucid)
- Usage instructions
- Integration points

# Development Experience

- How to contribute
- Local development workflow
- CI/CD pipelines
- Testing and validation

# Operational Experience

- Deployment guides
- Monitoring and observability
- Troubleshooting

# Audit & Compliance

- Overview of implemented controls
- Ticket references (Jira, changelogs)
- Documentation of decisions and reviews
- Evidence of value and coverage

# FAQ & Support

- Common issues and solutions
- Contact points for help

# Appendix

- Glossary
- References to external resources
---
title: "Documentation (v1 - Legacy)"
linkTitle: "v1 (Legacy)"
weight: 100
description: >
  Legacy documentation - archived version of the original content.
---

{{% alert title="Note" color="warning" %}}
This is the legacy documentation (v1). For the latest version, please visit the [current documentation](/docs/).
{{% /alert %}}

This section contains the original documentation that is being migrated to a new structure.
+++
|
||||
archetype = "sub-chapter"
|
||||
title = "CI/CD Pipeline"
|
||||
weight = 1
|
||||
[params]
|
||||
author = 'florian.fuerstenberg@t-systems.com'
|
||||
date = '2024-10-08'
|
||||
+++
|
||||
|
||||
This document describes the concept of pipelining in the context of the Edge Developer Framework.
|
||||
|
||||
## Overview
|
||||
|
||||
In order to provide a composable pipeline as part of the Edge Developer Framework (EDF), we have defined a set of concepts that can be used to create pipelines for different usage scenarios. These concepts are:
|
||||
|
||||
**Pipeline Contexts** define the context in which a pipeline execution is run. Typically, a context corresponds to a specific step within the software development lifecycle, such as building and testing code, deploying and testing code in staging environments, or releasing code. Contexts define which components are used, in which order, and the environment in which they are executed.
|
||||
|
||||
**Components** are the building blocks, which are used in the pipeline. They define specific steps that are executed in a pipeline such as compiling code, running tests, or deploying an application.
|
||||
|
||||

|
||||
|
||||
## Pipeline Contexts
|
||||
|
||||
We provide 4 Pipeline Contexts that can be used to create pipelines for different usage scenarios. The contexts can be described as the golden path, which is fully configurable and extendable by the users.
|
||||
|
||||
Pipeline runs with a given context can be triggered by different actions. For example, a pipeline run with the `Continuous Integration` context can be triggered by a commit to a repository, while a pipeline run with the `Continuous Delivery` context could be triggered by merging a pull request to a specific branch.
|
||||
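For illustration only – the concrete EDF pipeline implementation is not defined in this document – such a trigger mapping could look like the following Forgejo/GitHub Actions style sketch. The file name, runner label and job content are assumptions, not EDF specifications.

```shell
# Hypothetical sketch: mapping repository events to pipeline contexts.
mkdir -p .forgejo/workflows
cat > .forgejo/workflows/contexts.yaml <<'EOF'
on:
  pull_request:               # Continuous Integration context for every change
  push:
    branches: [main]          # Continuous Delivery context after a merge
  schedule:
    - cron: '0 3 * * *'       # Chore context, e.g. nightly security/compliance scans

jobs:
  continuous-integration:
    if: github.event_name == 'pull_request'
    runs-on: docker           # illustrative runner label
    steps:
      - uses: actions/checkout@v4
      - run: make lint test   # fast, lightweight checks as described above
EOF
```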
|
||||
### Continuous Integration
|
||||
|
||||
This context is focused on running tests and checks on every commit to a repository. It is used to ensure that the codebase is always in a working state and that new changes do not break existing functionality. Tests within this context are typically fast and lightweight, and are used to catch simple errors such as syntax errors, typos, and basic logic errors. Static vulnerability and compliance checks can also be performed in this context.
|
||||
|
||||
### Continuous Delivery
|
||||
|
||||
This context is focused on deploying code to an (ephemeral) staging environment after its static checks have been performed. It is used to ensure that the codebase is always deployable and that new changes can be easily reviewed by stakeholders. Tests within this context are typically more comprehensive than those in the Continuous Integration context, and handle more complex scenarios such as integration tests and end-to-end tests. Additionally, live security and compliance checks can be performed in this context.
|
||||
|
||||
### Continuous Deployment
|
||||
|
||||
This context is focused on deploying code to a production environment and/or publishing artefacts after static checks have been performed.
|
||||
|
||||
### Chore
|
||||
|
||||
This context focuses on measures that need to be carried out regularly (e.g. security or compliance scans). They are used to ensure the robustness, security and efficiency of software projects. They enable teams to maintain high standards of quality and reliability while minimizing risks and allowing developers to focus on more critical and creative aspects of development, increasing overall productivity and satisfaction.
|
||||
|
||||
## Components
|
||||
|
||||
Components are the composable and self-contained building blocks for the contexts described above. The aim is to cover most (common) use cases for application teams and make them particularly easy to use by following our golden paths. This way, application teams only have to include and configure the functionalities they actually need. An additional benefit is that this allows for easy extensibility. If a desired functionality has not been implemented as a component, application teams can simply add their own.
|
||||
|
||||
Components must be as small as possible and follow the same concepts of software development and deployment as any other software product. In particular, they must have the following characteristics:
|
||||
|
||||
* designed for a single task
|
||||
* provide a clear and intuitive output
|
||||
* easy to compose
|
||||
* easily customizable or interchangeable
|
||||
* automatically testable
|
||||
|
||||
In the EDF, components are divided into different categories. Each category contains components that perform similar actions. For example, the `build` category contains components that compile code, while the `deploy` category contains components that automate the management of the artefacts created in a production-like system.
|
||||
|
||||
> **Note:** Components are comparable to interfaces in programming. Each component defines a certain behaviour, but the actual implementation of these actions depends on the specific codebase and environment.
|
||||
>
|
||||
> For example, the `build` component defines the action of compiling code, but the actual build process depends on the programming language and build tools used in the project. The `vulnerability scanning` component will likely execute different tools and interact with different APIs depending on the context in which it is executed.
|
||||
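A purely hypothetical sketch of this composition idea; the directory layout and the `run-context.sh` helper are illustrative and not part of the EDF codebase:

```shell
# Contexts reference components; application teams pick only the
# component implementations that match their codebase.
tree pipeline/
# pipeline/
# ├── contexts/
# │   └── continuous-integration.yaml   # ordered list of components for this context
# └── components/
#     ├── build/go.yaml                 # one 'build' implementation per ecosystem
#     ├── build/node.yaml
#     ├── code-test/go.yaml
#     └── security/trivy-scan.yaml

# Hypothetical invocation: compose a context from the needed components only.
./run-context.sh continuous-integration \
  --component build/go \
  --component code-test/go \
  --component security/trivy-scan
```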
|
||||
### Build
|
||||
|
||||
Build components are used to compile code. They can handle code written in different programming languages and can target different platforms.
|
||||
|
||||
### Code Test
|
||||
|
||||
These components define tests that are run on the codebase. They are used to ensure that the codebase is always in a working state and that new changes do not break existing functionality. Tests within this category are typically fast and lightweight, and are used to catch simple errors such as syntax errors, typos, and basic logic errors. Tests must be executable in isolation, and do not require external dependencies such as databases or network connections.
|
||||
|
||||
### Application Test
|
||||
|
||||
Application tests are tests which run the code in a real execution environment and provide external dependencies. These tests are typically more comprehensive than those in the `Code Test` category, and handle more complex scenarios such as integration tests and end-to-end tests.
|
||||
|
||||
### Deploy
|
||||
|
||||
Deploy components are used to deploy code to different environments, but can also be used to publish artifacts. They are typically used in the `Continuous Delivery` and `Continuous Deployment` contexts.
|
||||
|
||||
### Release
|
||||
|
||||
Release components are used to create releases of the codebase. They can be used to create tags in the repository, create release notes, or perform other tasks related to releasing code. They are typically used in the `Continuous Deployment` context.
|
||||
|
||||
### Repo House Keeping
|
||||
|
||||
Repo house keeping components are used to manage the repository. They can be used to clean up old branches, update the repository's README file, or perform other maintenance tasks. They can also be used to handle issues, such as automatically closing stale issues.
|
||||
|
||||
### Dependency Management
|
||||
|
||||
Dependency management is used to automate the process of managing dependencies in a codebase. It can be used to create pull requests with updated dependencies, or to automatically update dependencies in a codebase.
|
||||
|
||||
### Security and Compliance
|
||||
|
||||
Security and compliance components are used to ensure that the codebase meets security and compliance requirements. They can be used to scan the codebase for vulnerabilities, check for compliance with coding standards, or perform other security and compliance checks. Depending on the context, different tools can be used to accomplish scanning. In the `Continuous Integration` context, static code analysis can be used to scan the codebase for vulnerabilities, while in the `Continuous Delivery` context, live security and compliance checks can be performed.
|
||||
|
Before Width: | Height: | Size: 732 KiB |
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
title: Humanitec
|
||||
weight: 4
|
||||
---
|
||||
|
||||
|
||||
tbd
|
||||
|
|
@ -1,269 +0,0 @@
|
|||
|
||||
|
||||
# Ideas
|
||||
|
||||
* Kanban
|
||||
* product review sessions
|
||||
|
||||
# ChatGPT
|
||||
|
||||
## Missing backlog
|
||||
|
||||
> Which agile process models or best practices exist in software development when the product backlog is not there yet? There is a product vision and even a PoC, but the product is still not plannable in user stories, neither technically nor in terms of user value. Therefore SCRUM is not feasible. The team is about 8 people, and we are considering delivering a milestone in the next 8 weeks simply through a daily 'PDCA': a morning planning, then exploration in subgroups, then merging/integrating the results in the plenum as 'C' (Check), and a closing 'A' to secure, version and document the results. What do you think of this approach, which risks and suggestions for improvement do you see? Or is there a standard approach for this?

The scenario you describe – vision and PoC in place, but no elaborated product backlog – is typical for an early **exploration or inception phase** in product development. In this phase classic **Scrum** is indeed often too rigid, because it presupposes a certain clarity about features and stories. The approach you are considering sounds like a very pragmatic and iterative method that at its core fits well with **Lean Startup principles** or **Design Thinking + agile engineering**.
|
||||
|
||||
## Assessment of your PDCA-based approach

**Positive:**
* A **daily PDCA cycle** (Plan-Do-Check-Act) creates fast feedback loops.
* **Morning planning** and a **check meeting in the plenum** foster transparency and self-organization.
* **Exploration in subgroups** allows parallel experiments.
* The **closing "A"** for securing results is essential, otherwise it remains "busy work".

**Risks:**

1. **Missing long-term structure:** Without a rough overarching direction (e.g. milestone plan, target picture) it can easily end in aimless exploration.
2. **Uneven learning:** Subgroups may work redundantly or incompatibly if there is no shared understanding.
3. **Documentation as an afterthought:** If documentation only happens at the end, knowledge is at risk of getting lost – better to maintain "living docs" in real time.
4. **Stakeholder decoupling:** Without explicit reviews or demo rituals, feedback from outside may be missing.

## Suggestions for improvement

1. **Exploration backlog or hypothesis board:** Even without a classic product backlog you can work with an **experiment/hypothesis board** (à la Lean Startup). For example:
   * Hypothesis: "Feature X will improve use case Y."
   * Experiment: "Build a prototype and test it with user group Z."
   * Document results & learnings.

2. **Weekly goal setting:** A weekly planning every Monday with clear goals for the week (outcome-focused, not task-focused).

3. **Integration with Dual Track Agile:** The approach resembles the **dual-track model**: a **discovery track** (exploring, validating, prototyping) runs in parallel to the later **delivery track** (releases, sprints). You are still fully in the discovery track.

4. **Design studios or spike time boxes:** Structure time for e.g. 2-day design studios or 1-day spikes – this raises the quality of the exploration.

5. **Review/share session on Fridays:** A fixed weekly review/demo session (even if only internal) increases discipline and helps with the "Check" part of PDCA.

## Common standard methods for this phase

* **Design Thinking** (for user-centered exploration)
* **Lean UX / Lean Startup** (hypothesis-driven)
* **Dual Track Agile**
* **Google Design Sprint (5 days)** – extremely helpful when it comes to testing user ideas
* **Shape Up (Basecamp)** – for non-sprint-based work with a "bet" on 6-week projects

---

**Conclusion:**
Your PDCA cycle is a solid, pragmatic entry into an uncertain, pre-structured product phase. With a few additions (exploration backlog, fixed review elements, a hypothesis structure) you can turn it into an effective discovery approach that prepares the transition to a classic agile framework (e.g. Scrum).

Would you like a concrete template for an exploration backlog or hypothesis board for your team?
|
||||
|
||||
## Architecture already in place

> OK, you mainly find the daily cycles too short. I would like to add that we have an architecture. That may sound paradoxical, but it is actually the case that we have already designed the software system and already know the components, but now above all need to figure out the paths for automated deployment into a cloud and the configuration and management structures of our software. That is the part which cannot be planned any further right now. With these additions, can you sharpen your feedback again?

Thanks for the addition – it changes the starting position decisively and allows a much more precise assessment.

### New starting position

* You **have an architecture**: the components and interfaces are clear.
* There is **already a PoC** and a product vision.
* The **backlog is missing** because you are currently clarifying the **"non-functional" topics such as deployment, cloud environment, configuration and operations**.
* The system is **technically roughly designed**, but the **operations and deployment processes** are unclear and are being developed exploratively.

### Sharper feedback

#### 1. **Daily PDCA cycles appear too granular in this context**

Infrastructure, deployment or configuration questions often lead to deep "spikes" (e.g. setting up a CI/CD pipeline, modelling a Helm chart, evaluating secrets handling). These often need more than one day until the "Check", because dependencies arise (e.g. permissions, cloud access, test environments). A **2- to 3-day rhythm** with clear intermediate goals would be more realistic, complemented by:

* a **daily standup as the pacemaker**, but not necessarily as a complete PDCA cycle.
* **weekly planning with target goals and review rituals** to make progress measurable.

#### 2. **What you are doing is not product delivery, but "system enablement"**

You are in the transition from architecture to an **Infrastructure as Code + platform enablement track**. This means:
* The "user stories" are not classic features, but for example:
   * "As a team I want to be able to deploy our software with a single command."
   * "As an operator I want to know how to configure services."

This argues for an **infrastructure Kanban board**, not sprints.
|
||||
|
||||
#### 3. **Possible process models / best practices**

| Model / method | Why it fits now |
|------------------------------------------|------------------------------------------------------------------|
| **Kanban for technical tasks** | Flexible work-in-progress control, ideal for exploration. |
| **Spikes** (from XP) | Timeboxed exploration of open questions (e.g. Helm, secrets). |
| **Enabler epics (SAFe)** | Frame deployment and ops questions formally as "enablers". |
| **Team Topologies – platform team light**| You are currently acting like an internal platform team. |
| **Weekly DevOps review** | Every 5–7 days: status of automation/docs/structures. |
| **ADRs & living docs** | Architectural Decision Records for deployment/ops decisions.|

#### 4. **Concrete recommendations for structuring**

* **Formulate goals for the 8 weeks** as **"capabilities"**, e.g.:
   * "Greenfield cloud deployment to AWS/GCP via GitHub Actions."
   * "Parameterizable Helm chart with documentation."
   * "Evaluate configurability via ENV vs. secrets."
* Use a **1–2 week cadence**, with a review/checkpoint every week.
* **Boards with task categories**:
   * 🧪 Spike
   * 🛠️ Tooling/automation
   * 📦 Infrastructure/service
   * 📓 Documentation
* **Version and document results immediately**, e.g.:
   * GitOps repo structure
   * ADRs
   * example deployments

---

### Conclusion

You are doing the **"technical-operational enablement"** of your system, not product delivery. This justifies:
* a **Kanban-like, flexible model with timeboxed reviews**,
* daily coordination, but not necessarily daily cycles,
* a focus on **enabler goals instead of functional stories**.

SCRUM is out of place here, but your approach is very close to a **modern DevOps-oriented way of working**, as used for example by **platform engineering teams**.

Would you like an example of such a Kanban board with matching task types and goal structure?
|
||||
|
||||
## Kanban
|
||||
|
||||
> yes, please!

Here is a concrete example of a **Kanban board** tailored specifically to your situation as a "platform/enablement" team with an existing architecture picture, but with deployment and operations design still open.

---

## 🎯 **Goal structure (overarching, for 8 weeks)**

These goals are best formulated as **capabilities** – abilities the system should have afterwards:

| Capability ID | Goal description |
|---------------|---------------------------------------------------------------------------------------------|
| C1 | The application can be deployed into cloud environment X automatically (incl. secrets mgmt) |
| C2 | The system configuration is externalized and documented (ENV, YAML, or similar) |
| C3 | Monitoring and logging infrastructure is ready for use and documented |
| C4 | Dev and test environments are clearly separated and can be provisioned automatically |
| C5 | All platform decisions (e.g. Helm vs. Kustomize) are versioned and justified |

These capabilities form **columns** or **swimlanes** on the board (if your tool supports this, e.g. GitHub Projects, Jira or Trello with labels).

---

## 🗂️ **Kanban board columns (classic)**

| Column | Purpose |
|------------------|-----------------------------------------------------------|
| 🔍 Backlog | Ideas, hypotheses, tasks – prioritized by capabilities |
| 🧪 In Exploration | Active spikes, proofs, technical evaluations |
| 🛠️ In Progress | Implementation with a concrete goal |
| ✅ Review / Check | Functional check, internal review |
| 📦 Done | Completed, documented, in a repo where applicable |

---

## 🏷️ **Task types (labels or emojis for tagging)**

| Symbol / label | Type | Example |
|------------------|-----------------------------|--------------------------------------------------------------------------|
| 🧪 Spike | Technical investigation | "Investigate ArgoCD vs. Flux for GitOps deployment" |
| 📦 Infra | Infrastructure | "Provision dev/test/stage in GCP with Terraform" |
| 🔐 Secrets | Security-related task | "Design for secret handling with Sealed Secrets" |
| 📓 Docs | Documentation | "Write a README for the developer setup" |
| 🧰 Tooling | CI/CD, pipelines, linters | "Write a GitHub Action for build & deploy" |
| 🔁 Decision | Architecture decision | "ADR: Helm vs. Kustomize for service deployment" |

---

## 🧩 Example tasks for capability "C1 – Automate deployment"

| Task | Type | Status |
|----------------------------------------------|----------|----------------|
| Write GitHub Action for Docker image push | 🧰 Tooling | 🛠️ In Progress |
| Evaluate ArgoCD vs. Flux | 🧪 Spike | ✅ Review |
| Create Helm Chart for Service A | 📦 Infra | 🔍 Backlog |
| Create ADR: deployment tool decision | 🔁 Decision | 🔍 Backlog |
| Add deployment doc in Wiki | 📓 Docs | 🔍 Backlog |

---

## 📅 Cadence recommendation

* **Daily standup (15 min)**: What is moving, what is blocked? Pull instead of push!
* **Weekly planning (Monday)**: Prioritization and backlog grooming (which capabilities are up this week?)
* **Weekly review (Friday)**: Present findings, decisions and documentation
* Optional: a **retrospective every 2 weeks**, if the team has the energy

---

If you like, I can create an example board for you as a Markdown template or as a CSV file for import into Trello, GitHub Projects, Jira etc. Would that be helpful?
|
||||
|
||||
## Living Docs
|
||||
|
||||
> can you say a bit more about 'living docs'?

Yes, gladly! **"Living docs"** (or **living documentation**) is a concept from agile software development and DevOps that refers to **up-to-date documentation generated directly from the system or from the work process**. The goal is that the documentation *does not go stale*, but is coupled so tightly to code and system state that it (partially) updates itself or moves along with the development process.

---

## 🔍 **What characterizes "living documentation"?**

| Characteristic | Description |
|----------------------------------|------------------------------------------------------------------------------|
| 🔄 Automatically generated or updated | Generated from code, tests, pipelines, ADRs or deployments |
| 📌 Version-controlled | Stored in Git, maintained together with the code |
| 🧑💻 Close to developers | Evolves together with the code – no separation between "docs" and dev |
| 📈 Changeable & traceable | Every change to code/docs has a commit & context |
| 📚 Value for everyone involved| Addresses devs, ops, PMs or other teams – not just "for later" |

---

## 🧰 Typical forms of living docs

| Type | Description & tools |
|----------------------|----------------------------------------------------------------------------------------|
| **Architecture Decision Records (ADRs)** | Markdown files in the repo (e.g. `docs/adr/001-helm-vs-kustomize.md`) |
| **Code docs from source** | With tools like JSDoc, TypeDoc, Sphinx, Doxygen, etc. |
| **API docs** | Generated automatically from code or OpenAPI (Swagger) |
| **Test docs (e.g. BDD)** | e.g. Gherkin-style `Given/When/Then` specifications as documentation |
| **Monitoring & deployment status** | e.g. dashboards with infrastructure docs (Grafana, Backstage, Argo UI) |
| **Dev docs in the repo** | e.g. `README.md`, `CONTRIBUTING.md`, `how-to/`, `docs/`, `adr/` |
| **Playbooks / runbooks** | Step-by-step instructions for operations or incidents |

---

## ✅ **Best Practices**

1. **Use Markdown + Git:** Simple, versionable, readable → ADRs, runbooks, how-tos.
2. **Document decisions deliberately:** Better concise, clean ADRs than no docs at all.
3. **Link docs with code / CI/CD:** Example: the build tool automatically generates API docs or runs ADR lint checks.
4. **Link logs, dashboards, monitoring:** e.g. in a `system-overview.md`.
5. **Treat docs like code:** Subject to review, pull-request based, checkable in CI.

---

## 🛠️ Tools & Frameworks

| Purpose | Tools (examples) |
|------------------------|-------------------------------------------------------------------------|
| ADRs | [`adr-tools`](https://github.com/npryce/adr-tools), plain Markdown |
| Markdown docs | MkDocs, Docusaurus, GitBook |
| API docs | Swagger/OpenAPI, Redocly |
| Infrastructure docs | Backstage (Spotify), Terraform-docs |
| Docs automation | GitHub Actions, pre-commit hooks, CI checks |
|
||||
|
||||
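As a minimal illustration of the `adr-tools` entry above – the commands are the ones shipped with the tool, while the directory and decision titles are just examples:

```shell
adr init docs/adr                          # one-time setup of the ADR directory
adr new "Use Helm for service deployment"  # creates the next numbered ADR file
adr new -s 2 "Switch to Kustomize"         # a new ADR that supersedes ADR 2
adr generate toc > docs/adr/README.md      # regenerate an index of all decisions
```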
---
|
||||
|
||||
## 🧠 Mindset: docs = a conversation across time

> "Documentation is not an afterthought, but an asynchronous conversation with your future self – or with the next team member."

Especially in your context (technical enablement, deployment, operations), **decision-oriented, lightweight capturing of context** is worth its weight in gold. ADRs and `docs/runbooks/` directories are an ideal starting point.

---

Would you like a template for an ADR or an example directory layout for a sensible living-docs structure in the repo?
|
||||
|
Before Width: | Height: | Size: 114 KiB |
|
Before Width: | Height: | Size: 114 KiB |
|
|
@ -1,93 +0,0 @@
|
|||
---
|
||||
title: Stakeholder Workshop Intro
|
||||
weight: 50
|
||||
description: An overall eDF introduction for stakeholders
|
||||
linktitle: Stakeholder Workshops
|
||||
---
|
||||
|
||||
|
||||
## Edge Developer Framework Solution Overview
|
||||
|
||||
> This section is derived from [conceptual-onboarding-intro](../conceptual-onboarding/1_intro/)
|
||||
|
||||
1. As presented in the introduction: We have the ['Edge Developer Framework'](./edgel-developer-framework/). \
|
||||
In short the mission is:
|
||||
* Build a european edge cloud IPCEI-CIS
|
||||
* which contains the typical layers infrastructure, platform, application
|
||||
* and on top has a new layer 'developer platform'
|
||||
* which delivers a **cutting edge developer experience** and enables **easy deploying** of applications onto the IPCEI-CIS
|
||||
2. We think the solution for EDF is related to ['Platforming' (Digital Platforms)](../conceptual-onboarding/3_platforming/)
1. The next evolution after DevOps
2. Gartner predicts 80% of SWE companies to have platforms by 2026
3. Platforms have a history since around 2019
4. CNOE has a working group which created capabilities and a maturity model
3. Platforms evolve - nowadays there are [Platform Orchestrators](../conceptual-onboarding/4_orchestrators/)
|
||||
1. Humanitec set up a Reference Architecture
|
||||
2. There is this 'Orchestrator' thing - declaratively describe, customize and change platforms!
|
||||
4. Mapping our assumptions to the [CNOE solution](../conceptual-onboarding/5_cnoe/)
|
||||
1. CNOE is a hot candidate to help and fulfill our platform building
|
||||
2. CNOE aims to embrace change and customization!
|
||||
|
||||
|
||||
## 2. Platforming as the result of DevOps
|
||||
|
||||
### DevOps since 2010
|
||||
|
||||

|
||||
|
||||
* from 'left' to 'right' - plan to monitor
|
||||
* 'leftshift'
|
||||
* --> turns out to be a right shift for developers with cognitive overload
|
||||
* 'DevOps is dead' -> we need Platforms
|
||||
|
||||
### Platforming to provide 'golden paths'
|
||||
|
||||
> don't mix up 'golden paths' with pipelines or CI/CD
|
||||
|
||||

|
||||
|
||||
#### Short list of platform using companies
|
||||
|
||||
As [Gartner states](https://www.gartner.com/en/newsroom/press-releases/2023-11-28-gartner-hype-cycle-shows-ai-practices-and-platform-engineering-will-reach-mainstream-adoption-in-software-engineering-in-two-to-five-years): "By 2026, 80% of software engineering organizations will establish platform teams as internal providers of reusable services, components and tools for application delivery."
|
||||
|
||||
Here is a small list of companies already using IDPs:
|
||||
|
||||
* Spotify
|
||||
* Airbnb
|
||||
* Zalando
|
||||
* Uber
|
||||
* Netflix
|
||||
* Salesforce
|
||||
* Google
|
||||
* Booking.com
|
||||
* Amazon
|
||||
* Autodesk
|
||||
* Adobe
|
||||
* Cisco
|
||||
* ...
|
||||
|
||||
## 3 Platform building by 'Orchestrating'
|
||||
|
||||
So the goal of platforming is to build a 'digital platform' which fits [this architecture](https://www.gartner.com/en/infrastructure-and-it-operations-leaders/topics/platform-engineering) ([Ref. in German)](https://www.gartner.de/de/artikel/was-ist-platform-engineering):
|
||||
|
||||

|
||||
|
||||
### Digital Platform blueprint: Reference Architecture
|
||||
|
||||
The blueprint for such a platform is given by the reference architecture from Humanitec:
|
||||
|
||||
[Platform Orchestrators](../conceptual-onboarding/4_orchestrators/)
|
||||
|
||||
### Digital Platform builder: CNOE
|
||||
|
||||
Since 2023 this is done by 'orchestrating' such platforms. One orchestrator is the [CNOE solution](../conceptual-onboarding/5_cnoe/), which highly inspired our approach.
|
||||
|
||||
In our orchestration engine we think in 'stacks' of 'packages' containing platform components.
|
||||
|
||||
|
||||
## 4 Sticking it all together: our current platform generated by platform orchestration
|
||||
|
||||
Sticking together the platforming orchestration concept, the reference architecture and the CNOE stack solution, [this is our current running platform minimum viable product](../plan-in-2024/image-2024-8-14_10-50-27.png).
|
||||
|
||||
This will now be presented! Enjoy!
|
||||
|
||||
|
Before Width: | Height: | Size: 212 KiB |
|
Before Width: | Height: | Size: 96 KiB |
|
Before Width: | Height: | Size: 264 KiB |
|
Before Width: | Height: | Size: 295 KiB |
|
|
@ -1,15 +0,0 @@
|
|||
---
|
||||
title: PoC Structure
|
||||
weight: 5
|
||||
description: Building plan of the PoC milestone (end 2024) output
|
||||
---
|
||||
|
||||
Presented and approved on tuesday, 26.11.2024 within the team:
|
||||
|
||||

|
||||
|
||||
|
||||
The use cases/application lifecycle and deployment flow is drawn here: https://confluence.telekom-mms.com/display/IPCEICIS/Proof+of+Concept+2024
|
||||
|
||||
|
||||

|
||||
|
|
@ -1,15 +0,0 @@
|
|||
---
|
||||
title: Deployment
|
||||
weight: 3
|
||||
---
|
||||
|
||||
> **Mantra**:
|
||||
>
|
||||
> 1. Everything as Code.
|
||||
> 1. Cloud natively deployable everywhere.
|
||||
> 1. Ramping up and tearing down often is a no-brainer.
|
||||
> 1. Especially locally (whereby 'locally' means 'under my own control')
|
||||
|
||||
## Draft (28.8.24)
|
||||
|
||||

|
||||
|
|
@ -1,8 +0,0 @@
|
|||
---
|
||||
title: POCs
|
||||
weight: 2
|
||||
---
|
||||
|
||||
## Further ideas for POCs
|
||||
|
||||
* see sample apps 'metaphor' in https://docs.kubefirst.io/
|
||||
|
Before Width: | Height: | Size: 376 KiB |
|
Before Width: | Height: | Size: 218 KiB |
|
Before Width: | Height: | Size: 652 KiB |
|
Before Width: | Height: | Size: 726 KiB |
|
Before Width: | Height: | Size: 888 KiB |
|
Before Width: | Height: | Size: 522 KiB |
|
Before Width: | Height: | Size: 256 KiB |
|
Before Width: | Height: | Size: 624 KiB |
|
Before Width: | Height: | Size: 166 KiB |
|
|
@ -1,139 +0,0 @@
|
|||
---
|
||||
title: Team and Work Structure
|
||||
weight: 50
|
||||
description: The way we work and produce runnable, presentable software
|
||||
linkTitle: Team-Process
|
||||
---
|
||||
|
||||
This document describes a proposal to set up a team work structure to primarily get the POC successfully delivered. Later on we will adjust and refine the process to fit for the MVP.
|
||||
|
||||
## Introduction
|
||||
|
||||
### Rationale
|
||||
|
||||
We currently face the following [challenges in our process](https://confluence.telekom-mms.com/display/IPCEICIS/Proof+of+Concept+2024):
|
||||
|
||||
1. missing team alignment on PoC-Output over all components
|
||||
1. Action: team is committed to **clearly defined PoC capabilities**
|
||||
1. Action: each team member is aware of **individual and common work** to be done (backlog) to achieve the PoC
|
||||
1. missing concept for the repository (process, structure, ...)
|
||||
1. Action: the **PoC has a robust repository concept** up & running
|
||||
1. Action: repo concept is applicable to other repositories as well (esp. documentation repo)
|
||||
|
||||
### General working context
|
||||
|
||||
A **project goal** drives us as a **team** to create valuable **product output**.
|
||||
|
||||
The **backlog** contains the product specification which guides our work in **tasks**, with the help and usage of **resources** (like git, 3rd party code, knowledge and so on).
|
||||
|
||||

|
||||
|
||||
Goal, Backlog, Tasks and Output must be in a well-defined context, such that the team can be productive.
|
||||
|
||||
### POC and MVP working context
|
||||
|
||||
This document has two targets: POC and MVP.
|
||||
|
||||
Today is mid-November 2024 and we need to package our project results created since July 2024 to deliver the POC product.
|
||||
|
||||

|
||||
|
||||
> Think of the agenda's goal like this: Imagine Ralf the big sponsor passes by and sees 'edge Developer Framework' somewhere on your screen. Then he asks: 'Hey cool, you are one of these famous platform guys?! I always wanted to get a demo how this framework looks like!' \
|
||||
> **What are you going to show him?**
|
||||
|
||||
## Team and Work Structure (POC first, MVP later)
|
||||
|
||||
In the following we will look at the work structure proposal, primarily for the POC, but reusable for any other release or the MVP.
|
||||
|
||||
### Consolidated POC (or any release later)
|
||||
|
||||

|
||||
|
||||
#### Responsibilities to reliably specify the deliverables
|
||||
|
||||

|
||||
|
||||
#### Todos
|
||||
|
||||
1. SHOULD: Clarify context (arch, team, leads)
|
||||
1. MUST: Define Deliverables (arch, team) (Hint: Deliverables could be seen 1:1 as use cases - not sure about that right now)
|
||||
1. MUST: Define Output structure (arch, leads)
|
||||
|
||||
### Process (General): from deliverables to output (POC first, MVP later)
|
||||
|
||||
Most important in the process are:
|
||||
|
||||
* **traces** from tickets to outputs (as the clue to understand and control what is where)
|
||||
* **README.md** (as the clue how to use the output)
|
||||
|
||||

|
||||
|
||||
### Output Structure POC
|
||||
|
||||
Most important in the POC structure are:
|
||||
|
||||
* one repo which is the product
|
||||
* a README which maps project goals to the repo content
|
||||
* the content consists of capabilities
|
||||
* capabilities are shown ('prooven') by use cases
|
||||
* the use cases are described in the deliverables
|
||||
|
||||

|
||||
|
||||
#### Glossary
|
||||
|
||||
* README: user manual and storybook
|
||||
* Outcome: like resolution, but more verbose and detailed (especially when resolution was 'Done'), so that state changes are easily recognisable
|
||||
|
||||
### Work Structure Guidelines (POC first, MVP later)
|
||||
|
||||
#### Structure
|
||||
|
||||
1. each task and/or user story has at least a branch in an existing repo or a new, dedicated task repo
|
||||
> recommended: multi-repo over monorepo
|
||||
1. each repo has a main and development branch. development is the integration line
|
||||
1. pull requests are used to merge work outputs to the integration line
|
||||
1. optional (may be too cumbersome): each PR should be reflected as a comment in jira
|
||||
|
||||
#### Workflow (in any task / user story)
|
||||
|
||||
1. when output comes in own repo: `git init` --> always create as fast as possible a new repo
|
||||
1. commit early and often
|
||||
1. comments on output and outcome whenever new work is done; this could typically correlate to a pull request, see above (see also the sketch below)
|
||||
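A minimal sketch of this workflow for a single task; the repository name, branch names and ticket key are placeholders:

```shell
git init edf-task-spike && cd edf-task-spike       # create the task repo as early as possible
git checkout -b development                        # development is the integration line
git checkout -b task/IPCEICIS-0000-spike           # hypothetical ticket key
# ... work ...
git add -A && git commit -m "WiP: first findings"  # commit early and often
git push -u origin task/IPCEICIS-0000-spike        # then open a pull request towards development
```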
|
||||
#### Definition of Done
|
||||
|
||||
1. Jira: there is a final comment summarizing the outcome (in a bit more verbose form than just the 'resolution' of the ticket) and the main outputs. This may typically be a link to the commit and/or pull request of the final repo state
|
||||
2. Git/Repo: there is a README.md in the root of the repo. It summarizes in a typical GitHub manner how to use the repo, so that it does what it is intended to do and reveals all the bells and whistles of the repo to the consumer. If the README doesn't lead to usable and recognizable added value, the work is not done!
|
||||
|
||||
#### Review
|
||||
|
||||
1. Before a ticket gets finished (not defined yet which jira-state this is) there must be a review by a second team member
|
||||
1. the reviewing person may review whatever they want, but must at least check the README
|
||||
|
||||
#### Out of scope (for now)
|
||||
|
||||
The following topics are optional and do not need an agreement at the moment:
|
||||
|
||||
1. Commit message syntax
|
||||
> Recommendation: at least 'WiP' would be good if the state is experimental
|
||||
1. branch permissions
|
||||
1. branch clean up policies
|
||||
1. squashing when merging into the integration line
|
||||
1. CI
|
||||
1. Tech blogs / gists
|
||||
1. Changelogs
|
||||
|
||||
#### Integration of Jira with Forgejo (compare to https://github.com/atlassian/github-for-jira)
|
||||
|
||||
1. Jira -> Forgejo: Create Branch
|
||||
1. Forgejo -> Jira:
|
||||
1. commit
|
||||
2. PR
|
||||
|
||||
## Status of POC Capabilities
|
||||
|
||||
The following table lists an analysis of the status of the ['Functionality validation' of the POC](https://confluence.telekom-mms.com/display/IPCEICIS/Proof+of+Concept+2024).
|
||||
Assumption: These functionalities should be the aforementioned capabilities.
|
||||
|
||||

|
||||
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
title: Design
|
||||
weight: 1
|
||||
description: Edge Developer Framework Design Documents
|
||||
---
|
||||
|
||||
This design documentation structure is inspired by the [design of crossplane](https://github.com/crossplane/crossplane/tree/main/design#readme).
|
||||
|
|
@ -1,39 +0,0 @@
|
|||
# why we have architectural documentation
|
||||
|
||||
Participants: Robert, Patrick, Stefan, Stephan
|
||||
25.2.25, 13-14h
|
||||
|
||||
## related tickets / links
|
||||
|
||||
* https://jira.telekom-mms.com/browse/IPCEICIS-2424
|
||||
* https://jira.telekom-mms.com/browse/IPCEICIS-478
|
||||
* Confluence: https://confluence.telekom-mms.com/display/IPCEICIS/Architecture
|
||||
|
||||
## charts
|
||||
|
||||
we need charts, because:
|
||||
|
||||
* external stakeholders (especially architects) want to understand our product and component structure(*)
|
||||
* our team needs visualization in technical discussions(**)
|
||||
* we need to have discussions while creating the documentation
|
||||
|
||||
(*): marker: "now for the first time I have more or less understood what you are actually doing there"
|
||||
(**) marker: ????
|
||||
|
||||
|
||||
## types of charts

* layer model (frontend, middleware, backend)
* landscape plan with dependencies, domains
* context from the outside
* component diagram
|
||||
|
||||
## decisions
|
||||
|
||||
* openbao is a backend system, reached via APIs
|
||||
|
||||
## further topics / new requirements
|
||||
|
||||
* runbook (compare to openbao discussions)
|
||||
* persistence of the EDP configuration (e.g. postgres)
|
||||
* OIDC vs. SSI
|
||||
|
|
@ -1,85 +0,0 @@
|
|||
|
||||
|
||||
# division of architecture work, internally and externally
|
||||
|
||||
Sebastiano, Stefan, Robert, Patrick, Stephan
|
||||
25.2.25, 14-15h
|
||||
|
||||
## links
|
||||
|
||||
* https://confluence.telekom-mms.com/display/IPCEICIS/Team+Members
|
||||
|
||||
# monday call

* Sebastiano joins the monday call, including florian, at least as an interim solution, as long as we do not have an architecture 'foreign minister'

# workshops

* after alignment with hasan regarding platform workshops
* further participation in additional workshop series to be defined

# program alignment

* find sponsors
* will be resolved through the workshop series

# internal architects

* robert and patrick are joining
* split of topics

# product structure

edp standalone
ipcei edp

# architecture topics

## stl

product structure
application model (cnoe, oam, score, xrd, ...)
api
backstage (usage scenarios)
pipelining
'everything as code', declarative deployment, crossplane (or orchestrator)

possibly:
identity mgmt

not:
security
monitoring
kubernetes internals

## robert

pipelining
kubernetes internals
api
crossplane
platforming - creating resources in 'clouds' (e.g. gcp, and hetzner :-) )

## patrick

security
identity-mgmt (SSI)
EaC
and everything else is great fun for me too!

# assessments

* the ipceicis platform is the most important subproject (hasan + patrick)
* open point: workload control, application model (compatibility with EDP)
* topic security, see ssi vs. oidc
* we need dedicated workshops to define the modes of collaboration

# commitments

* patrick and robert take part in architecture work

# open

* sebastian schwaar onboarding? (>=50%) --- robert will ask
* alternative: consulting/support on demand
* will give a kubernetes introduction training --> dates to be arranged (with sophie)
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
# crossplane dawn?
|
||||
|
||||
* Monday, March 31, 2025
|
||||
|
||||
## Issue
|
||||
|
||||
Robert worked on the kindserver reconciliation.

He became aware that crossplane is able to delete clusters when drift is detected. This must not happen in productive clusters.

Even worse, if crossplane did delete the cluster and then set it up again correctly, argocd would be out of sync and would by default have no idea how to relate the old and the new cluster.
|
||||
|
||||
## Decisions
|
||||
|
||||
1. quick solution: crossplane doesn't delete clusters (see the sketch after this list).
   * If it detects drift with a kind cluster, it shall create an alert (like an email) but not act in any way
2. analyze how the crossplane orchestration logic calls 'business logic' to decide what to do.
   * In this logic we could decide whether to delete resources like clusters and, if so, how. Secondly, an 'orchestration' or, let's say, a workflow for how to correctly restore the old state with respect to argocd could be implemented there.
3. keep terraform in mind
   * we probably will need it in adapters anyway
   * if the crossplane design does not fit, or the benefit is too small, or we definitely have more resources for developing terraform, then we could switch completely
4. focus on EDP domain and application logic
   * for the moment (in MVP1) we need to focus on EDP higher-level functionality
|
||||
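A possible shape of decision 1, assuming the kind clusters are managed as crossplane managed resources. The resource kind and API group below are placeholders for the internal kindserver provider; `deletionPolicy` is a standard crossplane field, and `managementPolicies` only applies if the installed crossplane version has management policies enabled.

```shell
kubectl apply -f - <<'EOF'
apiVersion: kindserver.example.org/v1alpha1   # hypothetical provider group
kind: KindCluster
metadata:
  name: edp-dev
spec:
  deletionPolicy: Orphan             # keep the external cluster if the CR is deleted
  managementPolicies: ["Observe"]    # only observe; never auto-correct or delete on drift
  forProvider:
    nodeCount: 1
EOF
```

Alerting on detected drift (decision 1, e.g. by email) would then have to happen outside of crossplane's reconciliation, for example via the monitoring stack.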
|
|
@ -1,31 +0,0 @@
|
|||
---
|
||||
title: eDF is self-contained and has its own IAM (WiP)
|
||||
weight: 2
|
||||
description: tbd
|
||||
---
|
||||
|
||||
* Type: Proposal
|
||||
* Owner: Stephan Lo (stephan.lo@telekom.de)
|
||||
* Reviewers: EDF Architects
|
||||
* Status: Speculative, revision 0.1
|
||||
|
||||
## Background
|
||||
|
||||
tbd
|
||||
|
||||
## Proposal
|
||||
|
||||
==== 1 =====
|
||||
|
||||
There is a core eDF which is self-contained and does not have any implemented dependency on external platforms.
eDF depends on abstractions.
Each embedding into customer infrastructure works with adapters which implement the abstraction.
|
||||
|
||||
==== 2 =====
|
||||
|
||||
eDF has its own IAM. It may either hold the principals and permissions itself when there is no other IAM, or proxy and map them when integrated into external enterprise IAMs.
|
||||
|
||||
|
||||
## Reference
|
||||
|
||||
Arch call from 4.12.24, Florian, Stefan, Stephan-Pierre
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
|
||||
|
||||
# platform team exchange

## stefan

* initial questions:
* two weeks ago: big-picture ('wallpaper') workshop session
* who takes part in the workshops?
* what does platform offer?
* EDP: could cost 5 million per year
* -> product pitch with marko
* -> edp is independent of the ipceicis cloud continuum*
* generalized quality of services ( <-> platform interface)

## Hasan

* martin is working on: agent-based iac generation
* help shape the platform workshops
* mms focus
* connectivity-enabled cloud offering, e2e from infrastructure to end device
* sdk for low-latency systems, consulting and integration
* monitoring in EDP?
* example 'unity'
* presentation in the arch call
* how can different application layers be distributed onto different infrastructure (compute) layers
* zero touch application deployment model
* I am currently being 'slowed down'
* workshop participation, TPM application model

## martin

* edgeXR does not allow persistence
* openai, llm not available as an abstraction
* currently only compute is available
* roaming of applications --> EDP must support this
* use case: a language model translates design artifacts into an architecture, then provisioning is enabled

? application models
? relation to golden paths
* e.g. for pure compute faas
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
title: Agnostic EDF Deployment
|
||||
weight: 2
|
||||
description: The implementation of EDF must be kubernetes provider agnostic
|
||||
---
|
||||
|
||||
* Type: Proposal
|
||||
* Owner: Stephan Lo (stephan.lo@telekom.de)
|
||||
* Reviewers: EDF Architects
|
||||
* Status: Speculative, revision 0.1
|
||||
|
||||
## Background
|
||||
|
||||
EDF is running as a controlplane - or let's say an orchestration plane, correct wording is still to be defined - in a kubernetes cluster.
|
||||
Right now we have at least ArgoCD as controller of manifests which we provide as CNOE stacks of packages and standalone packages.
|
||||
|
||||
## Proposal
|
||||
|
||||
The implementation of EDF must be kubernetes provider agnostic. Thus each provider specific deployment dependency must be factored out into provider specific definitions or deployment procedures.
|
||||
|
||||
## Local deployment
|
||||
|
||||
This implies that EDF must always be deployable into a local cluster, whereby by 'local' we mean a cluster which is under the full control of the platform engineer, e.g. a kind cluster on their laptop.
|
||||
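For illustration, the 'local first' requirement in practice; the package URL is the CNOE reference implementation used elsewhere in these docs, and the default cluster name is an assumption that may differ between idpbuilder versions:

```shell
idpbuilder create --package https://github.com/cnoe-io/stacks//ref-implementation
kubectl get applications -n argocd     # watch the platform components sync
kind get clusters                      # find the cluster idpbuilder created
kind delete cluster --name localdev    # tearing down is equally cheap
```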
|
|
@ -1,28 +0,0 @@
|
|||
---
|
||||
title: Agnostic Stack Definition
|
||||
weight: 2
|
||||
description: The implementation of EDF stacks must be kubernetes provider agnostic by a templating/hydration mechanism
|
||||
---
|
||||
|
||||
* Type: Proposal
|
||||
* Owner: Stephan Lo (stephan.lo@telekom.de)
|
||||
* Reviewers: EDF Architects
|
||||
* Status: Speculative, revision 0.1
|
||||
|
||||
## Background
|
||||
|
||||
When booting and reconciling, the 'final' stack-executing orchestrator (here: ArgoCD) needs to get rendered (or hydrated) representations of the manifests.

It is either not possible or not wanted that the orchestrator itself resolves dependencies or configuration values.
|
||||
|
||||
## Proposal
|
||||
|
||||
The hydration takes place for all target clouds/kubernetes providers. There is no 'default' or 'special' setup, like the Kind version.
|
||||
|
||||
## Local development
|
||||
|
||||
This implies that in a development process there needs to be a build step hydrating the ArgoCD manifests for the targeted cloud.
|
||||
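A sketch of such a hydration step, assuming a Helm-based stack; the chart path, value files and the rendered output layout are assumptions rather than defined EDF build tooling:

```shell
TARGET=kind    # or: gcp, aws, ...
helm template edf-stack ./stacks/core \
  --values ./stacks/core/values.${TARGET}.yaml \
  --output-dir ./rendered/${TARGET}
# commit the rendered manifests to the repo that ArgoCD watches
git -C ./rendered add . && git -C ./rendered commit -m "hydrate core stack for ${TARGET}"
```

ArgoCD then only syncs the pre-rendered manifests for the targeted cloud and never resolves dependencies or configuration values itself.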
|
||||
## Reference
|
||||
|
||||
Discussion from Robert and Stephan-Pierre in the context of stack development - there should be an easy way to have locally changed stacks propagated into the locally running platform.
|
||||
|
|
@ -1,55 +0,0 @@
|
|||
+++
|
||||
title = "Existing Backstage Plugins"
|
||||
weight = 4
|
||||
+++
|
||||
|
||||
1. **Catalog**:
|
||||
* Used for managing services and microservices, including registration, visualization, and the ability to track dependencies and relationships between services. It serves as a central directory for all services in an organization (a minimal registration example follows after this list).
|
||||
|
||||
2. **Docs**:
|
||||
* Designed for creating and managing documentation, supporting formats such as Markdown. It helps teams organize and access technical and non-technical documentation in a unified interface.
|
||||
|
||||
3. **API Docs**:
|
||||
* Automatically generates API documentation based on OpenAPI specifications or other API definitions, ensuring that your API information is always up to date and accessible for developers.
|
||||
|
||||
4. **TechDocs**:
|
||||
* A tool for creating and publishing technical documentation. It is integrated directly into Backstage, allowing developers to host and maintain documentation alongside their projects.
|
||||
|
||||
5. **Scaffolder**:
|
||||
* Allows the rapid creation of new projects based on predefined templates, making it easier to deploy services or infrastructure with consistent best practices.
|
||||
|
||||
6. **CI/CD**:
|
||||
* Provides integration with CI/CD systems such as GitHub Actions and Jenkins, allowing developers to view build status, logs, and pipelines directly in Backstage.
|
||||
|
||||
7. **Metrics**:
|
||||
* Offers the ability to monitor and visualize performance metrics for applications, helping teams to keep track of key indicators like response times and error rates.
|
||||
|
||||
8. **Snyk**:
|
||||
* Used for dependency security analysis, scanning your codebase for vulnerabilities and helping to manage any potential security risks in third-party libraries.
|
||||
|
||||
9. **SonarQube**:
|
||||
* Integrates with SonarQube to analyze code quality, providing insights into code health, including issues like technical debt, bugs, and security vulnerabilities.
|
||||
|
||||
10. **GitHub**:
|
||||
|
||||
* Enables integration with GitHub repositories, displaying information such as commits, pull requests, and other repository activity, making collaboration more transparent and efficient.
|
||||
|
||||
11. **CircleCI**:
|
||||
|
||||
* Allows seamless integration with CircleCI for managing CI/CD workflows, giving developers insight into build pipelines, test results, and deployment statuses.
|
||||
|
||||
12. **Kubernetes**:
|
||||
|
||||
* Provides tools to manage Kubernetes clusters, including visualizing pod status, logs, and cluster health, helping teams maintain and troubleshoot their cloud-native applications.
|
||||
|
||||
13. **Cloud**:
|
||||
|
||||
* Includes plugins for integration with cloud providers like AWS and Azure, allowing teams to manage cloud infrastructure, services, and billing directly from Backstage.
|
||||
|
||||
14. **OpenTelemetry**:
|
||||
|
||||
* Helps with monitoring distributed applications by integrating OpenTelemetry, offering powerful tools to trace requests, detect performance bottlenecks, and ensure application health.
|
||||
|
||||
15. **Lighthouse**:
|
||||
|
||||
* Integrates Google Lighthouse to analyze web application performance, helping teams identify areas for improvement in metrics like load times, accessibility, and SEO.
|
||||
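As a minimal example for the **Catalog** plugin (item 1), a service could be registered with a `catalog-info.yaml` like the following; the names and owner are placeholders:

```shell
cat > catalog-info.yaml <<'EOF'
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: example-service
  description: A sample service registered in the Backstage catalog
  annotations:
    backstage.io/techdocs-ref: dir:.   # lets TechDocs (item 4) pick up the docs
spec:
  type: service
  lifecycle: experimental
  owner: team-edp
EOF
```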
|
|
@ -1,141 +0,0 @@
|
|||
---
|
||||
title: ArgoCD
|
||||
weight: 30
|
||||
description: A description of ArgoCD and its role in CNOE
|
||||
---
|
||||
|
||||
## What is ArgoCD?
|
||||
|
||||
ArgoCD is a Continuous Delivery tool for kubernetes based on GitOps principles.
|
||||
|
||||
> ELI5: ArgoCD is an application running in kubernetes which monitors Git
|
||||
> repositories containing some sort of kubernetes manifests and automatically
|
||||
> deploys them to some configured kubernetes clusters.
|
||||
|
||||
From ArgoCD's perspective, applications are defined as custom resource
|
||||
definitions within the kubernetes clusters that ArgoCD monitors. Such a
|
||||
definition describes a source git repository that contains kubernetes
|
||||
manifests, in the form of a helm chart, kustomize, jsonnet definitions or plain
|
||||
yaml files, as well as a target kubernetes cluster and namespace the manifests
|
||||
should be applied to. Thus, ArgoCD is capable of deploying applications to
|
||||
various (remote) clusters and namespaces.
|
||||
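A minimal Application resource illustrating such a definition could look like this; the repository URL, path and namespaces are placeholders:

```shell
kubectl apply -f - <<'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: my-app
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://gitea.example.local/org/my-app-deployment.git
    targetRevision: main
    path: manifests                # plain yaml, a helm chart or a kustomize directory
  destination:
    server: https://kubernetes.default.svc
    namespace: my-app
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
EOF
```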
|
||||
ArgoCD monitors both the source and the destination. It applies changes from
|
||||
the git repository that acts as the source of truth for the destination as soon
|
||||
as they occur, i.e. if a change was pushed to the git repository, the change is
|
||||
applied to the kubernetes destination by ArgoCD. Subsequently, it checks
|
||||
whether the desired state was established. For example, it verifies that all
|
||||
resources were created, enough replicas started, and that all pods are in the
|
||||
`running` state and healthy.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Core Components
|
||||
|
||||
An ArgoCD deployment consists of 3 main components:
|
||||
|
||||
#### Application Controller
|
||||
|
||||
The application controller is a kubernetes operator that synchronizes the live
|
||||
state within a kubernetes cluster with the desired state derived from the git
|
||||
sources. It monitors the live state, can detect deviations, and perform
|
||||
corrective actions. Additionally, it can execute hooks on life cycle stages
|
||||
such as pre- and post-sync.
|
||||
|
||||
#### Repository Server
|
||||
|
||||
The repository server interacts with git repositories and caches their state,
|
||||
to reduce the amount of polling necessary. Furthermore, it is responsible for
|
||||
generating the kubernetes manifests from the resources within the git
|
||||
repositories, i.e. executing helm or jsonnet templates.
|
||||
|
||||
#### API Server
|
||||
|
||||
The API Server is a REST/gRPC Service that allows the Web UI and CLI, as well
|
||||
as other API clients, to interact with the system. It also acts as the callback
|
||||
for webhooks particularly from Git repository platforms such as GitHub or
|
||||
Gitlab to reduce repository polling.
|
||||
|
||||
### Others
|
||||
|
||||
The system primarily stores its configuration as kubernetes resources. Thus,
|
||||
other external storage is not vital.
|
||||
|
||||
Redis
|
||||
: A Redis store is optional but recommended to be used as a cache to reduce
|
||||
load on ArgoCD components and connected systems, e.g. git repositories.
|
||||
|
||||
ApplicationSetController
|
||||
: The ApplicationSet Controller is, similar to the Application Controller, a
|
||||
kubernetes operator that can deploy applications based on parameterized
|
||||
application templates. This allows the deployment of different versions of an
|
||||
application into various environments from a single template.
|
||||
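A sketch of an ApplicationSet using a simple list generator; the cluster URLs and repository are placeholders:

```shell
kubectl apply -f - <<'EOF'
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: my-app-envs
  namespace: argocd
spec:
  generators:
    - list:
        elements:
          - env: staging
            server: https://staging.example.local:6443
          - env: production
            server: https://prod.example.local:6443
  template:
    metadata:
      name: 'my-app-{{env}}'
    spec:
      project: default
      source:
        repoURL: https://gitea.example.local/org/my-app-deployment.git
        targetRevision: main
        path: 'overlays/{{env}}'
      destination:
        server: '{{server}}'
        namespace: my-app
EOF
```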
|
||||
### Overview
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
## Role in CNOE
|
||||
|
||||
ArgoCD is one of the core components besides gitea/forgejo that is being
|
||||
bootstrapped by the idpbuilder. Future project creation, e.g. through
|
||||
backstage, relies on the availability of ArgoCD.
|
||||
|
||||
After the initial bootstrapping phase, effectively all components in the stack
|
||||
that are deployed in kubernetes are managed by ArgoCD. This includes the
|
||||
bootstrapped components of gitea and ArgoCD which are onboarded afterward.
|
||||
Thus, the idpbuilder is only necessary in the bootstrapping phase of the
|
||||
platform and the technical coordination of all components shifts to ArgoCD
|
||||
eventually.
|
||||
|
||||
In general, the creation of new projects and applications should take place in
|
||||
backstage. It is a catalog of software components and best practices that allows
|
||||
developers to grasp and to manage their software portfolio. Underneath,
|
||||
however, the deployment of applications and platform components is managed by
|
||||
ArgoCD. Among others, backstage creates Application CRDs to instruct ArgoCD to
|
||||
manage deployments and subsequently report on their current state.
|
||||
|
||||
## Glossary
|
||||
|
||||
_Initially shamelessly copied from [the docs](https://argo-cd.readthedocs.io/en/stable/core_concepts/)_
|
||||
|
||||
Application
|
||||
: A group of Kubernetes resources as defined by a manifest. This is a Custom Resource Definition (CRD).
|
||||
|
||||
ApplicationSet
|
||||
: A CRD that is a template that can create multiple parameterized Applications.
|
||||
|
||||
Application source type
|
||||
: Which Tool is used to build the application.
|
||||
|
||||
Configuration management tool
|
||||
: See Tool.
|
||||
|
||||
Configuration management plugin
|
||||
: A custom tool.
|
||||
|
||||
Health
|
||||
: The health of the application, is it running correctly? Can it serve requests?
|
||||
|
||||
Live state
|
||||
: The live state of that application. What pods etc are deployed.
|
||||
|
||||
Refresh
|
||||
: Compare the latest code in Git with the live state. Figure out what is different.
|
||||
|
||||
Sync
|
||||
: The process of making an application move to its target state. E.g. by applying changes to a Kubernetes cluster.
|
||||
|
||||
Sync status
|
||||
: Whether or not the live state matches the target state. Is the deployed application the same as Git says it should be?
|
||||
|
||||
Sync operation status
|
||||
: Whether or not a sync succeeded.
|
||||
|
||||
Target state
|
||||
: The desired state of an application, as represented by files in a Git repository.
|
||||
|
||||
Tool
|
||||
: A tool to create manifests from a directory of files. E.g. Kustomize. See Application Source Type.
|
||||
|
|
@ -1,178 +0,0 @@
|
|||
---
|
||||
title: Http Routing
|
||||
weight: 100
|
||||
---
|
||||
|
||||
### Routing switch
|
||||
|
||||
The idpbuilder supports creating platforms using either path-based or
subdomain-based routing; the `--use-path-routing` flag in the second command
below switches from the default subdomain-based scheme to path-based routing:
|
||||
|
||||
```shell
|
||||
idpbuilder create --log-level debug --package https://github.com/cnoe-io/stacks//ref-implementation
|
||||
```
|
||||
|
||||
```shell
|
||||
idpbuilder create --use-path-routing --log-level debug --package https://github.com/cnoe-io/stacks//ref-implementation
|
||||
```
|
||||
|
||||
However, even though argo eventually reports all deployments as green, the demo
is not entirely functional (verification needed). This is due to hardcoded
values that, for example, point to the path-routed location of gitea to access
git repos. Thus, backstage might not be able to access them.
|
||||
|
||||
Within the demo / ref-implementation, a simple search & replace is suggested to
change URLs to fit the given environment. But proper scripting/templating could
|
||||
take care of that as the hostnames and necessary properties should be
|
||||
available. This is, however, a tedious and repetitive task one has to keep in
|
||||
mind throughout the entire system, which might lead to an explosion of config
|
||||
options in the future. Code that addresses correct routing is located in both
|
||||
the stack templates and the idpbuilder code.
|
||||
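As an illustration only (not the literal command from the ref-implementation),
such a replacement could look like this:

```shell
# Illustrative only: rewrite the hardcoded hostname in the stack manifests
# to fit the target environment before creating the platform.
find ref-implementation -type f \( -name '*.yaml' -o -name '*.md' \) \
  -exec sed -i 's|cnoe\.localtest\.me|my-platform\.example\.com|g' {} +
```
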
|
||||
### Cluster internal routing
|
||||
|
||||
For the most part, components communicate with either the cluster API using the
|
||||
default DNS or with each other via http(s) using the public DNS/hostname (+
|
||||
path-routing scheme). The latter is necessary due to configs that are visible
|
||||
and modifiable by users. This includes for example argocd config for components
|
||||
that has to sync to a gitea git repo. Using the same URL for internal and
|
||||
external resolution is imperative.
|
||||
|
||||
The idpbuilder achieves transparent internal DNS resolution by overriding the
public DNS name in the cluster's internal DNS server (CoreDNS). Subsequently,
within the cluster, requests to the public hostnames resolve to the IP of the
internal ingress controller service. Thus, internal and external requests take
a similar path and run through proper routing (rewrites, SSL/TLS, etc.).
|
||||
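A minimal sketch of what such an override can look like, assuming an
ingress-nginx controller service; this is illustrative and not the literal
configuration written by the idpbuilder:

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa
        # resolve the public hostname to the in-cluster ingress controller
        rewrite name gitea.cnoe.localtest.me ingress-nginx-controller.ingress-nginx.svc.cluster.local
        forward . /etc/resolv.conf
        cache 30
    }
```
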
|
||||
### Conclusion
|
||||
|
||||
One has to keep in mind that some specific app features might not work properly
or without hacks when using path-based routing (e.g. the docker registry in
gitea). Furthermore, supporting multiple setup strategies will become cumbersome
as the platform grows. We should probably only support one type of setup to
keep the system as simple as possible, but allow modification if necessary.

DNS solutions like `nip.io` or the already used `localtest.me` mitigate the
need for path-based routing.
|
||||
|
||||
## Excerpt
|
||||
|
||||
HTTP is a cornerstone of the internet due to its high flexibility. Starting
from HTTP/1.1, each request in the protocol contains, among other things, a
path and a `Host` name in its header. While an HTTP request is sent to a single
IP address / server, these two pieces of data allow (distributed) systems to
handle requests in various ways.
|
||||
|
||||
```shell
|
||||
$ curl -v http://google.com/something > /dev/null
|
||||
|
||||
* Connected to google.com (2a00:1450:4001:82f::200e) port 80
|
||||
* using HTTP/1.x
|
||||
> GET /something HTTP/1.1
|
||||
> Host: google.com
|
||||
> User-Agent: curl/8.10.1
|
||||
> Accept: */*
|
||||
...
|
||||
```
|
||||
|
||||
### Path-Routing
|
||||
|
||||
Imagine requesting `http://myhost.foo/some/file.html`. In a simple setup, the
web server that `myhost.foo` resolves to would serve static files from some
directory, e.g. `/<some_dir>/some/file.html`.
|
||||
|
||||
In more complex systems, one might have multiple services that fulfill various
|
||||
roles, for example a service that generates HTML sites of articles from a CMS
|
||||
and a service that can convert images into various formats. Using path-routing
|
||||
both services are available on the same host from a user's POV.
|
||||
|
||||
An article served from `http://myhost.foo/articles/news1.html` would be
|
||||
generated from the article service and points to an image
|
||||
`http://myhost.foo/images/pic.jpg` which in turn is generated by the image
|
||||
converter service. When a user sends an HTTP request to `myhost.foo`, they hit
|
||||
a reverse proxy which forwards the request based on the requested path to some
|
||||
other system, waits for a response, and subsequently returns that response to
|
||||
the user.
|
||||
|
||||

|
||||
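In a kubernetes context, this could be expressed with an Ingress resource along
these lines; the host and service names are made up for the example:

```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: path-routing-example
spec:
  rules:
    - host: myhost.foo
      http:
        paths:
          - path: /articles
            pathType: Prefix
            backend:
              service:
                name: article-service
                port:
                  number: 80
          - path: /images
            pathType: Prefix
            backend:
              service:
                name: image-service
                port:
                  number: 80
```
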
|
||||
Such a setup hides the complexity from the user and allows the creation of
|
||||
large distributed, scalable systems acting as a unified entity from the
|
||||
outside. Since everything is served on the same host, the browser is inclined
|
||||
to trust all downstream services. This allows for easier 'communication'
|
||||
between services through the browser. For example, cookies could be valid for
|
||||
the entire host and thus authentication data could be forwarded to requested
|
||||
downstream services without the user having to explicitly re-authenticate.
|
||||
|
||||
Furthermore, services 'know' their user-facing location by knowing their path
|
||||
and the paths to other services as paths are usually set as a convention and /
|
||||
or hard-coded. In practice, this makes configuration of the entire system
|
||||
somewhat easier, especially if you have various environments for testing,
|
||||
development, and production. The hostname of the system does not matter as one
|
||||
can use hostname-relative URLs, e.g. `/some/service`.
|
||||
|
||||
Load balancing is also easily achievable by multiplying the number of service
|
||||
instances. Most reverse proxy systems are able to apply various load balancing
|
||||
strategies to forward traffic to downstream systems.
|
||||
|
||||
Problems might arise if downstream systems are not built with path-routing in
|
||||
mind. Some systems require to be served from the root of a domain, see for
|
||||
example the container registry spec.
|
||||
|
||||
|
||||
### Hostname-Routing
|
||||
|
||||
Each downstream service in a distributed system is served from a different
|
||||
host, typically a subdomain, e.g. `serviceA.myhost.foo` and
|
||||
`serviceB.myhost.foo`. This gives services full control over their respective
|
||||
host, and even allows them to do path-routing within each system. Moreover,
|
||||
hostname-routing allows the entire system to create more flexible and powerful
|
||||
routing schemes in terms of scalability. Intra-system communication becomes
somewhat harder as the browser treats each subdomain as a separate host,
shielding cookies, for example, from one another.
|
||||
|
||||
Each host that serves some services requires a DNS entry that has to be
|
||||
published to the clients (from some DNS server). Depending on the environment
|
||||
this can become quite tedious as DNS resolution on the internet and intranets
|
||||
might have to deviate. This applies to intra-cluster communication as well, as
|
||||
seen with the idpbuilder's platform. In this case, external DNS resolution has
|
||||
to be replicated within the cluster to be able to use the same URLs to address
|
||||
for example gitea.
|
||||
|
||||
The following example depicts DNS-only routing. By defining separate DNS
|
||||
entries for each service / subdomain requests are resolved to the respective
|
||||
servers. In theory, no additional infrastructure is necessary to route user
|
||||
traffic to each service. However, as services are completely separated other
|
||||
infrastructure like authentication possibly has to be duplicated.
|
||||
|
||||

|
||||
|
||||
When using hostname-based routing, one does not have to set different IPs for
each hostname. Instead, having multiple DNS entries pointing to the same set of
IPs allows re-using existing infrastructure. As shown below, a reverse proxy is
able to forward requests to downstream services based on the `Host` request
header. This way, a specific hostname can be forwarded to a defined service.
|
||||
|
||||

|
||||
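Again in kubernetes terms, the equivalent host-based rules could look like this
sketch; the subdomains from the earlier example are written in lowercase as
required by the Ingress API, and the service names are illustrative:

```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: host-routing-example
spec:
  rules:
    - host: service-a.myhost.foo
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: service-a
                port:
                  number: 80
    - host: service-b.myhost.foo
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: service-b
                port:
                  number: 80
```
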
|
||||
At the same time, one could imagine a multi-tenant system that differentiates
customer systems by name, e.g. `tenant-1.cool.system` and
`tenant-2.cool.system`. Configured as a wildcard-style domain, `*.cool.system`
could point to a reverse proxy that forwards requests to a tenant's instance of
a system, allowing re-use of central infrastructure while still hosting
separate systems per tenant.
|
||||
|
||||
|
||||
The implicit dependency on DNS resolution generally makes this kind of routing
|
||||
more complex and error-prone as changes to DNS server entries are not always
|
||||
possible or modifiable by everyone. Also, local changes to your `/etc/hosts`
|
||||
file are a constant pain and should be seen as a dirty hack. As mentioned
|
||||
above, dynamic DNS solutions like `nip.io` are often helpful in this case.
|
||||
|
||||
### Conclusion
|
||||
|
||||
Path- and hostname-based routing are the two most common methods of HTTP
traffic routing. They can be used separately, but more often they are used in
conjunction. Due to HTTP's versatility, other forms of HTTP routing, for
example based on the `Content-Type` header, are also very common.
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
---
|
||||
title: Included Backstage Templates
|
||||
weight: 2
|
||||
description: Here you will find information about backstage templates that are included in idpbuilder's ref-implementation
|
||||
---
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
+++
|
||||
title = "Template for basic Argo Workflow"
|
||||
weight = 4
|
||||
+++
|
||||
|
||||
# Backstage Template for Basic Argo Workflow with Spark Job
|
||||
|
||||
This Backstage template YAML automates the creation of an Argo Workflow for Kubernetes that includes a basic Spark job, providing a convenient way to configure and deploy workflows involving data processing or machine learning jobs. Users can define key parameters, such as the application name and the path to the main Spark application file. The template creates necessary Kubernetes resources, publishes the application code to a Gitea Git repository, registers the application in the Backstage catalog, and deploys it via ArgoCD for easy CI/CD management.
|
||||
|
||||
## Use Case
|
||||
|
||||
This template is designed for teams that need a streamlined approach to deploy and manage data processing or machine learning jobs using Spark within an Argo Workflow environment. It simplifies the deployment process and integrates the application with a CI/CD pipeline. The template performs the following:
|
||||
|
||||
* **Workflow and Spark Job Setup**: Defines a basic Argo Workflow and configures a Spark job using the provided application file path, ideal for data processing tasks.
|
||||
* **Repository Setup**: Publishes the workflow configuration to a Gitea repository, enabling version control and easy updates to the job configuration.
|
||||
* **ArgoCD Integration**: Creates an ArgoCD application to manage the Spark job deployment, ensuring continuous delivery and synchronization with Kubernetes.
|
||||
* **Backstage Registration**: Registers the application in Backstage, making it easily discoverable and manageable through the Backstage catalog.
|
||||
|
||||
This template boosts productivity by automating steps required for setting up Argo Workflows and Spark jobs, integrating version control, and enabling centralized management and visibility, making it ideal for projects requiring efficient deployment and scalable data processing solutions.
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
+++
|
||||
title = "Template for basic kubernetes deployment"
|
||||
weight = 4
|
||||
+++
|
||||
|
||||
# Backstage Template for Kubernetes Deployment
|
||||
|
||||
This Backstage template YAML automates the creation of a basic Kubernetes Deployment, aimed at simplifying the deployment and management of applications in Kubernetes for the user. The template allows users to define essential parameters, such as the application’s name, and then creates and configures the Kubernetes resources, publishes the application code to a Gitea Git repository, and registers the application in the Backstage catalog for tracking and management.
|
||||
|
||||
## Use Case
|
||||
|
||||
The template is designed for teams needing a streamlined approach to deploy applications in Kubernetes while automatically configuring their CI/CD pipelines. It performs the following:
|
||||
|
||||
* **Deployment Creation**: A Kubernetes Deployment YAML is generated based on the provided application name, specifying a basic setup with an Nginx container.
|
||||
* **Repository Setup**: Publishes the deployment code in a Gitea repository, allowing for version control and future updates.
|
||||
* **ArgoCD Integration**: Automatically creates an ArgoCD application for the deployment, facilitating continuous delivery and synchronization with Kubernetes.
|
||||
* **Backstage Registration**: Registers the application in Backstage to make it discoverable and manageable via the Backstage catalog.
|
||||
|
||||
This template enhances productivity by automating several steps required for deployment, version control, and registration, making it ideal for projects where fast, consistent deployment and centralized management are required.
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
---
|
||||
title: Crossplane
|
||||
description: Crossplane is a tool to provision cloud resources. It can act as a backend for platform orchestrators as well
|
||||
---
|
||||
|
|
@ -1,764 +0,0 @@
|
|||
---
|
||||
title: Howto develop a crossplane kind provider
|
||||
weight: 1
|
||||
description: A provider-kind allows using crossplane locally
|
||||
---
|
||||
|
||||
To support local development and usage of crossplane compositions, a crossplane provider is needed.
|
||||
Every big hyperscaler already has support in crossplane (e.g. provider-gcp and provider-aws).
|
||||
|
||||
Each provider has two main parts, the provider config and implementations of the cloud resources.
|
||||
|
||||
The provider config takes the credentials to log into the cloud provider and provides a token
|
||||
(e.g. a kube config or even a service account) that the implementations can use to provision cloud resources.
|
||||
|
||||
The implementations of the cloud resources reflect each type of cloud resource, typical resources are:
|
||||
|
||||
* S3 Bucket
|
||||
* Nodepool
|
||||
* VPC
|
||||
* GkeCluster
|
||||
|
||||
## Architecture of provider-kind
|
||||
|
||||
To have the crossplane concepts applied, the provider-kind consists of two components: kindserver and provider-kind.
|
||||
|
||||
The kindserver is used to manage local kind clusters. It provides an HTTP REST interface to create, delete and get information about a running cluster, with an Authorization HTTP header field used as a password:
|
||||
|
||||

|
||||
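For illustration, a call against this interface could look like the following;
the address, port and password are the placeholder values used later in this
document, and the exact response format is defined by the kindserver
implementation:

```
# query the kindserver for the state of a named kind cluster (illustrative;
# -k assumes a self-signed certificate on the kindserver)
curl -k -H "Authorization: 12345" \
  https://172.18.0.1:7443/api/v1/kindserver/example-kind-cluster
```
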
|
||||
The two properties to connect the provider-kind to the kindserver are the IP address and password of the kindserver. The IP address is required because the kindserver needs to be executed outside the kind cluster, directly on the local machine, as it needs to control
kind itself:
|
||||
|
||||

|
||||
|
||||
The provider-kind provides two crossplane elements, the `ProviderConfig` and `KindCluster` as the (only) cloud resource. The
`ProviderConfig` is configured with the IP address and password of the running kindserver. The `KindCluster` type is configured
to use the provided `ProviderConfig`. Kind clusters can be managed by adding and removing kubernetes manifests of type
`KindCluster`. The crossplane reconciliation loop makes use of the kindserver HTTP GET method to see if a new cluster needs to be
created via HTTP POST or removed via HTTP DELETE.
|
||||
|
||||
The password used by `ProviderConfig` is configured as a kubernetes secret, while the kindserver IP address is configured
inside the `ProviderConfig` as the field endpoint.

When provider-kind has created a new cluster by processing a `KindCluster` manifest, the two providers which are used to deploy applications, provider-helm and provider-kubernetes, can be configured to use the `KindCluster`.
|
||||
|
||||

|
||||
|
||||
A Crossplane composition can be created by combining different providers and their objects. A composition is managed as a
custom resource definition and defined in a single file.
|
||||
|
||||

|
||||
|
||||
## Configuration
|
||||
|
||||
Two kubernetes manifests are defined by provider-kind: `ProviderConfig` and `KindCluster`. The third needed kubernetes
object is a secret.

The following inputs are needed when developing a provider-kind:
|
||||
|
||||
* kindserver password as a kubernetes secret
|
||||
* endpoint, the IP address of the kindserver as a detail of `ProviderConfig`
|
||||
* kindConfig, the kind configuration file as a detail of `KindCluster`
|
||||
|
||||
The following outputs arise:
|
||||
|
||||
* kubernetesVersion, kubernetes version of a created kind cluster as a detail of `KindCluster`
|
||||
* internalIP, IP address of a created kind cluster as a detail of `KindCluster`
|
||||
* readiness as a detail of `KindCluster`
|
||||
* kube config of a created kind cluster as a kubernetes secret reference of `KindCluster`
|
||||
|
||||
### Inputs
|
||||
|
||||
#### kindserver password
|
||||
|
||||
The kindserver password needs to be defined first. It is realized as a kubernetes secret and contains the password
|
||||
which the kindserver has been configured with:
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
data:
|
||||
credentials: MTIzNDU=
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: kind-provider-secret
|
||||
namespace: crossplane-system
|
||||
type: Opaque
|
||||
```
|
||||
|
||||
#### endpoint
|
||||
|
||||
The IP address of the kindserver `endpoint` is configured in the provider-kind `ProviderConfig`. This config also references the kindserver password (`kind-provider-secret`):
|
||||
|
||||
```
|
||||
apiVersion: kind.crossplane.io/v1alpha1
|
||||
kind: ProviderConfig
|
||||
metadata:
|
||||
name: kind-provider-config
|
||||
spec:
|
||||
credentials:
|
||||
source: Secret
|
||||
secretRef:
|
||||
namespace: crossplane-system
|
||||
name: kind-provider-secret
|
||||
key: credentials
|
||||
endpoint:
|
||||
url: https://172.18.0.1:7443/api/v1/kindserver
|
||||
```
|
||||
|
||||
It is suggested that the kindserver runs on the IP of the docker host, so that all kind clusters can access it without extra routing.
|
||||
|
||||
#### kindConfig
|
||||
|
||||
The kind config is provided as the field `kindConfig` in each `KindCluster` manifest. The manifest also references the provider-kind `ProviderConfig` (`kind-provider-config` in the `providerConfigRef` field):
|
||||
|
||||
```
|
||||
apiVersion: container.kind.crossplane.io/v1alpha1
|
||||
kind: KindCluster
|
||||
metadata:
|
||||
name: example-kind-cluster
|
||||
spec:
|
||||
forProvider:
|
||||
kindConfig: |
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: InitConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "ingress-ready=true"
|
||||
extraPortMappings:
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
protocol: TCP
|
||||
- containerPort: 443
|
||||
hostPort: 443
|
||||
protocol: TCP
|
||||
containerdConfigPatches:
|
||||
- |-
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."gitea.cnoe.localtest.me:443"]
|
||||
endpoint = ["https://gitea.cnoe.localtest.me"]
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs."gitea.cnoe.localtest.me".tls]
|
||||
insecure_skip_verify = true
|
||||
providerConfigRef:
|
||||
name: kind-provider-config
|
||||
writeConnectionSecretToRef:
|
||||
namespace: default
|
||||
name: kind-connection-secret
|
||||
```
|
||||
|
||||
After the kind cluster has been created, its kube config is stored in a kubernetes secret `kind-connection-secret` which `writeConnectionSecretToRef` references.
|
||||
|
||||
### Outputs
|
||||
|
||||
The three outputs can be retrieved by getting the `KindCluster` manifest after the cluster has been created. The `KindCluster` is
available for reading even before the cluster has been created, but the three output fields are empty until then. The ready state
will also switch from `false` to `true` after the cluster has finally been created.
|
||||
|
||||
#### kubernetesVersion, internalIP and readiness
|
||||
|
||||
These fields can be retrieved with a standard kubectl get command:
|
||||
|
||||
```
|
||||
$ kubectl get kindclusters kindcluster-fw252 -o yaml
|
||||
...
|
||||
status:
|
||||
atProvider:
|
||||
internalIP: 192.168.199.19
|
||||
kubernetesVersion: v1.31.0
|
||||
conditions:
|
||||
- lastTransitionTime: "2024-11-12T18:22:39Z"
|
||||
reason: Available
|
||||
status: "True"
|
||||
type: Ready
|
||||
- lastTransitionTime: "2024-11-12T18:21:38Z"
|
||||
reason: ReconcileSuccess
|
||||
status: "True"
|
||||
type: Synced
|
||||
```
|
||||
|
||||
#### kube config
|
||||
|
||||
The kube config is stored in a kubernetes secret (`kind-connection-secret`) which can be accessed after the cluster has been
|
||||
created:
|
||||
|
||||
```
|
||||
$ kubectl get kindclusters kindcluster-fw252 -o yaml
|
||||
...
|
||||
writeConnectionSecretToRef:
|
||||
name: kind-connection-secret
|
||||
namespace: default
|
||||
...
|
||||
|
||||
$ kubectl get secret kind-connection-secret
|
||||
NAME TYPE DATA AGE
|
||||
kind-connection-secret connection.crossplane.io/v1alpha1 2 107m
|
||||
```
|
||||
|
||||
The API endpoint of the new cluster (`endpoint`) and its kube config (`kubeconfig`) are stored in that secret. These values are set in
the Observe function of the kind controller of provider-kind. They are set via the crossplane `managed.ExternalObservation`
structure returned from that function.
|
||||
|
||||
## The reconciler loop of a crossplane provider
|
||||
|
||||
The reconciler loop is the heart of every crossplane provider. As it is asynchronously coupled, it is best to describe how it works in words:
|
||||
|
||||
Internally, the Connect function gets triggered in the kindcluster controller `internal/controller/kindcluster/kindcluster.go`
first, to set up the provider and configure it with the kindserver password and the IP address of the kindserver.

After the provider-kind has been configured with the kindserver secret and its `ProviderConfig`, the provider is ready to
be activated by applying a `KindCluster` manifest to kubernetes.
|
||||
|
||||
When the user applies a new `KindCluster` manifest, an observe loop is started. The provider regularly triggers the `Observe`
function of the controller. As nothing has been created yet, the controller will return
`managed.ExternalObservation{ResourceExists: false}` to signal that the kind cluster resource has not been created yet.
As there is a kindserver SDK available, the controller uses the `Get` function of the SDK to query the kindserver.

The `KindCluster` is already applied and can be retrieved with `kubectl get kindclusters`. As the cluster has not been
created yet, its readiness state is `false`.
|
||||
|
||||
In parallel, the `Create` function is triggered in the controller. This function has access to the desired kind config
`cr.Spec.ForProvider.KindConfig` and the name of the kind cluster `cr.ObjectMeta.Name`. It can now call the kindserver SDK to
create a new cluster with the given config and name. The create function is supposed not to run too long, therefore
it returns immediately in the case of provider-kind. The kindserver already knows the name of the new cluster and, even though
it is not yet ready, it will respond with a partial success.
|
||||
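A condensed sketch of such a `Create` function; the `kindClient` field and its
method signature are assumptions based on this document rather than the actual
provider-kind code:

```
// Create is called by the crossplane reconciler when Observe reported
// ResourceExists: false. It only kicks off the cluster creation; readiness
// is picked up later by the observe loop.
func (c *external) Create(ctx context.Context, mg resource.Managed) (managed.ExternalCreation, error) {
	cr, ok := mg.(*v1alpha1.KindCluster)
	if !ok {
		return managed.ExternalCreation{}, errors.New("managed resource is not a KindCluster")
	}

	// Ask the kindserver to create the cluster; the call returns quickly and
	// the kindserver finishes the creation asynchronously.
	if err := c.kindClient.Create(ctx, cr.ObjectMeta.Name, cr.Spec.ForProvider.KindConfig); err != nil {
		return managed.ExternalCreation{}, err
	}

	return managed.ExternalCreation{}, nil
}
```
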
|
||||
The observe loop is triggered regularly in parallel. It will be triggered after the create call but before the kind cluster has been
created. Now it gets a step further: it learns from the kindserver that the cluster is already known, but has not
finished creating yet.

After the cluster has finished creating, the kindserver has all important information for the provider-kind, namely
the API server endpoint of the new cluster and its kube config. After another round of the observe loop, the controller
now gets the full set of information about the kind cluster (cluster ready, its API server endpoint and its kube config).
When this information has been received by the kindserver SDK in the form of a JSON document, the controller is able to signal
successful creation of the cluster. That is done by returning the following structure from inside the observe function:
|
||||
|
||||
```
|
||||
return managed.ExternalObservation{
|
||||
ResourceExists: true,
|
||||
ResourceUpToDate: true,
|
||||
ConnectionDetails: managed.ConnectionDetails{
|
||||
xpv1.ResourceCredentialsSecretEndpointKey: []byte(clusterInfo.Endpoint),
|
||||
xpv1.ResourceCredentialsSecretKubeconfigKey: []byte(clusterInfo.KubeConfig),
|
||||
},
|
||||
}, nil
|
||||
```
|
||||
|
||||
Note that the managed.ConnectionDetails will automatically write the API server endpoint and the kube config to the kubernetes
secret which `writeConnectionSecretToRef` of `KindCluster` points to.

It also sets the availability flag before returning, which will mark the `KindCluster` as ready:
|
||||
|
||||
```
|
||||
cr.Status.SetConditions(xpv1.Available())
|
||||
```
|
||||
|
||||
Before returning, it will also set the information which is transferred into fields of `KindCluster` that can be retrieved by a
`kubectl get`, namely the `kubernetesVersion` and the `internalIP` fields:
|
||||
|
||||
```
|
||||
cr.Status.AtProvider.KubernetesVersion = clusterInfo.K8sVersion
|
||||
cr.Status.AtProvider.InternalIP = clusterInfo.NodeIp
|
||||
```
|
||||
|
||||
Now the `KindCluster` is set up completely and when its data is retrieved by `kubectl get`, all data is available and its readiness
is set to `true`.

The observe loop continues to be called to enable drift detection. That detection is currently not implemented, but is
prepared for future implementations. If the observe function detected that the kind cluster with a given name is set
up with a kind config other than the desired one, the controller would call its `Update` function, which would
delete the currently running kind cluster and recreate it with the desired kind config.

When the user deletes the `KindCluster` manifest at a later stage, the `Delete` function of the controller is triggered
to call the kindserver SDK to delete the cluster with the given name. The observe loop will acknowledge that the cluster
is deleted successfully by receiving `kind cluster not found` once the deletion has succeeded. If not, the controller
will trigger the delete function in a loop as well, until the kind cluster has been deleted.

That completes the reconciler loop.
|
||||
|
||||
## kind API server IP address
|
||||
|
||||
Each newly created kind cluster has a practically random kubernetes API server endpoint. As the IP address of a new kind cluster
can't be determined before creation, the kindserver manages the API server field of the kind config. It maps all
kind cluster kubernetes API endpoints onto its own IP address, but on different ports. That guarantees that all kind
clusters can access the kubernetes API endpoints of all other kind clusters by using the docker host IP of the kindserver
itself. This is needed as the kube config hardcodes the kubernetes API server endpoint. By using the docker host IP
but with different ports, every usage of a kube config from one kind cluster to another works successfully.
|
||||
|
||||
The management of the kind config in the kindserver is implemented in the `Post` function of the kindserver `main.go` file.
|
||||
|
||||
## Create the crossplane provider-kind
|
||||
|
||||
The official way of creating crossplane providers is to use the provider-template. Follow these steps to create
a new provider.
|
||||
|
||||
First, clone the provider-template. The commit ID when this howto has been written is 2e0b022c22eb50a8f32de2e09e832f17161d7596.
|
||||
Rename the new folder after cloning.
|
||||
|
||||
```
|
||||
git clone https://github.com/crossplane/provider-template.git
|
||||
mv provider-template provider-kind
|
||||
cd provider-kind/
|
||||
```
|
||||
|
||||
The information in the provided README.md is incomplete. Follow these steps to get it running:
|
||||
|
||||
> Please use bash for the next commands (`${type,,}` e.g. is not a mistake)
|
||||
|
||||
```
|
||||
make submodules
|
||||
export provider_name=Kind # Camel case, e.g. GitHub
|
||||
make provider.prepare provider=${provider_name}
|
||||
export group=container # lower case e.g. core, cache, database, storage, etc.
|
||||
export type=KindCluster # Camel case, e.g. Bucket, Database, CacheCluster, etc.
|
||||
make provider.addtype provider=${provider_name} group=${group} kind=${type}
|
||||
sed -i "s/sample/${group}/g" apis/${provider_name,,}.go
|
||||
sed -i "s/mytype/${type,,}/g" internal/controller/${provider_name,,}.go
|
||||
```
|
||||
|
||||
Patch the Makefile:
|
||||
|
||||
```
|
||||
dev: $(KIND) $(KUBECTL)
|
||||
@$(INFO) Creating kind cluster
|
||||
+ @$(KIND) delete cluster --name=$(PROJECT_NAME)-dev
|
||||
@$(KIND) create cluster --name=$(PROJECT_NAME)-dev
|
||||
@$(KUBECTL) cluster-info --context kind-$(PROJECT_NAME)-dev
|
||||
- @$(INFO) Installing Crossplane CRDs
|
||||
- @$(KUBECTL) apply --server-side -k https://github.com/crossplane/crossplane//cluster?ref=master
|
||||
+ @$(INFO) Installing Crossplane
|
||||
+ @helm install crossplane --namespace crossplane-system --create-namespace crossplane-stable/crossplane --wait
|
||||
@$(INFO) Installing Provider Template CRDs
|
||||
@$(KUBECTL) apply -R -f package/crds
|
||||
@$(INFO) Starting Provider Template controllers
|
||||
```
|
||||
|
||||
Generate, build and execute the new provider-kind:
|
||||
|
||||
```
|
||||
make generate
|
||||
make build
|
||||
make dev
|
||||
```
|
||||
|
||||
Now it's time to add the required fields (internalIP, endpoint, etc.) to the spec fields in go api sources found in:
|
||||
|
||||
* apis/container/v1alpha1/kindcluster_types.go
|
||||
* apis/v1alpha1/providerconfig_types.go
|
||||
|
||||
The file `apis/kind.go` may also be modified. The word `sample` can be replaced with `container` in our case.
|
||||
|
||||
When that's done, the yaml specifications need to be modified to also include the required fields (internalIP, endpoint, etc.).
|
||||
|
||||
Next, a kindserver SDK can be implemented. That is a helper class which encapsulates the get, create and delete HTTP calls to the kindserver. Connection info (the kindserver IP address and password) is stored by the constructor. A possible shape is sketched below.
|
||||
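The names in this sketch are assumptions; the cluster info fields mirror those
used by the controller later on (`Endpoint`, `KubeConfig`, `K8sVersion`,
`NodeIp`):

```
package kindclient

import "context"

// ClusterInfo is the JSON payload returned by the kindserver for a cluster.
type ClusterInfo struct {
	Endpoint   string // API server endpoint of the created cluster
	KubeConfig string // kube config of the created cluster
	K8sVersion string // kubernetes version of the created cluster
	NodeIp     string // internal IP of the cluster node
}

// Client wraps the HTTP calls to the kindserver; endpoint and password are
// stored by the constructor and sent with every request.
type Client interface {
	// Get queries the kindserver for the state of the named cluster.
	Get(ctx context.Context, name string) (*ClusterInfo, error)
	// Create asks the kindserver to create a cluster from the given kind config.
	Create(ctx context.Context, name, kindConfig string) error
	// Delete removes the named cluster.
	Delete(ctx context.Context, name string) error
}
```
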
|
||||
After that, we can add the usage of the kindclient SDK in the kindcluster controller `internal/controller/kindcluster/kindcluster.go`.

Finally, we can update the `Makefile` to better handle the primary kind cluster creation and to add a cluster role binding
so that crossplane can access the `KindCluster` objects. Examples and updating the README.md finish the development.

All these steps are documented in: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW/provider-kind/pulls/1
|
||||
|
||||
## Publish the provider-kind to a user defined docker registry
|
||||
|
||||
Every provider-kind release needs to be tagged first in the git repository:
|
||||
|
||||
```
|
||||
git tag v0.1.0
|
||||
git push origin v0.1.0
|
||||
```
|
||||
|
||||
Next, make sure you have docker logged in into the target registry:
|
||||
|
||||
```
|
||||
docker login forgejo.edf-bootstrap.cx.fg1.ffm.osc.live
|
||||
```
|
||||
|
||||
Now it's time to specify the target registry, build the provider-kind for ARM64 and AMD64 CPU architectures and publish it to the target registry:
|
||||
|
||||
```
|
||||
XPKG_REG_ORGS_NO_PROMOTE="" XPKG_REG_ORGS="forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz" make build.all publish BRANCH_NAME=main
|
||||
```
|
||||
|
||||
The parameter `BRANCH_NAME=main` is needed when the tagging and publishing happen from another branch. The version of the provider-kind is that of the tag name. The output of the make call then ends like this:
|
||||
|
||||
```
|
||||
$ XPKG_REG_ORGS_NO_PROMOTE="" XPKG_REG_ORGS="forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz" make build.all publish BRANCH_NAME=main
|
||||
...
|
||||
14:09:19 [ .. ] Skipping image publish for docker.io/provider-kind:v0.1.0
|
||||
Publish is deferred to xpkg machinery
|
||||
14:09:19 [ OK ] Image publish skipped for docker.io/provider-kind:v0.1.0
|
||||
14:09:19 [ .. ] Pushing package forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz/provider-kind:v0.1.0
|
||||
xpkg pushed to forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz/provider-kind:v0.1.0
|
||||
14:10:19 [ OK ] Pushed package forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz/provider-kind:v0.1.0
|
||||
```
|
||||
|
||||
After publishing, the provider-kind can be installed in-cluster similar to other providers like
|
||||
provider-helm and provider-kubernetes. To install it apply the following manifest:
|
||||
|
||||
```
|
||||
apiVersion: pkg.crossplane.io/v1
|
||||
kind: Provider
|
||||
metadata:
|
||||
name: provider-kind
|
||||
spec:
|
||||
package: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz/provider-kind:v0.1.0
|
||||
```
|
||||
|
||||
The output of `kubectl get providers`:
|
||||
|
||||
```
|
||||
$ kubectl get providers
|
||||
NAME INSTALLED HEALTHY PACKAGE AGE
|
||||
provider-helm True True xpkg.upbound.io/crossplane-contrib/provider-helm:v0.19.0 38m
|
||||
provider-kind True True forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz/provider-kind:v0.1.0 39m
|
||||
provider-kubernetes True True xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.15.0 38m
|
||||
```
|
||||
|
||||
The provider-kind can now be used.
|
||||
|
||||
## Crossplane Composition `edfbuilder`
|
||||
|
||||
Together with the implemented provider-kind and its config, it is now possible to create a composition which can create kind
clusters and deploy helm and kubernetes objects into the newly created cluster.

A composition is realized as a custom resource definition (CRD) consisting of three parts:

* A definition
* A composition
* One or more deployments of the composition
|
||||
|
||||
### definition.yaml
|
||||
|
||||
The definition of the CRD will most probably contain one additional field, the ArgoCD repository URL, to easily select
the stacks which should be deployed:
|
||||
|
||||
```
|
||||
apiVersion: apiextensions.crossplane.io/v1
|
||||
kind: CompositeResourceDefinition
|
||||
metadata:
|
||||
name: edfbuilders.edfbuilder.crossplane.io
|
||||
spec:
|
||||
connectionSecretKeys:
|
||||
- kubeconfig
|
||||
group: edfbuilder.crossplane.io
|
||||
names:
|
||||
kind: EDFBuilder
|
||||
listKind: EDFBuilderList
|
||||
plural: edfbuilders
|
||||
singular: edfbuilders
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
referenceable: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: A EDFBuilder is a composite resource that represents a K8S Cluster with edfbuilder Installed
|
||||
type: object
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
repoURL:
|
||||
type: string
|
||||
description: URL to ArgoCD stack of stacks repo
|
||||
required:
|
||||
- repoURL
|
||||
```
|
||||
|
||||
### composition.yaml
|
||||
|
||||
This is a shortened version of the file `examples/composition_deprecated/composition.yaml`. It combines a `KindCluster` with
deployments of provider-helm and provider-kubernetes. Note that the `ProviderConfig` and the kindserver secret have already been
applied to kubernetes (by the Makefile) before applying this composition.
|
||||
|
||||
```
|
||||
apiVersion: apiextensions.crossplane.io/v1
|
||||
kind: Composition
|
||||
metadata:
|
||||
name: edfbuilders.edfbuilder.crossplane.io
|
||||
spec:
|
||||
writeConnectionSecretsToNamespace: crossplane-system
|
||||
compositeTypeRef:
|
||||
apiVersion: edfbuilder.crossplane.io/v1alpha1
|
||||
kind: EDFBuilder
|
||||
resources:
|
||||
|
||||
### kindcluster
|
||||
- base:
|
||||
apiVersion: container.kind.crossplane.io/v1alpha1
|
||||
kind: KindCluster
|
||||
metadata:
|
||||
name: example
|
||||
spec:
|
||||
forProvider:
|
||||
kindConfig: |
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: InitConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "ingress-ready=true"
|
||||
extraPortMappings:
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
protocol: TCP
|
||||
- containerPort: 443
|
||||
hostPort: 443
|
||||
protocol: TCP
|
||||
containerdConfigPatches:
|
||||
- |-
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."gitea.cnoe.localtest.me:443"]
|
||||
endpoint = ["https://gitea.cnoe.localtest.me"]
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs."gitea.cnoe.localtest.me".tls]
|
||||
insecure_skip_verify = true
|
||||
providerConfigRef:
|
||||
name: example-provider-config
|
||||
writeConnectionSecretToRef:
|
||||
namespace: default
|
||||
name: my-connection-secret
|
||||
|
||||
### helm provider config
|
||||
- base:
|
||||
apiVersion: helm.crossplane.io/v1beta1
|
||||
kind: ProviderConfig
|
||||
spec:
|
||||
credentials:
|
||||
source: Secret
|
||||
secretRef:
|
||||
namespace: default
|
||||
name: my-connection-secret
|
||||
key: kubeconfig
|
||||
patches:
|
||||
- fromFieldPath: metadata.name
|
||||
toFieldPath: metadata.name
|
||||
readinessChecks:
|
||||
- type: None
|
||||
|
||||
### ingress-nginx
|
||||
- base:
|
||||
apiVersion: helm.crossplane.io/v1beta1
|
||||
kind: Release
|
||||
metadata:
|
||||
annotations:
|
||||
crossplane.io/external-name: ingress-nginx
|
||||
spec:
|
||||
rollbackLimit: 99999
|
||||
forProvider:
|
||||
chart:
|
||||
name: ingress-nginx
|
||||
repository: https://kubernetes.github.io/ingress-nginx
|
||||
version: 4.11.3
|
||||
namespace: ingress-nginx
|
||||
values:
|
||||
controller:
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
hostPort:
|
||||
enabled: true
|
||||
terminationGracePeriodSeconds: 0
|
||||
service:
|
||||
type: NodePort
|
||||
watchIngressWithoutClass: true
|
||||
|
||||
nodeSelector:
|
||||
ingress-ready: "true"
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Equal"
|
||||
effect: "NoSchedule"
|
||||
- key: "node-role.kubernetes.io/control-plane"
|
||||
operator: "Equal"
|
||||
effect: "NoSchedule"
|
||||
|
||||
publishService:
|
||||
enabled: false
|
||||
extraArgs:
|
||||
publish-status-address: localhost
|
||||
# added for idpbuilder
|
||||
enable-ssl-passthrough: ""
|
||||
|
||||
# added for idpbuilder
|
||||
allowSnippetAnnotations: true
|
||||
|
||||
# added for idpbuilder
|
||||
config:
|
||||
proxy-buffer-size: 32k
|
||||
use-forwarded-headers: "true"
|
||||
patches:
|
||||
- fromFieldPath: metadata.name
|
||||
toFieldPath: spec.providerConfigRef.name
|
||||
|
||||
### kubernetes provider config
|
||||
- base:
|
||||
apiVersion: kubernetes.crossplane.io/v1alpha1
|
||||
kind: ProviderConfig
|
||||
spec:
|
||||
credentials:
|
||||
source: Secret
|
||||
secretRef:
|
||||
namespace: default
|
||||
name: my-connection-secret
|
||||
key: kubeconfig
|
||||
patches:
|
||||
- fromFieldPath: metadata.name
|
||||
toFieldPath: metadata.name
|
||||
readinessChecks:
|
||||
- type: None
|
||||
|
||||
### kubernetes argocd stack of stacks application
|
||||
- base:
|
||||
apiVersion: kubernetes.crossplane.io/v1alpha2
|
||||
kind: Object
|
||||
spec:
|
||||
forProvider:
|
||||
manifest:
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: edfbuilder
|
||||
namespace: argocd
|
||||
labels:
|
||||
env: dev
|
||||
spec:
|
||||
destination:
|
||||
name: in-cluster
|
||||
namespace: argocd
|
||||
source:
|
||||
path: registry
|
||||
repoURL: 'https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot'
|
||||
targetRevision: HEAD
|
||||
project: default
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
selfHeal: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
patches:
|
||||
- fromFieldPath: metadata.name
|
||||
toFieldPath: spec.providerConfigRef.name
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Set these values to allow many kind clusters to run in parallel, if needed:
|
||||
|
||||
```
|
||||
sudo sysctl fs.inotify.max_user_watches=524288
|
||||
sudo sysctl fs.inotify.max_user_instances=512
|
||||
|
||||
# To make the changes persistent, edit the file /etc/sysctl.conf and add these lines:
|
||||
fs.inotify.max_user_watches = 524288
|
||||
fs.inotify.max_user_instances = 512
|
||||
```
|
||||
|
||||
Start provider-kind:
|
||||
|
||||
```
|
||||
make build
|
||||
kind delete clusters $(kind get clusters)
|
||||
kind create cluster --name=provider-kind-dev
|
||||
DOCKER_HOST_IP="$(docker inspect $(docker ps | grep kindest | awk '{ print $1 }' | head -n1) | jq -r .[0].NetworkSettings.Networks.kind.Gateway)" make dev
|
||||
```
|
||||
|
||||
Wait until debug output of the provider-kind is shown:
|
||||
|
||||
```
|
||||
...
|
||||
namespace/crossplane-system configured
|
||||
secret/example-provider-secret created
|
||||
providerconfig.kind.crossplane.io/example-provider-config created
|
||||
14:49:50 [ .. ] Starting Provider Kind controllers
|
||||
2024-11-12T14:49:54+01:00 INFO controller-runtime.metrics Starting metrics server
|
||||
2024-11-12T14:49:54+01:00 INFO Starting EventSource {"controller": "providerconfig/providerconfig.kind.crossplane.io", "controllerGroup": "kind.crossplane.io", "controllerKind": "ProviderConfig", "source": "kind source: *v1alpha1.ProviderConfig"}
|
||||
2024-11-12T14:49:54+01:00 INFO Starting EventSource {"controller": "providerconfig/providerconfig.kind.crossplane.io", "controllerGroup": "kind.crossplane.io", "controllerKind": "ProviderConfig", "source": "kind source: *v1alpha1.ProviderConfigUsage"}
|
||||
2024-11-12T14:49:54+01:00 INFO Starting Controller {"controller": "providerconfig/providerconfig.kind.crossplane.io", "controllerGroup": "kind.crossplane.io", "controllerKind": "ProviderConfig"}
|
||||
2024-11-12T14:49:54+01:00 INFO Starting EventSource {"controller": "managed/kindcluster.container.kind.crossplane.io", "controllerGroup": "container.kind.crossplane.io", "controllerKind": "KindCluster", "source": "kind source: *v1alpha1.KindCluster"}
|
||||
2024-11-12T14:49:54+01:00 INFO Starting Controller {"controller": "managed/kindcluster.container.kind.crossplane.io", "controllerGroup": "container.kind.crossplane.io", "controllerKind": "KindCluster"}
|
||||
2024-11-12T14:49:54+01:00 INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false}
|
||||
2024-11-12T14:49:54+01:00 INFO Starting workers {"controller": "providerconfig/providerconfig.kind.crossplane.io", "controllerGroup": "kind.crossplane.io", "controllerKind": "ProviderConfig", "worker count": 10}
|
||||
2024-11-12T14:49:54+01:00 DEBUG provider-kind Reconciling {"controller": "providerconfig/providerconfig.kind.crossplane.io", "request": {"name":"example-provider-config"}}
|
||||
2024-11-12T14:49:54+01:00 INFO Starting workers {"controller": "managed/kindcluster.container.kind.crossplane.io", "controllerGroup": "container.kind.crossplane.io", "controllerKind": "KindCluster", "worker count": 10}
|
||||
2024-11-12T14:49:54+01:00 INFO KubeAPIWarningLogger metadata.finalizers: "in-use.crossplane.io": prefer a domain-qualified finalizer name to avoid accidental conflicts with other finalizer writers
|
||||
2024-11-12T14:49:54+01:00 DEBUG provider-kind Reconciling {"controller": "providerconfig/providerconfig.kind.crossplane.io", "request": {"name":"example-provider-config"}}
|
||||
|
||||
```
|
||||
|
||||
Start kindserver:
|
||||
|
||||
see kindserver/README.md
|
||||
|
||||
When kindserver is started:
|
||||
|
||||
```bash
|
||||
cd examples/composition_deprecated
|
||||
kubectl apply -f definition.yaml
|
||||
kubectl apply -f composition.yaml
|
||||
kubectl apply -f cluster.yaml
|
||||
```
|
||||
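The `cluster.yaml` applied above is the deployment of the composition. It could
look roughly like this sketch; the name matches the output below, while the
repoURL value is illustrative:

```
apiVersion: edfbuilder.crossplane.io/v1alpha1
kind: EDFBuilder
metadata:
  name: kindcluster
spec:
  repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
```
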
|
||||
List the created elements, wait until the new cluster is created, then switch back to the primary cluster:
|
||||
|
||||
```bash
|
||||
kubectl config use-context kind-provider-kind-dev
|
||||
```
|
||||
|
||||
Show edfbuilder compositions:
|
||||
|
||||
```bash
|
||||
kubectl get edfbuilders
|
||||
NAME SYNCED READY COMPOSITION AGE
|
||||
kindcluster True True edfbuilders.edfbuilder.crossplane.io 4m45s
|
||||
```
|
||||
|
||||
Show kind clusters:
|
||||
|
||||
```bash
|
||||
kubectl get kindclusters
|
||||
NAME READY SYNCED EXTERNAL-NAME INTERNALIP VERSION AGE
|
||||
kindcluster-wlxrt True True kindcluster-wlxrt 192.168.199.19 v1.31.0 5m12s
|
||||
```
|
||||
|
||||
Show helm deployments:
|
||||
|
||||
```bash
|
||||
kubectl get releases
|
||||
NAME CHART VERSION SYNCED READY STATE REVISION DESCRIPTION AGE
|
||||
kindcluster-29dgf ingress-nginx 4.11.3 True True deployed 1 Install complete 5m32s
|
||||
kindcluster-w2dxl forgejo 10.0.2 True True deployed 1 Install complete 5m32s
|
||||
kindcluster-x8x9k argo-cd 7.6.12 True True deployed 1 Install complete 5m32s
|
||||
```
|
||||
|
||||
Show kubernetes objects:
|
||||
|
||||
```bash
|
||||
kubectl get objects
|
||||
NAME KIND PROVIDERCONFIG SYNCED READY AGE
|
||||
kindcluster-8tbv8 ConfigMap kindcluster True True 5m50s
|
||||
kindcluster-9lwc9 ConfigMap kindcluster True True 5m50s
|
||||
kindcluster-9sgmd Deployment kindcluster True True 5m50s
|
||||
kindcluster-ct2h7 Application kindcluster True True 5m50s
|
||||
kindcluster-s5knq ConfigMap kindcluster True True 5m50s
|
||||
```
|
||||
|
||||
Open the composition in VS Code: examples/composition_deprecated/composition.yaml
|
||||
|
||||
## What is missing
|
||||
|
||||
Currently missing is the third and final part, the imperative steps which need to be processed:
|
||||
|
||||
* creation of TLS certificates and giteaAdmin password
|
||||
* creation of a Forgejo repository for the stacks
|
||||
* uploading the stacks in the Forgejo repository
|
||||
|
||||
Connecting the definition field (the ArgoCD repo URL) to the composition, as well as the composition interconnects (function-patch-and-transform), is also missing.
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
<mxfile host="65bd71144e">
|
||||
<diagram id="IShv2I7JLD2IyEDAFXRT" name="Page-1">
|
||||
<mxGraphModel dx="813" dy="535" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" background="#ffffff" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0"/>
|
||||
<mxCell id="1" parent="0"/>
|
||||
<mxCell id="19" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="240" y="20" width="300" height="520" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="2" value="provider-kind<br><b>Secret</b>" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="80" y="80" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="14" style="edgeStyle=none;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="3" target="2">
|
||||
<mxGeometry relative="1" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="3" value="provider-kind<br><b>ProviderConfig</b>" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="280" y="80" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="15" style="edgeStyle=none;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="4" target="3">
|
||||
<mxGeometry relative="1" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="4" value="provider-kind<br><b>KindCluster</b>" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="280" y="160" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="16" style="edgeStyle=none;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="5" target="4">
|
||||
<mxGeometry relative="1" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="5" value="provider-helm<br><b>ProviderConfig</b>" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="280" y="240" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="18" style="edgeStyle=none;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="6" target="5">
|
||||
<mxGeometry relative="1" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="6" value="provider-helm<br><b>Release</b>" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="280" y="320" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="7" value="creates kind<br>cluster" style="ellipse;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="390" y="160" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="8" value="deploys argocd" style="ellipse;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="390" y="320" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="9" value="provider-kubernetes<br><b>ProviderConfig</b>" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="280" y="400" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="17" style="edgeStyle=none;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="10" target="9">
|
||||
<mxGeometry relative="1" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="10" value="provider-kubernetes<br><b>Object</b>" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="280" y="480" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="11" value="deploys app of apps" style="ellipse;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="390" y="480" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="13" value="" style="curved=1;endArrow=classic;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="9" target="4">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="390" y="280" as="sourcePoint"/>
|
||||
<mxPoint x="440" y="230" as="targetPoint"/>
|
||||
<Array as="points">
|
||||
<mxPoint x="260" y="400"/>
|
||||
<mxPoint x="260" y="300"/>
|
||||
<mxPoint x="260" y="200"/>
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="20" value="Composition" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="240" y="20" width="120" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
<mxfile host="65bd71144e">
|
||||
<diagram id="gTaMLqmeyucP2gS6krt6" name="Page-1">
|
||||
<mxGraphModel dx="813" dy="535" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" background="#ffffff" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0"/>
|
||||
<mxCell id="1" parent="0"/>
|
||||
<mxCell id="2" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="40" y="60" width="510" height="240" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="3" value="kindserver HTTP interface" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="40" y="60" width="210" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="4" value="&nbsp; GET /api/v1/kindserver/{clustername}" style="rounded=0;whiteSpace=wrap;html=1;align=left;" vertex="1" parent="1">
|
||||
<mxGeometry x="60" y="120" width="250" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="5" value="&nbsp; DELETE /api/v1/kindserver/{clustername}" style="rounded=0;whiteSpace=wrap;html=1;align=left;" vertex="1" parent="1">
|
||||
<mxGeometry x="60" y="180" width="250" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="6" value="&nbsp; POST /api/v1/kindserver/{clustername}" style="rounded=0;whiteSpace=wrap;html=1;align=left;" vertex="1" parent="1">
|
||||
<mxGeometry x="60" y="240" width="250" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="7" value="required HTTP header" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="390" y="60" width="160" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
<mxCell id="8" value="Authorization" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="390" y="100" width="160" height="40" as="geometry"/>
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
|

@ -1,49 +0,0 @@
<mxfile host="65bd71144e">
<diagram id="88xMscIdxIgwiurMMPnB" name="Page-1">
<mxGraphModel dx="813" dy="535" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" background="#ffffff" math="0" shadow="0">
<root>
<mxCell id="0"/>
<mxCell id="1" parent="0"/>
<mxCell id="18" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry width="630" height="340" as="geometry"/>
</mxCell>
<mxCell id="17" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="240" y="20" width="370" height="300" as="geometry"/>
</mxCell>
<mxCell id="6" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="270" y="80" width="320" height="220" as="geometry"/>
</mxCell>
<mxCell id="7" value="crossplane" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="270" y="80" width="90" height="40" as="geometry"/>
</mxCell>
<mxCell id="8" value="provider-kind" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="300" y="170" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="10" style="html=1;startArrow=classic;startFill=1;" parent="1" source="9" target="8" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="9" value="kindserver" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="20" y="170" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="12" value="has password" style="ellipse;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="110" y="220" width="90" height="60" as="geometry"/>
</mxCell>
<mxCell id="13" value="uses password" style="ellipse;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="390" y="220" width="90" height="60" as="geometry"/>
</mxCell>
<mxCell id="15" value="has IP" style="ellipse;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="10" y="220" width="90" height="60" as="geometry"/>
</mxCell>
<mxCell id="16" value="uses IP" style="ellipse;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="290" y="220" width="90" height="60" as="geometry"/>
</mxCell>
<mxCell id="20" value="running on the local host" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry width="150" height="40" as="geometry"/>
</mxCell>
<mxCell id="21" value="running inside kind cluster" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="240" y="20" width="160" height="40" as="geometry"/>
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>
Before: Size: 26 KiB

@ -1,71 +0,0 @@
<mxfile host="65bd71144e">
<diagram id="OIxMhAz8XNpLu5mdxKmc" name="Page-1">
<mxGraphModel dx="813" dy="535" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" background="#ffffff" math="0" shadow="0">
<root>
<mxCell id="0"/>
<mxCell id="1" parent="0"/>
<mxCell id="3" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="5" y="40" width="585" height="410" as="geometry"/>
</mxCell>
<mxCell id="4" value="kubernetes objects" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="5" y="40" width="140" height="40" as="geometry"/>
</mxCell>
<mxCell id="5" value="provider-kind ProviderConfig secret" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="20" y="100" width="230" height="50" as="geometry"/>
</mxCell>
<mxCell id="13" style="edgeStyle=none;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="6" target="5">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="6" value="provider-kind&nbsp;ProviderConfig" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="20" y="170" width="230" height="50" as="geometry"/>
</mxCell>
<mxCell id="11" style="edgeStyle=none;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="7" target="6">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="7" value="provider-kind&nbsp;KindCluster" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="20" y="240" width="230" height="50" as="geometry"/>
</mxCell>
<mxCell id="17" style="edgeStyle=none;html=1;" edge="1" parent="1" source="8" target="16">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="8" value="provider-helm ProviderConfig" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="210" y="310" width="210" height="50" as="geometry"/>
</mxCell>
<mxCell id="9" value="password 12345" style="ellipse;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="240" y="105" width="120" height="40" as="geometry"/>
</mxCell>
<mxCell id="10" value="endpoint 172.18.0.1" style="ellipse;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="240" y="175" width="150" height="40" as="geometry"/>
</mxCell>
<mxCell id="15" value="deploys to KindCluster" style="ellipse;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="410" y="317.5" width="150" height="35" as="geometry"/>
</mxCell>
<mxCell id="16" value="writes connection secret" style="ellipse;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="240" y="245" width="150" height="40" as="geometry"/>
</mxCell>
<mxCell id="22" style="edgeStyle=none;html=1;" edge="1" parent="1" source="18">
<mxGeometry relative="1" as="geometry">
<mxPoint x="300" y="360" as="targetPoint"/>
</mxGeometry>
</mxCell>
<mxCell id="18" value="argocd" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="160" y="390" width="90" height="40" as="geometry"/>
</mxCell>
<mxCell id="20" style="edgeStyle=none;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="19" target="8">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="19" value="forgejo" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="270" y="390" width="90" height="40" as="geometry"/>
</mxCell>
<mxCell id="23" style="edgeStyle=none;html=1;entryX=0.579;entryY=1.014;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1" source="21" target="8">
<mxGeometry relative="1" as="geometry">
<mxPoint x="320" y="360" as="targetPoint"/>
</mxGeometry>
</mxCell>
<mxCell id="21" value="ingress-nginx" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="380" y="390" width="90" height="40" as="geometry"/>
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>
Before: Size: 35 KiB

@ -1,32 +0,0 @@
---
title: Kube-prometheus-stack
description: Kube-prometheus-stack contains Kubernetes manifests, Prometheus and Grafana, including preconfigured dashboards
---

## Kube-prometheus-stack Overview

Grafana is an open-source monitoring solution that enables visualization of metrics and logs.
Prometheus is an open-source monitoring and alerting system that collects metrics from services and makes them available for display in Grafana.

### Implementation Details

The application is started in edfbuilder/kind/stacks/core/kube-prometheus.yaml.
The application uses the sync option spec.syncPolicy.syncOptions ServerSideApply=true. This is necessary because kube-prometheus-stack exceeds the size limit for secrets; without this option a sync attempt will fail with an error.
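
For illustration, the relevant part of such an Argo CD Application could look like the following sketch; the names, namespaces, and chart source are placeholders rather than the actual contents of kube-prometheus.yaml:

```yaml
# Minimal sketch only: metadata, source, and destination are illustrative placeholders.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kube-prometheus-stack        # placeholder name
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://prometheus-community.github.io/helm-charts   # placeholder chart source
    chart: kube-prometheus-stack
    targetRevision: "x.y.z"          # placeholder chart version
  destination:
    server: https://kubernetes.default.svc
    namespace: monitoring            # placeholder target namespace
  syncPolicy:
    syncOptions:
      - ServerSideApply=true         # required so the large manifests can be applied
```
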
The Helm values file edfbuilder/kind/stacks/core/kube-prometheus/values.yaml contains the following configuration values; a sketch of the resulting structure is shown after the list:

* grafana.additionalDataSources contains Loki as a Grafana data source.
* grafana.ingress contains the Grafana ingress configuration, such as the host URL (cnoe.localtest.me).
* grafana.sidecar.dashboards contains the configuration needed so that additional user-defined dashboards are loaded when Grafana is started.
* grafana.grafana.ini.server contains the configuration details that are necessary so that the ingress points to the correct URL.
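
As a rough sketch, the structure of these values might look as follows; the concrete entries in values.yaml may differ, and the Loki URL is a placeholder:

```yaml
# Illustrative sketch of the value structure described above, not the actual file contents.
grafana:
  additionalDataSources:
    - name: Loki
      type: loki
      url: http://loki-gateway.monitoring.svc.cluster.local   # placeholder Loki service URL
  ingress:
    enabled: true
    hosts:
      - cnoe.localtest.me
    path: /grafana
  sidecar:
    dashboards:
      enabled: true
      label: grafana_dashboard   # ConfigMaps carrying this label are loaded as dashboards
  grafana.ini:
    server:
      root_url: https://cnoe.localtest.me/grafana
      serve_from_sub_path: true
```
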
### Start

Once Grafana is running, it is accessible at https://cnoe.localtest.me/grafana.
Many preconfigured dashboards can be opened by clicking the menu option Dashboards.

### Adding your own dashboards

The application edfbuilder/kind/stacks/core/kube-prometheus.yaml is used to import new Loki dashboards. Examples of imported dashboards can be found in the folder edfbuilder/kind/stacks/core/kube-prometheus/dashboards.

It is possible to add your own dashboards. Dashboards must be in JSON format. To add your own dashboard, create a new ConfigMap in YAML format using one of the examples as a blueprint. The dashboard JSON has to be added as the value of data.k8s-dashboard-[...].json, as in the examples. (It is important to use a unique name for data.k8s-dashboard-[...].json for each dashboard.)
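
A hypothetical dashboard ConfigMap following this pattern could look like the sketch below; the name, label, and dashboard JSON are placeholders, not taken from the existing examples:

```yaml
# Sketch only: replace the name, label, and dashboard JSON with real values.
apiVersion: v1
kind: ConfigMap
metadata:
  name: k8s-dashboard-my-team-overview
  labels:
    grafana_dashboard: "1"   # assumed sidecar label; it must match the sidecar configuration
data:
  k8s-dashboard-my-team-overview.json: |
    {
      "title": "My Team Overview",
      "panels": []
    }
```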

Currently, the preconfigured dashboards include several dashboards for Loki and a dashboard that showcases Nginx-Ingress metrics.

@ -1,10 +0,0 @@
---
title: Loki
description: Grafana Loki is a scalable open-source log aggregation system
---

## Loki Overview

The application Grafana Loki is started in edfbuilder/kind/stacks/core/loki.yaml.
Loki runs in microservices mode and comprises the components ingester, distributor, querier, and query-frontend.
The Helm values file edfbuilder/kind/stacks/core/loki/values.yaml contains the configuration values.
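
As a rough illustration of what per-component settings in such a values file can look like (this assumes a loki-distributed-style chart layout; the key names are an assumption, not taken from the actual file):

```yaml
# Sketch only: key names and replica counts are illustrative.
ingester:
  replicas: 1
distributor:
  replicas: 1
querier:
  replicas: 1
queryFrontend:
  replicas: 1
```
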
@ -1,9 +0,0 @@
---
title: Promtail
description: Grafana Promtail is an agent that ships logs to a Grafana Loki instance (log-shipper)
---

## Promtail Overview

The application Grafana Promtail is started in edfbuilder/kind/stacks/core/promtail.yaml.
The Helm values file edfbuilder/kind/stacks/core/promtail/values.yaml contains the configuration values.
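
For illustration, the part of such a values file that points Promtail at Loki might look like the sketch below; the push URL is a placeholder, not the value from the actual file:

```yaml
# Sketch only: the Loki endpoint is a placeholder.
config:
  clients:
    - url: http://loki-gateway.monitoring.svc.cluster.local/loki/api/v1/push
```
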
46
content/en/docs/_index.md
Normal file → Executable file

@ -1,45 +1,9 @@
---
title: "Documentation"
linkTitle: "Documentation"
menu:
  main:
    weight: 20
title: Developer Framework Documentation
linkTitle: Docs
menu: {main: {weight: 20}}
weight: 20
---

{{% alert title="Draft" color="warning" %}}
**Editorial Status**: This page is currently being developed.
This section is the project documentation for the IPCEI-CIS Developer Framework.

* **Jira Ticket**: [TICKET-XXX](https://your-jira/browse/TICKET-XXX)
* **Assignee**: [Name or Team]
* **Status**: Draft
* **Last Updated**: YYYY-MM-DD
* **TODO**:
  * [ ] Add detailed component description
  * [ ] Include usage examples and code samples
  * [ ] Add architecture diagrams
  * [ ] Review and finalize content
{{% /alert %}}

# Edge Developer Platform (EDP) Documentation

Welcome to the EDP documentation. This documentation serves developers, engineers, and auditors who want to understand, use, and audit the Edge Developer Platform.

## Target Audience

* **Developers & Engineers**: Learn how to use the platform, deploy applications, and integrate services
* **Platform Engineers**: Understand the architecture, components, and operational aspects
* **Auditors & Governance**: Access project history, decisions, and compliance information

## Documentation Structure

The documentation follows a top-down approach focusing on outcomes and practical usage:

* **Platform Overview**: High-level introduction and product structure
* **Components**: Individual platform components and their usage
* **Getting Started**: Onboarding and quick start guides
* **Operations**: Deployment, monitoring, and troubleshooting
* **Governance**: Project history, decisions, and compliance

## Purpose

This documentation describes the outcomes and products of the edgeDeveloperFramework (eDF) project. The EDP is designed as a usable, integrated platform with clear links to repositories and implementation details.

@ -1,141 +0,0 @@
---
title: "[Component Name]"
linkTitle: "[Short Name]"
weight: 1
description: >
  [Brief one-line description of the component]
---

{{% alert title="Draft" color="warning" %}}
**Editorial Status**: This page is currently being developed.

* **Jira Ticket**: [TICKET-XXX](https://your-jira/browse/TICKET-XXX)
* **Assignee**: [Name or Team]
* **Status**: Draft
* **Last Updated**: YYYY-MM-DD
* **TODO**:
  * [ ] Add detailed component description
  * [ ] Include usage examples and code samples
  * [ ] Add architecture diagrams
  * [ ] Review and finalize content
{{% /alert %}}

## Overview

[Detailed description of the component - what it is, what it does, and why it exists]

## Key Features

* [Feature 1]
* [Feature 2]
* [Feature 3]

## Purpose in EDP

[Explain the role this component plays in the Edge Developer Platform and how it contributes to the overall platform capabilities]

## Repository

**Code**: [Link to source code repository]

**Documentation**: [Link to component-specific documentation]

## Getting Started

### Prerequisites

* [Prerequisite 1]
* [Prerequisite 2]

### Quick Start

[Step-by-step guide to get started with this component]

1. [Step 1]
2. [Step 2]
3. [Step 3]

### Verification

[How to verify the component is working correctly]

## Usage Examples

### [Use Case 1]

[Example with code/commands showing common use case]

```bash
# Example commands
```

### [Use Case 2]

[Another common scenario]

## Integration Points

* **[Component A]**: [How it integrates]
* **[Component B]**: [How it integrates]
* **[Component C]**: [How it integrates]

## Architecture

[Optional: Add architectural diagrams and descriptions]

### C4 charts

Embed C4 charts this way:

1. Add a likec4-view with the name of the view:
{{< likec4-view view="components-template-documentation" project="architecture" title="Example Documentation Diagram" >}}

2. Create the LikeC4 view somewhere in ```./resources/edp-likec4/views```; the example above is in ```./resources/edp-likec4/views/documentation/components-template-documentation.c4```.

3. Run ```task likec4:generate``` to create the web component.

4. If you are in ```task:serve``` hot-reload mode, the view will show up directly.

### Component Architecture (C4)

[Add C4 Container or Component diagrams showing the internal structure]

### Sequence Diagrams

[Add sequence diagrams showing key interaction flows with other components]

### Deployment Architecture

[Add infrastructure and deployment diagrams showing how the component is deployed]

## Configuration

[Key configuration options and how to set them]

## Troubleshooting

### [Common Issue 1]

**Problem**: [Description]

**Solution**: [How to fix]

### [Common Issue 2]

**Problem**: [Description]

**Solution**: [How to fix]

## Status

**Maturity**: [Production / Beta / Experimental]

## Additional Resources

* [Link to external documentation]
* [Link to community resources]
* [Link to related components]

## Documentation Notes

[Instructions for team members filling in this documentation - remove this section once complete]