Merge remote-tracking branch 'refs/remotes/ipceicis/development' into development

This commit is contained in:
Stephan Lo 2025-10-25 09:29:19 +02:00
commit eec679657b
300 changed files with 11315 additions and 39833 deletions

22
.dockerignore Normal file
View file

@ -0,0 +1,22 @@
.github
.gitignore
.vscode
.devbox
.devcontainer
node_modules
public
resources
tmp
*.md
!content/**/*.md
TESTING.md
Taskfile.yml
devbox.json
devbox.lock
.hugo_build.lock
.htmltest.yml
.htmlvalidate.json
.markdownlint.json
# Ensure package-lock.json is included for npm ci
!package-lock.json

9
.env.versions Normal file
View file

@ -0,0 +1,9 @@
# Tool versions for development and CI/CD
# These versions are used in:
# - devbox.json (pinned versions)
# - Dockerfile (build arguments)
# - .github/workflows/ci.yaml (CI/CD pipeline)
NODE_VERSION=24.10.0
GO_VERSION=1.25.1
HUGO_VERSION=0.151.0

79
.github/workflows/ci.yaml vendored Normal file
View file

@ -0,0 +1,79 @@
name: ci
# Build-and-push pipeline: builds the Hugo site image on every push and on
# manual dispatch, tagging it with the commit SHA (and the ref name for tags).
on:
push:
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# .env.versions is the single source of truth for tool versions (see VERSIONS.md).
- name: Load versions from .env.versions
id: versions
run: |
# Source the versions file
set -a
source .env.versions
set +a
echo "node_version=${NODE_VERSION}" >> "$GITHUB_OUTPUT"
echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT"
echo "hugo_version=${HUGO_VERSION}" >> "$GITHUB_OUTPUT"
echo "Node: ${NODE_VERSION}"
echo "Go: ${GO_VERSION}"
echo "Hugo: ${HUGO_VERSION}"
# Derive the registry host and lowercase image repository from the server context.
- name: Repository meta
id: repository
run: |
registry=${{ github.server_url }}
# Strip the scheme ("http://" or "https://") via shell glob pattern removal.
registry=${registry##http*://}
echo "registry=${registry}" >> "$GITHUB_OUTPUT"
echo "registry=${registry}"
# OCI image repository names must be lowercase.
repository="$(echo "${{ github.repository }}" | tr '[:upper:]' '[:lower:]')"
echo "repository=${repository}" >> "$GITHUB_OUTPUT"
echo "repository=${repository}"
- name: Docker meta
uses: docker/metadata-action@v5
id: docker
with:
images: ${{ steps.repository.outputs.registry }}/${{ steps.repository.outputs.repository }}
# Tag with the bare commit SHA (no prefix), plus the ref name on tag pushes.
tags: |
type=sha,prefix=
type=ref,event=tag
-
name: Login to registry
uses: docker/login-action@v3
with:
registry: ${{ steps.repository.outputs.registry }}
username: "${{ secrets.PACKAGES_USER }}"
password: "${{ secrets.PACKAGES_TOKEN }}"
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
buildkitd-flags: '--allow-insecure-entitlement network.host'
driver-opts: network=host
# Single-platform build; multi-arch (amd64 + arm64) images are produced by release.yaml.
- name: Build and push
uses: docker/build-push-action@v6
with:
context: .
push: true
allow: network.host
network: host
tags: ${{ steps.docker.outputs.tags }}
labels: ${{ steps.docker.outputs.labels }}
build-args: |
NODE_VERSION=${{ steps.versions.outputs.node_version }}
GO_VERSION=${{ steps.versions.outputs.go_version }}
HUGO_VERSION=${{ steps.versions.outputs.hugo_version }}

32
.github/workflows/delete-edge.yaml vendored Normal file
View file

@ -0,0 +1,32 @@
name: delete-edge
on:
  workflow_run:
    # Must match the `name:` of the image-building workflow. That workflow is
    # named "ci" (see .github/workflows/ci.yaml); the previous value "build"
    # referenced a nonexistent workflow, so this trigger never fired.
    workflows: [ci]
    types:
      - completed
  workflow_dispatch:
jobs:
  build:
    runs-on: ubuntu-22.04
    # workflow_run "completed" also fires for failed/cancelled runs; only act
    # when the triggering build succeeded. Manual dispatch carries no
    # workflow_run payload, so it is allowed through explicitly.
    if: ${{ github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      # Substitute the placeholder in the manifest with the short-SHA image tag.
      - name: Replace Image Version
        run: |
          sha="${{ github.sha }}"
          shortSha="${sha:0:7}"
          echo "Setting image version to: edp.buildth.ing/devfw-cicd/website-and-documentation:${shortSha}"
          sed -i "s@###IMAGETAG###@edp.buildth.ing/devfw-cicd/website-and-documentation:${shortSha}@g" ./k8s-deployment.yaml
      - name: Delete action
        uses: https://edp.buildth.ing/DevFW-CICD/edge-connect-delete-action@main
        id: delete
        with:
          configFile: ./edgeconnectdeployment.yaml
          baseUrl: https://hub.apps.edge.platform.mg3.mdb.osc.live
          username: ${{ secrets.EDGEXR_PLATFORM_USERNAME }}
          password: ${{ secrets.EDGEXR_PLATFORM_PASSWORD }}

32
.github/workflows/deploy-edge.yaml vendored Normal file
View file

@ -0,0 +1,32 @@
name: deploy-edge
on:
  workflow_run:
    # Must match the `name:` of the image-building workflow. That workflow is
    # named "ci" (see .github/workflows/ci.yaml); the previous value "build"
    # referenced a nonexistent workflow, so this trigger never fired.
    workflows: [ci]
    types:
      - completed
  workflow_dispatch:
jobs:
  build:
    runs-on: ubuntu-22.04
    # Never deploy an image from a failed/cancelled build: workflow_run
    # "completed" fires regardless of conclusion. Manual dispatch carries no
    # workflow_run payload, so it is allowed through explicitly.
    if: ${{ github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      # Substitute the placeholder in the manifest with the short-SHA image tag.
      - name: Replace Image Version
        run: |
          sha="${{ github.sha }}"
          shortSha="${sha:0:7}"
          echo "Setting image version to: edp.buildth.ing/devfw-cicd/website-and-documentation:${shortSha}"
          sed -i "s@###IMAGETAG###@edp.buildth.ing/devfw-cicd/website-and-documentation:${shortSha}@g" ./k8s-deployment.yaml
      - name: Deploy action
        uses: https://edp.buildth.ing/DevFW-CICD/edge-connect-deploy-action@main
        id: deploy
        with:
          configFile: ./edgeconnectdeployment.yaml
          baseUrl: https://hub.apps.edge.platform.mg3.mdb.osc.live
          username: ${{ secrets.EDGEXR_PLATFORM_USERNAME }}
          password: ${{ secrets.EDGEXR_PLATFORM_PASSWORD }}

149
.github/workflows/release.yaml vendored Normal file
View file

@ -0,0 +1,149 @@
name: release
on:
push:
tags:
- 'v*.*.*' # Triggers on semantic-versioning tags (v1.0.0, v2.1.3, etc.)
permissions:
contents: write
packages: write
jobs:
release:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
- name: Load versions from .env.versions
id: versions
run: |
set -a
source .env.versions
set +a
echo "node_version=${NODE_VERSION}" >> "$GITHUB_OUTPUT"
echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT"
echo "hugo_version=${HUGO_VERSION}" >> "$GITHUB_OUTPUT"
echo "Node: ${NODE_VERSION}"
echo "Go: ${GO_VERSION}"
echo "Hugo: ${HUGO_VERSION}"
# Strip the "refs/tags/v" prefix, e.g. refs/tags/v1.2.3 -> 1.2.3.
- name: Extract version from tag
id: version
run: |
VERSION=${GITHUB_REF#refs/tags/v}
echo "version=${VERSION}" >> "$GITHUB_OUTPUT"
echo "Version: ${VERSION}"
- name: Repository meta
id: repository
run: |
registry=${{ github.server_url }}
# Strip the scheme ("http://" or "https://") via shell glob pattern removal.
registry=${registry##http*://}
echo "registry=${registry}" >> "$GITHUB_OUTPUT"
echo "registry=${registry}"
# OCI image repository names must be lowercase.
repository="$(echo "${{ github.repository }}" | tr '[:upper:]' '[:lower:]')"
echo "repository=${repository}" >> "$GITHUB_OUTPUT"
echo "repository=${repository}"
- name: Docker meta
uses: docker/metadata-action@v5
id: docker
with:
images: ${{ steps.repository.outputs.registry }}/${{ steps.repository.outputs.repository }}
# NOTE: type=semver strips the Git tag's leading "v", so for tag v1.2.3 the
# images are tagged 1.2.3 / 1.2 / 1 / latest.
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value=latest
- name: Login to registry
uses: docker/login-action@v3
with:
registry: ${{ steps.repository.outputs.registry }}
username: "${{ secrets.PACKAGES_USER }}"
password: "${{ secrets.PACKAGES_TOKEN }}"
# QEMU enables the linux/arm64 half of the multi-platform build below.
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
buildkitd-flags: '--allow-insecure-entitlement network.host'
driver-opts: network=host
- name: Build and push release images
uses: docker/build-push-action@v6
with:
context: .
push: true
allow: network.host
network: host
platforms: linux/amd64,linux/arm64
tags: ${{ steps.docker.outputs.tags }}
labels: ${{ steps.docker.outputs.labels }}
build-args: |
NODE_VERSION=${{ steps.versions.outputs.node_version }}
GO_VERSION=${{ steps.versions.outputs.go_version }}
HUGO_VERSION=${{ steps.versions.outputs.hugo_version }}
- name: Generate changelog
id: changelog
run: |
# Find the previous tag (empty when this is the first release)
PREVIOUS_TAG=$(git describe --abbrev=0 --tags ${GITHUB_REF}^ 2>/dev/null || echo "")
if [ -z "$PREVIOUS_TAG" ]; then
echo "Erster Release - Changelog von Anfang an"
CHANGELOG=$(git log --pretty=format:"- %s (%h)" --no-merges)
else
echo "Changelog seit ${PREVIOUS_TAG}"
CHANGELOG=$(git log ${PREVIOUS_TAG}..${GITHUB_REF} --pretty=format:"- %s (%h)" --no-merges)
fi
# Write to the step output file (multiline value via heredoc-style delimiter)
{
echo 'changelog<<EOF'
echo "$CHANGELOG"
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Create Forgejo/Gitea Release
uses: actions/forgejo-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
direction: upload
release-dir: .
title: "Release ${{ steps.version.outputs.version }}"
tag: ${{ github.ref_name }}
token: ${{ secrets.GITHUB_TOKEN }}
release-notes: |
# Release ${{ steps.version.outputs.version }}
## Docker Images
Multi-platform images (linux/amd64, linux/arm64) sind verfügbar:
```bash
docker pull ${{ steps.repository.outputs.registry }}/${{ steps.repository.outputs.repository }}:${{ steps.version.outputs.version }}
docker pull ${{ steps.repository.outputs.registry }}/${{ steps.repository.outputs.repository }}:latest
```
## Build Versions
- Node.js: ${{ steps.versions.outputs.node_version }}
- Go: ${{ steps.versions.outputs.go_version }}
- Hugo: ${{ steps.versions.outputs.hugo_version }}
## Changes
${{ steps.changelog.outputs.changelog }}

51
.github/workflows/test.yml vendored Normal file
View file

@ -0,0 +1,51 @@
name: Hugo Site Tests
# NOTE(review): this merge targets a "development" branch — confirm that
# "main" is really the branch these tests should gate.
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
          fetch-depth: 0
      # Pin tool versions to .env.versions (the declared single source of
      # truth, see VERSIONS.md) instead of floating on "latest"/"24", which
      # could make CI diverge from local devbox and the Docker build.
      - name: Load versions from .env.versions
        id: versions
        run: |
          set -a
          source .env.versions
          set +a
          echo "node_version=${NODE_VERSION}" >> "$GITHUB_OUTPUT"
          echo "hugo_version=${HUGO_VERSION}" >> "$GITHUB_OUTPUT"
      - name: Setup Hugo
        uses: peaceiris/actions-hugo@v3
        with:
          hugo-version: ${{ steps.versions.outputs.hugo_version }}
          extended: true
      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version: ${{ steps.versions.outputs.node_version }}
          cache: 'npm'
      - name: Install dependencies
        run: |
          npm ci
          go install github.com/wjdp/htmltest@latest
      - name: Run tests
        run: |
          npm run test:build
          npm run test:markdown
          npm run test:html
      # External link rot should not fail CI; the report is uploaded below.
      - name: Run link checker
        run: htmltest
        continue-on-error: true
      - name: Upload htmltest results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: htmltest-report
          path: tmp/.htmltest/

32
.gitignore vendored
View file

@ -1,7 +1,31 @@
# Hugo
.hugo_build.lock
public
public/
resources/_gen/
# for npm devcontainer cli
package-lock.json
package.json
# Node.js / NPM
node_modules/
# Test outputs
tmp/
.htmltest/
# devbox
.devbox/
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db
# Logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

10
.htmltest.yml Normal file
View file

@ -0,0 +1,10 @@
# htmltest link-checker configuration; runs against the generated site in "public"
# (invoked by `task test:links` / `npm run test:links`).
DirectoryPath: "public"
CheckExternal: true
CheckInternalHash: true
# Placeholder and local-dev URLs that must not fail the check.
IgnoreURLs:
- "^https://example\\.docsy\\.dev"
- "^https://example\\.com"
- "^http://localhost"
IgnoreDirectoryMissingTrailingSlash: true
IgnoreAltMissing: true
CheckDoctype: true

12
.htmlvalidate.json Normal file
View file

@ -0,0 +1,12 @@
{
"extends": ["html-validate:recommended"],
"rules": {
"no-inline-style": "off",
"require-sri": "off",
"no-trailing-whitespace": "off",
"void-style": "off"
},
"elements": [
"html5"
]
}

8
.markdownlint.json Normal file
View file

@ -0,0 +1,8 @@
{
"default": true,
"MD013": false,
"MD033": false,
"MD041": false,
"MD024": { "siblings_only": true },
"MD025": { "front_matter_title": "" }
}

21
.vscode/settings.json vendored
View file

@ -1,3 +1,22 @@
{
"peacock.remoteColor": "#61dafb"
"peacock.remoteColor": "#61dafb",
"workbench.colorCustomizations": {
"activityBar.activeBackground": "#93e6fc",
"activityBar.background": "#93e6fc",
"activityBar.foreground": "#15202b",
"activityBar.inactiveForeground": "#15202b99",
"activityBarBadge.background": "#fa45d4",
"activityBarBadge.foreground": "#15202b",
"commandCenter.border": "#15202b99",
"sash.hoverBorder": "#93e6fc",
"statusBar.background": "#61dafb",
"statusBar.foreground": "#15202b",
"statusBarItem.hoverBackground": "#2fcefa",
"statusBarItem.remoteBackground": "#61dafb",
"statusBarItem.remoteForeground": "#15202b",
"titleBar.activeBackground": "#61dafb",
"titleBar.activeForeground": "#15202b",
"titleBar.inactiveBackground": "#61dafb99",
"titleBar.inactiveForeground": "#15202b99"
}
}

98
DOCKER.md Normal file
View file

@ -0,0 +1,98 @@
# Docker Build
This project uses a multi-stage Docker build that matches the local devbox development environment.
## Version Management
All tool versions are defined in `.env.versions` as the single source of truth:
```bash
NODE_VERSION=24.10.0
GO_VERSION=1.25.1
HUGO_VERSION=0.151.0
```
These versions are used in:
- `devbox.json` - Local development environment
- `Dockerfile` - Docker build arguments (with defaults)
- `.github/workflows/ci.yaml` - CI/CD pipeline
**Important:** When updating versions, modify `.env.versions` and sync with `devbox.json`.
## Local Build
### Using Task (recommended)
The easiest way to build the OCI image:
```bash
task build:oci-image
```
This automatically:
- Loads versions from `.env.versions`
- Builds the image with correct build arguments
- Tags with `latest` and git commit hash
To build and test:
```bash
task test:oci-image
```
### Automatic version loading
Use the helper script to load versions from `.env.versions`:
```bash
source scripts/get-versions.sh
```
This will show you the Docker build command with the correct versions.
### Manual build
```bash
docker build --network=host \
--build-arg NODE_VERSION=24.10.0 \
--build-arg GO_VERSION=1.25.1 \
--build-arg HUGO_VERSION=0.151.0 \
-t ipceicis-developerframework:latest .
```
### Test the image
```bash
docker run -d -p 8080:80 --name hugo-test ipceicis-developerframework:latest
curl http://localhost:8080
docker stop hugo-test && docker rm hugo-test
```
## CI/CD Pipeline
The GitHub Actions workflow (`.github/workflows/ci.yaml`) automatically:
1. Loads tool versions from `.env.versions`
2. Builds the container image (multi-arch amd64 + arm64 images are built by the release workflow, not by CI)
3. Pushes to the container registry with appropriate tags
### Required Secrets
Configure these secrets in your GitHub repository:
- `PACKAGES_USER`: Container registry username
- `PACKAGES_TOKEN`: Container registry token/password
## Image Structure
- **Build Stage**: Uses Node.js base image, installs Go and Hugo
- **Runtime Stage**: Uses nginx:alpine to serve static content (~50MB)
The build process:
1. Installs npm dependencies
2. Downloads Hugo modules
3. Builds static site with `hugo --gc --minify`
4. Copies built site to minimal nginx container

69
Dockerfile Normal file
View file

@ -0,0 +1,69 @@
# Build arguments for version pinning (matching devbox.json)
ARG NODE_VERSION=24.10.0
ARG GO_VERSION=1.25.1
ARG HUGO_VERSION=0.151.0
# Build stage - use same versions as local devbox environment
FROM node:${NODE_VERSION}-bookworm AS builder
# Get target architecture for multi-platform builds
ARG TARGETARCH
# Install Git (needed for Hugo's enableGitInfo)
RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
# Install Go (map TARGETARCH: amd64->amd64, arm64->arm64)
# ARGs declared before FROM must be redeclared inside the stage to be usable here.
ARG GO_VERSION
RUN wget -q https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz && \
tar -C /usr/local -xzf go${GO_VERSION}.linux-${TARGETARCH}.tar.gz && \
rm go${GO_VERSION}.linux-${TARGETARCH}.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"
ENV GOPATH="/go"
ENV PATH="${GOPATH}/bin:${PATH}"
# Install Hugo extended (map TARGETARCH: amd64->amd64, arm64->arm64)
ARG HUGO_VERSION
RUN wget -q https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_linux-${TARGETARCH}.tar.gz && \
tar -xzf hugo_extended_${HUGO_VERSION}_linux-${TARGETARCH}.tar.gz && \
mv hugo /usr/local/bin/ && \
rm hugo_extended_${HUGO_VERSION}_linux-${TARGETARCH}.tar.gz && \
hugo version
WORKDIR /src
# Copy package files and install npm dependencies
COPY package*.json ./
RUN npm ci
# Copy all source files
COPY . .
# Build Hugo site (Git info is taken from the current build context, not from .git)
# Hugo looks for .git, does not find it, and skips Git info automatically
RUN hugo --gc --minify
# Runtime stage - nginx to serve static content
FROM nginx:1.27-alpine
# Copy built site from builder
COPY --from=builder /src/public /usr/share/nginx/html
# Copy custom nginx config (written line by line; SPA-style fallback + gzip)
RUN echo 'server {' > /etc/nginx/conf.d/default.conf && \
echo ' listen 80;' >> /etc/nginx/conf.d/default.conf && \
echo ' server_name _;' >> /etc/nginx/conf.d/default.conf && \
echo ' root /usr/share/nginx/html;' >> /etc/nginx/conf.d/default.conf && \
echo ' index index.html;' >> /etc/nginx/conf.d/default.conf && \
echo '' >> /etc/nginx/conf.d/default.conf && \
echo ' location / {' >> /etc/nginx/conf.d/default.conf && \
echo ' try_files $uri $uri/ /index.html;' >> /etc/nginx/conf.d/default.conf && \
echo ' }' >> /etc/nginx/conf.d/default.conf && \
echo '' >> /etc/nginx/conf.d/default.conf && \
echo ' gzip on;' >> /etc/nginx/conf.d/default.conf && \
echo ' gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;' >> /etc/nginx/conf.d/default.conf && \
echo '}' >> /etc/nginx/conf.d/default.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]

112
LIKEC4-QUICKSTART.md Normal file
View file

@ -0,0 +1,112 @@
# LikeC4 Integration - Quick Start
Dieses Repository enthält jetzt die LikeC4-Architekturdokumentation aus edp-doc mit vollständiger Git-Historie.
## Was wurde gemacht?
1. ✅ **LikeC4-Modelle migriert** - `resources/likec4/` enthält alle C4-Modelle mit Git-Historie
2. ✅ **Hugo-Integration erstellt** - CSS, JS und Loader-Scripte für Hugo/Docsy
3. ✅ **Beispielseiten erstellt** - Dokumentation unter `content/en/docs/architecture/`
4. ✅ **Konfiguration** - `hugo.toml` und Layout-Hooks konfiguriert
## Nächste Schritte
### 1. Webcomponent generieren
```bash
cd resources/likec4
# Dependencies installieren (nur einmalig)
npm install
# Webcomponent generieren
npx likec4 codegen webcomponent \
--webcomponent-prefix likec4 \
--outfile ../../static/js/likec4-webcomponent.js
```
Dies erzeugt `static/js/likec4-webcomponent.js` (~2-3 MB).
### 2. Hugo Server starten
```bash
# Im Repository-Root
hugo server -D
# Öffne http://localhost:1313/docs/architecture/highlevelarch/
```
### 3. Änderungen committen
```bash
git add resources/likec4/
git add static/
git add layouts/
git add content/en/docs/architecture/
git add hugo.toml
git commit -m "feat: integrate LikeC4 architecture documentation from edp-doc"
```
## Verfügbare Seiten
- `/docs/architecture/` - Architektur-Übersicht
- `/docs/architecture/highlevelarch/` - High-Level Architektur mit interaktivem Diagramm
- `/docs/architecture/setup/` - Setup und Verwendungs-Anleitung
## Workflow für Architektur-Änderungen
1. **Modelle bearbeiten**: `.c4` Dateien in `resources/likec4/models/` oder `views/`
2. **Vorschau**: `cd resources/likec4 && npx likec4 start` (öffnet http://localhost:5173)
3. **Generieren**: Webcomponent neu generieren (siehe oben)
4. **Testen**: Hugo Server starten und Seiten prüfen
5. **Committen**: Sowohl `.c4` Dateien als auch `static/js/likec4-webcomponent.js`
## Technische Details
### Integration-Komponenten
- `resources/likec4/` - LikeC4 Quellcode (migriert mit Git-Historie)
- `static/js/likec4-loader.js` - Dynamischer Module Loader
- `static/css/likec4-styles.css` - Styling inkl. Dark Mode
- `layouts/partials/hooks/head-end.html` - Hugo Hook für JS/CSS Einbindung
- `hugo.toml` - Konfiguration (`params.likec4.enable = true`)
### Verwendung in Markdown
```html
<div class="likec4-container">
<div class="likec4-header">
Dein Diagramm-Titel
</div>
<likec4-view view-id="otc-faas" browser="true"></likec4-view>
<div class="likec4-loading" id="likec4-loading">
Loading...
</div>
</div>
```
### Verfügbare View-IDs finden
```bash
cd resources/likec4
grep -r "view\s\+\w" views/ models/ --include="*.c4"
```
## Häufige View-IDs
- `otc-faas` - OTC FaaS Deployment
- `edp` - EDP Übersicht
- `landscape` - Developer Landscape
- `edpbuilderworkflow` - Builder Workflow
- `keycloak`, `forgejo`, `argoCD`, etc. - Komponenten-Views
## Migration von edp-doc
Dieses Repository ist jetzt die primäre Quelle für LikeC4-Architektur. Das edp-doc Repository kann diese Modelle bei Bedarf als Git Submodule referenzieren.
## Support
Bei Problemen siehe:
- `resources/likec4/INTEGRATION.md` - Detaillierte Integration-Dokumentation
- `content/en/docs/architecture/setup.md` - Setup-Anleitung
- https://likec4.dev/ - LikeC4 Dokumentation

311
README-developer.md Normal file
View file

@ -0,0 +1,311 @@
# Developer Guide - IPCEI-CIS Developer Framework
## 🚀 Quick Start
### Prerequisites
Install [Devbox](https://www.jetify.com/devbox/):
```bash
curl -fsSL https://get.jetify.com/devbox | bash
```
### Setup
1. Clone the repository
2. Start devbox shell:
```bash
devbox shell
```
Devbox automatically installs all required tools:
- Hugo (v0.151.0+extended)
- Go (v1.25.1)
- Node.js (v24.10.0)
- Dart Sass
- htmltest
- go-task
## 📚 Technology Stack
### Hugo (v0.151.0+extended)
Hugo is a fast static site generator. This project uses Hugo in extended mode with:
- **Docsy Theme** (v0.12.0) - Documentation theme with responsive design
- **Bootstrap 5.3.8** - UI framework
- **PostCSS** - CSS processing
### Docsy Theme
Docsy is a Hugo theme optimized for technical documentation:
- Multi-language support
- Search functionality
- Navigation menu system
- Code syntax highlighting
- Responsive design
Key directories:
- `content/en/` - English content (Markdown files)
- `layouts/` - Custom layout overrides
- `assets/scss/` - Custom styles
- `static/` - Static assets (images, etc.)
### Task (Taskfile)
Task is a task runner / build tool that replaces Makefiles. It uses `Taskfile.yml` for defining tasks.
## 🛠️ Development Workflow
### Using Task (Recommended)
View all available tasks:
```bash
task
# or
task --list
```
Common tasks:
#### Development
```bash
task serve # Start Hugo dev server (http://localhost:1313)
task build # Build production site
task build:dev # Build development site
task clean # Clean build artifacts
```
#### Testing
```bash
task test # Run all tests
task test:quick # Run quick tests (without link checking)
task test:build # Test if Hugo builds successfully
task test:markdown # Lint Markdown files
task test:html # Validate HTML output
task test:links # Check all links (internal & external)
```
#### Dependencies
```bash
task deps:install # Install all dependencies
task deps:update # Update all dependencies
```
#### CI/CD
```bash
task ci # Run full CI pipeline locally
```
### Using NPM Scripts (Alternative)
If you prefer NPM:
```bash
npm test # All tests
npm run test:quick # Quick tests
npm run test:build # Build test
npm run test:markdown # Markdown linting
npm run test:html # HTML validation
npm run test:links # Link checking
```
### Using Hugo Directly
```bash
# Development server
hugo server
# Production build
hugo --gc --minify
# Check version
hugo version
```
## 📁 Project Structure
```
.
├── content/ # Content files (Markdown)
│ └── en/ # English content
│ ├── docs/ # Documentation
│ └── blog/ # Blog posts
├── layouts/ # Custom HTML templates
├── static/ # Static files
├── assets/ # Assets (SCSS, images)
├── public/ # Generated site (not in Git)
├── resources/ # Hugo cache (not in Git)
├── hugo.toml # Hugo configuration
├── go.mod # Hugo modules (Docsy theme)
├── Taskfile.yml # Task definitions
├── package.json # NPM dependencies & scripts
└── devbox.json # Devbox configuration
```
## 📝 Content Creation
### Creating New Pages
```bash
# Using Hugo
hugo new docs/concepts/my-page.md
hugo new blog/my-post.md
# Or create manually in content/en/
```
### Front Matter
Every content file needs front matter:
```yaml
---
title: "My Page Title"
description: "Page description"
date: 2025-10-23
weight: 10 # Order in navigation
---
Your content here...
```
### Using Docsy Shortcodes
Docsy provides helpful shortcodes:
#### Tabbed Panes
```markdown
{{</* tabpane */>}}
{{</* tab "Tab 1" */>}}
Content for tab 1
{{</* /tab */>}}
{{</* tab "Tab 2" */>}}
Content for tab 2
{{</* /tab */>}}
{{</* /tabpane */>}}
```
#### Code Blocks
```markdown
{{</* code lang="yaml" */>}}
key: value
{{</* /code */>}}
```
#### Alerts
```markdown
{{</* alert title="Note" */>}}
Important information
{{</* /alert */>}}
```
## 🧪 Testing
See [TESTING.md](TESTING.md) for detailed testing documentation.
Quick reference:
- `task test` - Run all tests before committing
- `task test:quick` - Fast checks during development
- Tests run automatically on GitHub Actions for PRs
## 🔧 Configuration
### Hugo Configuration (`hugo.toml`)
Main settings:
- `baseURL` - Site URL
- `title` - Site title
- `defaultContentLanguage` - Default language
- Module imports (Docsy theme)
### Docsy Configuration
Docsy-specific settings in `hugo.toml`:
```toml
[params]
github_repo = "your-repo"
github_branch = "main"
```
### Devbox Configuration (`devbox.json`)
Defines all development tools and their versions.
Update tools:
```bash
devbox update # Update all packages
task deps:update # Update all dependencies (devbox + npm + hugo modules)
```
## 🎨 Styling
Custom styles in `assets/scss/_variables_project.scss`:
```scss
// Override Bootstrap/Docsy variables
$primary: #your-color;
```
Hugo will process SCSS automatically with PostCSS and Autoprefixer.
## 🌐 Multi-Language Support
Add new language:
1. Create `content/<lang>/` directory
2. Add language config in `hugo.toml`:
```toml
[languages.<lang>]
languageName = "Language Name"
weight = 2
```
## 🐛 Troubleshooting
### "Module not found" errors
```bash
hugo mod get -u
hugo mod tidy
```
### PostCSS errors
```bash
npm install
```
### Build errors
```bash
task clean
task build
```
### Devbox issues
```bash
devbox update
devbox shell --refresh
```
## 📚 Resources
- [Hugo Documentation](https://gohugo.io/documentation/)
- [Docsy Documentation](https://www.docsy.dev/docs/)
- [Taskfile Documentation](https://taskfile.dev/)
- [Devbox Documentation](https://www.jetify.com/devbox/docs/)
## 🤝 Contributing
1. Create a feature branch
2. Make your changes
3. Run tests: `task test`
4. Commit with semantic messages:
- `feat(scope): add new feature`
- `fix(scope): fix bug`
- `docs(scope): update documentation`
- `test(scope): add tests`
- `chore(scope): maintenance`
5. Push and create pull request
## 📦 Deployment
Build for production:
```bash
task build
```
Output will be in `public/` directory, ready for deployment.

207
RELEASE.md Normal file
View file

@ -0,0 +1,207 @@
# Release Process
This document describes the release process for the IPCEI-CIS Developer Framework.
## Overview
The project uses **Semantic Versioning** (SemVer) for releases. Each release is triggered by a Git tag and automatically creates:
- Multi-platform Docker images (linux/amd64, linux/arm64)
- Forgejo release with release notes
- Automatically generated changelog
## Versioning Schema
We follow [Semantic Versioning 2.0.0](https://semver.org/):
- `MAJOR.MINOR.PATCH` (e.g., `v1.2.3`)
- **MAJOR**: Breaking changes (incompatible API changes)
- **MINOR**: New features (backwards compatible)
- **PATCH**: Bug fixes (backwards compatible)
## Creating a Release
### 1. Check Prerequisites
Ensure that:
- All tests are passing (`task test`)
- CI pipeline runs successfully
- All desired changes are in the `main` branch
- You have the latest version: `git pull origin main`
### 2. Determine Version
Determine the new version number based on the changes:
```bash
# Show current tag
git describe --tags --abbrev=0
# Show commits since last release
git log $(git describe --tags --abbrev=0)..HEAD --oneline
```
### 3. Create Tag
Create an annotated tag with the new version:
```bash
# For a new patch release (e.g., v1.2.3 → v1.2.4)
git tag -a v1.2.4 -m "Release v1.2.4"
# For a minor release (e.g., v1.2.3 → v1.3.0)
git tag -a v1.3.0 -m "Release v1.3.0"
# For a major release (e.g., v1.2.3 → v2.0.0)
git tag -a v2.0.0 -m "Release v2.0.0"
```
### 4. Push Tag
Push the tag to the repository - this triggers the release pipeline:
```bash
git push origin v1.2.4
```
### 5. Monitor Release Pipeline
The release pipeline (`release.yaml`) starts automatically:
1. Open the Actions tab in Forgejo
2. Monitor the `release` workflow
3. On success: Release is visible on the Releases page
## What Happens Automatically?
The release pipeline (`release.yaml`) performs the following steps:
1. **Build Docker Images**
- Multi-platform build (AMD64 + ARM64)
- Images are tagged with:
- `X.Y.Z` (exact version, e.g., `1.2.3`)
- `X.Y` (minor version, e.g., `1.2`)
- `X` (major version, e.g., `1`)
- `latest` (latest release)
- Note: the leading `v` of the Git tag is stripped by docker/metadata-action's `type=semver` patterns.
2. **Push Images**
- To the container registry (Forgejo Packages)
3. **Generate Changelog**
- Automatically from Git commits since last release
- Format: `- Commit Message (hash)`
4. **Create Forgejo Release**
- With generated release notes
- Contains build versions (Node, Go, Hugo)
- Docker pull commands
- Changelog
## Using Docker Images
After a successful release, the images are available:
```bash
# Specific version
docker pull <registry>/<repository>:1.2.3
# Latest
docker pull <registry>/<repository>:latest
# Major/Minor version
docker pull <registry>/<repository>:1
docker pull <registry>/<repository>:1.2
```
## Best Practices
### Commit Messages
Use meaningful commit messages, as they will appear in the changelog:
```bash
# Good
git commit -m "fix: correct multi-platform Docker build for ARM64"
git commit -m "feat: add automatic release pipeline"
git commit -m "docs: update RELEASE.md"
# Bad
git commit -m "fix stuff"
git commit -m "wip"
```
**Conventional Commits** help with categorization:
- `feat:` - New features
- `fix:` - Bug fixes
- `docs:` - Documentation
- `chore:` - Maintenance
- `refactor:` - Code restructuring
- `test:` - Tests
### Release Frequency
- **Patch releases**: As needed (bug fixes)
- **Minor releases**: Regular (new features)
- **Major releases**: Rare (breaking changes)
### Hotfixes
For urgent bug fixes:
1. Create branch from last release tag
2. Fix the bug
3. Create new patch release
```bash
git checkout v1.2.3
git checkout -b hotfix/critical-bug
# Implement fix
git commit -m "fix: critical bugfix"
git tag -a v1.2.4 -m "Release v1.2.4 - Hotfix"
git push origin v1.2.4
```
## Troubleshooting
### Release Pipeline Fails
1. **Check Secrets**: `PACKAGES_USER`, `PACKAGES_TOKEN` must be set
2. **Check Logs**: Open the failed workflow in the Actions tab
3. **Local Test**:
```bash
task build:oci-image
task test:oci-image
```
### Delete/Correct Tag
**Locally:**
```bash
git tag -d v1.2.3
```
**Remote:**
```bash
git push --delete origin v1.2.3
```
⚠️ **Warning**: Releases should not be deleted after they have been published!
### Edit Release Notes Afterwards
Release notes can be manually edited in Forgejo:
1. Go to Releases
2. Click on the release
3. Click "Edit"
## Further Information
- [Semantic Versioning](https://semver.org/)
- [Conventional Commits](https://www.conventionalcommits.org/)
- [Keep a Changelog](https://keepachangelog.com/)

62
TESTING.md Normal file
View file

@ -0,0 +1,62 @@
# Hugo Site Testing
Dieses Projekt verwendet mehrere automatisierte Tests zur Qualitätssicherung.
## Verfügbare Tests
### 1. Build-Test
```bash
npm run test:build
```
Prüft ob die Hugo-Seite überhaupt baut und zeigt Warnungen/Fehler an.
### 2. Markdown-Linting
```bash
npm run test:markdown
```
Validiert Markdown-Dateien auf Stilprobleme und Best Practices.
### 3. HTML-Validierung
```bash
npm run test:html
```
Prüft die generierte HTML auf HTML5-Konformität.
### 4. Link-Checker
```bash
npm run test:links
```
Testet alle internen und externen Links auf Gültigkeit.
### Alle Tests ausführen
```bash
npm test
```
### Schnelle Tests (ohne Link-Check)
```bash
npm run test:quick
```
## Konfigurationsdateien
- `.htmltest.yml` - Link-Checker-Konfiguration
- `.htmlvalidate.json` - HTML-Validierungs-Regeln
- `.markdownlint.json` - Markdown-Linting-Regeln
## CI/CD Integration
GitHub Actions führt diese Tests automatisch bei jedem Push/PR aus:
- `.github/workflows/test.yml`
## Lokale Entwicklung
Vor dem Commit empfohlen:
```bash
npm run test:quick # Schnelle Tests
```
Vor dem Push:
```bash
npm test # Alle Tests inkl. Link-Check
```

122
Taskfile.yml Normal file
View file

@ -0,0 +1,122 @@
version: '3'

vars:
  HUGO_CMD: hugo
  NPM_CMD: npm

tasks:
  default:
    desc: Show available tasks
    cmds:
      - task --list

  # Build tasks
  build:
    desc: Build Hugo site
    cmds:
      - "{{.HUGO_CMD}} --gc --minify"

  build:dev:
    desc: Build Hugo site for development
    cmds:
      - "{{.HUGO_CMD}}"

  serve:
    desc: Start Hugo dev server
    cmds:
      - "{{.HUGO_CMD}} server"

  clean:
    desc: Clean build artifacts
    cmds:
      - rm -rf public resources/_gen .hugo_build.lock

  # Test tasks
  test:
    desc: Run all tests
    deps:
      - test:build
      - test:markdown
      - test:html
      - test:links

  test:quick:
    desc: Run quick tests (without link check)
    deps:
      - test:build
      - test:markdown

  test:build:
    desc: Test Hugo build
    cmds:
      - "{{.HUGO_CMD}} --gc --minify --logLevel info"

  test:markdown:
    desc: Lint markdown files
    cmds:
      - "{{.NPM_CMD}} run test:markdown"

  test:html:
    desc: Validate HTML
    cmds:
      - "{{.NPM_CMD}} run test:html"

  test:links:
    desc: Check links
    cmds:
      - htmltest

  # Development tasks
  deps:install:
    desc: Install all dependencies
    cmds:
      - "{{.NPM_CMD}} install"
      - "{{.HUGO_CMD}} mod get -u"
      - "{{.HUGO_CMD}} mod tidy"

  deps:update:
    desc: Update dependencies
    cmds:
      - devbox update
      - "{{.NPM_CMD}} update"
      - "{{.HUGO_CMD}} mod get -u"

  # CI/CD
  ci:
    desc: Run CI pipeline locally
    deps:
      - test

  build:oci-image:
    desc: Build OCI/Docker image with versions from .env.versions
    cmds:
      - |
        set -a
        source .env.versions
        set +a
        echo "Building OCI image with versions:"
        echo "  NODE_VERSION=${NODE_VERSION}"
        echo "  GO_VERSION=${GO_VERSION}"
        echo "  HUGO_VERSION=${HUGO_VERSION}"
        docker build --network=host \
          --build-arg NODE_VERSION=${NODE_VERSION} \
          --build-arg GO_VERSION=${GO_VERSION} \
          --build-arg HUGO_VERSION=${HUGO_VERSION} \
          -t ipceicis-developerframework:latest \
          -t ipceicis-developerframework:$(git rev-parse --short HEAD) \
          .

  test:oci-image:
    desc: Test the built OCI image
    deps:
      - build:oci-image
    cmds:
      - |
        # Propagate the test result: the previous `curl ... && echo ✓ || echo ✗`
        # always exited 0, so a broken image could never fail this task.
        # The container is cleaned up in every case before exiting.
        echo "Starting container on port 8080..."
        docker run -d -p 8080:80 --name hugo-test ipceicis-developerframework:latest
        sleep 2
        status=0
        echo "Testing endpoint..."
        if curl -f http://localhost:8080 > /dev/null; then
          echo "✓ Container is running and responding"
        else
          echo "✗ Container test failed"
          status=1
        fi
        echo "Cleaning up..."
        docker stop hugo-test
        docker rm hugo-test
        exit $status

69
VERSIONS.md Normal file
View file

@ -0,0 +1,69 @@
# Version Management
## Single Source of Truth: `.env.versions`
All tool versions are centrally managed in `.env.versions`:
```bash
NODE_VERSION=24.10.0
GO_VERSION=1.25.1
HUGO_VERSION=0.151.0
```
## Where are versions used?
1. **devbox.json** - Local development environment (manual sync required)
2. **Dockerfile** - Build arguments with defaults
3. **.github/workflows/ci.yaml** - CI/CD pipeline (automatic)
4. **scripts/get-versions.sh** - Helper script for local builds
## Updating Versions
### Step 1: Update `.env.versions`
Edit the file with new versions:
```bash
NODE_VERSION=24.12.0
GO_VERSION=1.25.2
HUGO_VERSION=0.152.0
```
### Step 2: Update `devbox.json`
Manually sync the versions in `devbox.json`:
```json
{
"packages": [
"hugo@0.152.0",
"go@1.25.2",
"nodejs@24.12.0",
...
]
}
```
### Step 3: Rebuild devbox environment
```bash
devbox shell --refresh
```
### Step 4: Test Docker build
```bash
source scripts/get-versions.sh
# Follow the printed docker build command
```
## Why not automatic devbox sync?
- devbox.json uses a different version format (e.g., `@latest` vs specific versions)
- devbox package names may differ from Docker image names
- Keeps devbox.json simple and readable
- Manual sync ensures intentional version updates
## CI/CD
The GitHub Actions workflow automatically loads versions from `.env.versions` - no manual intervention needed.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -1,6 +1,460 @@
/*
 * Telekom-inspired Theme Variables
 * Based on https://edp.buildth.ing Telekom Design System
 *
 * Add styles or override variables from the theme here.
 */
// The merged file left the scaffold sentence above as a bare (un-commented)
// line and an orphan `*/` after the overrides, which breaks SCSS compilation;
// both are folded into / removed from the header comment here.

// Bootstrap/Docsy Variable Overrides (must be before imports)
$primary: #E20074 !default;
$secondary: #B6B6B6 !default;
$success: #00b367 !default;
$info: #0070ad !default;
$warning: #ffcc00 !default;
$danger: #d52b1e !default;
$dark: #000000 !default;
$light: #f9fafb !default;

// Link colors
$link-color: #E20074 !default;
$link-hover-color: #C2005E !default;

// Body
$body-bg: #ffffff !default;
$body-color: #000000 !default;

// Navbar
$navbar-light-color: #000000 !default;
$navbar-light-hover-color: #E20074 !default;
$navbar-light-active-color: #E20074 !default;

// Fonts
$font-family-sans-serif: 'TeleNeo', -apple-system, "Segoe UI", system-ui, Roboto, "Helvetica Neue", Arial, sans-serif !default;
$font-family-base: $font-family-sans-serif !default;
// Telekom TeleNeo Fonts
// One @font-face per TTF weight file, all registered under the single
// 'TeleNeo' family so ordinary font-weight values select the right face.
// The hashed filenames pin the exact asset revision.
@font-face {
  font-family: 'TeleNeo';
  src: url('../fonts/TeleNeoOffice-Thin.53627df9.ttf') format('truetype');
  font-weight: 300; // Thin
  font-style: normal;
}

@font-face {
  font-family: 'TeleNeo';
  src: url('../fonts/TeleNeoOffice-Regular.b0a2cff1.ttf') format('truetype');
  font-weight: 400; // Regular
  font-style: normal;
}

@font-face {
  font-family: 'TeleNeo';
  src: url('../fonts/TeleNeoOffice-Medium.79fb426d.ttf') format('truetype');
  font-weight: 500; // Medium
  font-style: normal;
}

// NOTE(review): Bold is mapped to 600 and ExtraBold to 700, while the CSS
// convention is Bold=700 / ExtraBold=800 — confirm this shift is intentional.
@font-face {
  font-family: 'TeleNeo';
  src: url('../fonts/TeleNeoOffice-Bold.a7bb592b.ttf') format('truetype');
  font-weight: 600; // Bold
  font-style: normal;
}

@font-face {
  font-family: 'TeleNeo';
  src: url('../fonts/TeleNeoOffice-ExtraBold.fbe9fe42.ttf') format('truetype');
  font-weight: 700; // ExtraBold
  font-style: normal;
}
// Primary Colors - Telekom Magenta
// Light-theme design tokens as CSS custom properties; the component rules
// below and the dark-mode media query both resolve against these via var().
:root {
  // Telekom Primary Color (Magenta)
  --color-primary: #E20074;
  --color-primary-contrast: #ffffff; // foreground used on primary surfaces
  --color-primary-dark-1: #C2005E;   // hover shade
  --color-primary-dark-2: #A5004D;   // active/focus shade
  --color-primary-dark-3: #87003D;
  --color-primary-light-1: #E7338A;
  --color-primary-light-2: #EC66A1;
  --color-primary-light-3: #F299B8;
  --color-primary-light-4: #F7CCCF;
  --color-primary-light-5: #FCEFF6;  // subtle hover background
  --color-primary-light-6: #FFF5FA;  // subtle active/selected background

  // Secondary Colors
  --color-secondary: #B6B6B6;
  --color-secondary-dark: #6a7178;
  --color-secondary-light: #f9fafb;

  // Semantic Colors
  --color-success: #00b367;
  --color-warning: #ffcc00;
  --color-error: #d52b1e;
  --color-info: #0070ad;

  // Text Colors
  --color-text: #000000;
  --color-text-light: #666666;
  --color-text-dark: #000000;

  // Background Colors
  --color-body: #ffffff;
  --color-card: #F1F1F1;
  --color-hover: #F1F1F1;
  --color-active: #F1F1F1;

  // Navigation
  --color-nav-bg: #ffffff;
  --color-nav-text: #000000;
  --nav-border-color: #B6B6B6;

  // UI Elements
  --color-input-background: #ffffff;
  --color-input-border: #cccccc;
  --color-input-text: #000000;
  --color-box-body: #f2f2f2;
  --color-box-header: #e6e6e6;

  // Shadows & Overlays
  --color-shadow: rgba(0, 0, 0, 0.15);
  --color-overlay-backdrop: rgba(0, 0, 0, 0.5);

  // Font Settings
  --font-family-base: 'TeleNeo', -apple-system, "Segoe UI", system-ui, Roboto, "Helvetica Neue", Arial, sans-serif;
  --nav-text-font-weight: 600;
}
// Apply TeleNeo font globally
body {
  font-family: var(--font-family-base);
}

// Dark Mode Support
// Re-assigns the :root tokens when the OS/browser prefers a dark scheme;
// every rule that reads var(--...) picks the change up automatically.
// NOTE(review): --color-primary-light-3..6 are NOT overridden here, so
// hover/active backgrounds stay near-white in dark mode — confirm intended.
@media (prefers-color-scheme: dark) {
  :root {
    // Primary Colors remain same
    --color-primary: #E20074;
    --color-primary-contrast: #000000;

    // Dark Mode Adjustments (the dark/light ramps are partially swapped)
    --color-primary-dark-1: #E7338A;
    --color-primary-dark-2: #EC66A1;
    --color-primary-light-1: #C2005E;
    --color-primary-light-2: #A5004D;

    // Secondary Colors for Dark Mode
    --color-secondary: #1c1c1e;
    --color-secondary-dark: #4D4D4D;
    --color-secondary-light: #0D0D0D;

    // Text Colors
    --color-text: #FFFFFF;
    --color-text-light: #CCCCCC;
    --color-text-dark: #FFFFFF;

    // Background Colors
    --color-body: #000000;
    --color-card: #1c1c1e;
    --color-hover: #1c1c1e;
    --color-active: #0D0D0D;

    // Navigation
    --color-nav-bg: #000000;
    --color-nav-text: #FFFFFF;

    // UI Elements
    --color-input-background: #1c1c1e;
    --color-input-border: #4D4D4D;
    --color-input-text: #FFFFFF;
    --color-box-body: #000000;
    --color-box-header: #1A1A1A;

    // Semantic Colors for Dark Mode
    --color-success: #00A94F;
    --color-warning: #FFCC00;
    --color-error: #D52B1E;
    --color-info: #0070AD;

    // Shadows
    --color-shadow: rgba(0, 0, 0, 0.35);
  }
}
// Telekom-inspired Component Styling

// Top navigation bar: themed background/text, magenta hover/active
.td-navbar {
  background-color: var(--color-nav-bg) !important;
  border-bottom: 1px solid var(--nav-border-color);

  .navbar-brand,
  .nav-link {
    color: var(--color-nav-text) !important;
    font-weight: var(--nav-text-font-weight);
  }

  .nav-link:hover,
  .nav-link.active {
    color: var(--color-primary) !important;
    background: transparent !important;
  }
}

// Primary Buttons - Telekom Magenta
// !important is used throughout to beat Bootstrap/Docsy specificity
.btn-primary {
  background-color: var(--color-primary) !important;
  border-color: var(--color-primary) !important;
  color: var(--color-primary-contrast) !important;

  &:hover {
    background-color: var(--color-primary-dark-1) !important;
    border-color: var(--color-primary-dark-1) !important;
  }

  &:active,
  &:focus {
    background-color: var(--color-primary-dark-2) !important;
    border-color: var(--color-primary-dark-2) !important;
  }
}

// Links
a {
  color: var(--color-primary);

  &:hover {
    color: var(--color-primary-dark-1);
  }
}

// Cards with Telekom Style
.card {
  background-color: var(--color-card);
  border: 1px solid var(--nav-border-color);

  &:hover {
    background-color: var(--color-hover);
  }
}
// Active/Selected States - REMOVED harsh black backgrounds
// Now using soft Telekom colors instead

// Sidebar Navigation
.td-sidebar-nav {
  .td-sidebar-link {
    &:hover {
      background-color: var(--color-primary-light-5) !important;
      color: var(--color-primary) !important;
    }

    &.active {
      background-color: var(--color-primary-light-6);
      color: var(--color-primary);
      font-weight: 500;
      border-left: 3px solid var(--color-primary);
    }
  }

  // All list items in sidebar
  li a {
    &:hover {
      background-color: var(--color-primary-light-5) !important;
      color: var(--color-primary) !important;
    }
  }
}

// Main navigation tabs
.td-sidebar {
  .td-sidebar-nav__section {
    .ul-1 > li > a {
      &.active,
      &.td-sidebar-link--active {
        background-color: var(--color-primary-light-6) !important;
        color: var(--color-primary) !important;
        font-weight: 500;
        border-left: 3px solid var(--color-primary);
      }

      &:hover {
        background-color: var(--color-primary-light-5) !important;
        color: var(--color-primary) !important;
      }
    }

    // All nested levels
    li a:hover {
      background-color: var(--color-primary-light-5) !important;
      color: var(--color-primary) !important;
    }
  }
}

// Top navigation breadcrumb area
.td-sidebar__inner {
  .td-sidebar-nav__section-title {
    &.active {
      background-color: var(--color-primary-light-5) !important;
      color: var(--color-primary) !important;
    }
  }
}

// Breadcrumb navigation in header
.breadcrumb {
  .active {
    color: var(--color-primary) !important;
  }

  a:hover {
    color: var(--color-primary-dark-1) !important;
  }
}

// Remove harsh black backgrounds globally
// NOTE(review): this matches EVERY element carrying .active/.selected, not
// just navigation — confirm it does not clobber unrelated components
// (tabs, carousels, third-party widgets).
.active,
.selected {
  background-color: var(--color-primary-light-6) !important;
  color: var(--color-primary) !important;
}

// Softer hover states
// NOTE(review): a universal *:hover transition triggers style work on every
// hover anywhere on the page — consider scoping to interactive elements.
*:hover {
  transition: all 0.2s ease-in-out;
}

// Override any dark/black hover backgrounds in navigation
nav, .td-sidebar, .td-sidebar-nav {
  a:hover,
  li:hover > a,
  .nav-link:hover {
    background-color: var(--color-primary-light-5) !important;
    color: var(--color-primary) !important;
  }
}
// Code Blocks
// NOTE(review): both declarations in this shared rule are overridden by the
// more specific `code` and `pre` rules below, so it is effectively dead.
pre,
code {
  background-color: var(--color-box-body);
  border: 1px solid var(--color-input-border);
}

// Inline code (backticks in text): magenta-tinted pill styling
code {
  background-color: var(--color-primary-light-6);
  color: var(--color-primary-dark-2);
  padding: 2px 6px;
  border-radius: 3px;
  border: 1px solid var(--color-primary-light-3);
  font-size: 0.9em;
}

// Code blocks (fenced code)
pre {
  background-color: var(--color-box-body);
  border: 1px solid var(--color-input-border);
  padding: 1rem;
  border-radius: 4px;

  // Reset the inline-code pill styling for code nested inside a block
  code {
    background-color: transparent;
    border: none;
    padding: 0;
    color: inherit;
  }
}

// Tables
table {
  thead {
    background-color: var(--color-box-header);
  }

  tbody tr:hover {
    background-color: var(--color-hover);
  }
}
// Alerts/Notifications: map Bootstrap alert variants onto the semantic tokens
.alert-success {
  background-color: var(--color-success);
  border-color: var(--color-success);
}

.alert-warning {
  background-color: var(--color-warning);
  border-color: var(--color-warning);
  color: #000000; // dark text for contrast on the yellow background
}

.alert-danger {
  background-color: var(--color-error);
  border-color: var(--color-error);
}

.alert-info {
  background-color: var(--color-info);
  border-color: var(--color-info);
}

// Docsy Homepage Components
.td-cover-block {
  background-color: var(--color-primary) !important;

  h1, h2, h3, h4, h5, h6, p {
    color: var(--color-primary-contrast) !important;
  }
}

.td-box {
  background-color: var(--color-card);
  border: 1px solid var(--nav-border-color);

  &:hover {
    background-color: var(--color-hover);
    border-color: var(--color-primary);
  }

  // BEM-style variants (.td-box--primary / .td-box--secondary)
  &--primary {
    background-color: var(--color-primary);
    border-color: var(--color-primary);
    color: var(--color-primary-contrast);
  }

  &--secondary {
    background-color: var(--color-secondary);
    border-color: var(--color-secondary);
  }
}

// Hero/Cover sections: magenta gradient with contrast headings
.td-cover {
  background: linear-gradient(135deg, var(--color-primary) 0%, var(--color-primary-dark-2) 100%);

  .display-1, .display-2, .display-3, .display-4 {
    color: var(--color-primary-contrast) !important;
  }
}

// Section backgrounds
.td-section {
  &--primary {
    background-color: var(--color-primary-light-6);
  }

  &--secondary {
    background-color: var(--color-secondary-light);
  }
}

// Feature boxes
.td-feature {
  border: 1px solid var(--nav-border-color);
  background-color: var(--color-card);

  &:hover {
    border-color: var(--color-primary);
    box-shadow: 0 4px 12px var(--color-shadow);
  }
}

View file

@ -0,0 +1,84 @@
# Review
1) 09h35 Marco
business plan
issue: value of software, depreciation
FTE: around 100 overall, 3 full teams of developers
tax discussion
10h04 Discussions
2) 10h10 Julius
3) 10h27 Sebastiano - DevDay bis 10h40
schriften bei votes größer - fragen sollten lesbar sein!
devops is dead .... claim
4) Stephan bis 10h55
5) christopher 10h58
6) robert 11:11
* app
* devops-pipelines
* edp in osc deployed
7) michal has nothing to show
8) evgenii wants to finish -- 11:30
9) patrick 11:32
====
projekt management meeting
workshops, externe teams
customer episodes
wem was wo prinzipien
|
Rollen, Personas
weiter die perspektive des nutzers bekommen, inneres verlangen eines developers, mein anspruch an das EDP
(bekommen wir das hin, möchte ich damit arbeiten)
level 2 erklimmen
workshops halten
senioren bekommen
level1: source code structure, artefakte builden, revision control, branching model, e.g. pull requesting, tests der software, local debugging
level2: automatisierung des artefakte-builds, versionsmgmt, milestones, tickets, issues, compliance and security
level3: deployment auf stages, feedback pipeline verhalten
level4: feedback app-verhalten (logs, metrics, alerts) + development loop
level5: 3rd level support in production
level1: coding
source code structure, artefakte builden, revision control, branching model, e.g. pull requesting, tests der software, local debugging
level2: reaching the outside world with output
automatisierung des artefakte-builds, versionsmgmt, milestones, tickets, issues, compliance and security
level3: run the app anywhere
deployment auf stages, feedback pipeline verhalten
level4: monitoring the app
feedback app-verhalten (logs, metrics, alerts) + development loop
level5: support
3rd level support in production (or any outer stage)
sprint 4
leveraging säule
eigene app säule
chore säule

View file

@ -0,0 +1,9 @@
---
title: "Architecture"
linkTitle: "Architecture"
weight: 3
description: >
System architecture documentation and interactive diagrams
---
This section contains architecture documentation for the IPCEI-CIS Developer Framework, including interactive C4 architecture diagrams.

View file

@ -0,0 +1,125 @@
---
title: "High Level Architecture"
linkTitle: "High Level Architecture"
weight: 1
description: >
Interactive high-level architecture overview of the Enterprise Development Platform
---
This document describes the high-level architecture of our Enterprise Development Platform (EDP) system.
## Interactive Architecture Diagram
<div class="likec4-container">
<div class="likec4-header">
Enterprise Development Platform - OTC FaaS Deployment Architecture
</div>
<likec4-view view-id="otc-faas" browser="true"></likec4-view>
<div class="likec4-loading" id="likec4-loading">
Loading architecture diagram...
</div>
</div>
<script>
// Enhanced loading check with fallback
document.addEventListener('DOMContentLoaded', function() {
let attempts = 0;
const maxAttempts = 10;
function checkLikeC4Loading() {
attempts++;
const component = document.querySelector('likec4-view');
const loading = document.getElementById('likec4-loading');
if (component && component.shadowRoot && component.shadowRoot.children.length > 0) {
if (loading) loading.style.display = 'none';
console.log('LikeC4 component loaded successfully');
} else if (attempts >= maxAttempts) {
console.warn('LikeC4 component failed to load');
if (loading) {
loading.innerHTML = 'Interactive diagram failed to load. <br><small>Please ensure JavaScript is enabled and the webcomponent is generated.</small>';
loading.style.color = '#dc3545';
}
} else {
setTimeout(checkLikeC4Loading, 1000);
}
}
// Check if LikeC4 loader is available
if (typeof window.customElements !== 'undefined') {
setTimeout(checkLikeC4Loading, 1500);
} else {
const loading = document.getElementById('likec4-loading');
if (loading) {
loading.innerHTML = 'Interactive diagrams require a modern browser with JavaScript enabled.';
loading.style.color = '#dc3545';
}
}
});
</script>
{{< alert title="Interactive Diagram" >}}
The diagram above is interactive when viewed in a compatible browser.
You can click on components to explore the architecture details.
**Note:** The interactive diagram requires the LikeC4 webcomponent to be generated.
See the [setup instructions]({{< ref "/docs/architecture/setup" >}}) for details.
{{< /alert >}}
## Architecture Overview
The Enterprise Development Platform consists of several key components working together to provide a comprehensive development and deployment environment.
### Key Components
1. **OTC Foundry** - Central management and orchestration layer
2. **Per-Tenant EDP** - Isolated development environments for each tenant
3. **FaaS Environment** - Function-as-a-Service deployment targets on Open Telekom Cloud
4. **Cloud Services** - Managed services including databases, storage, and monitoring
### Deployment Environments
- **Development Environment** (`*.t09.de`) - For platform team development and testing
- **Production Environment** (`*.buildth.ing`) - For production workloads and tenant services
## Component Details
The interactive diagram above shows the relationships between different components and how they interact within the system architecture. You can explore the diagram by clicking on different elements to see more details.
### Infrastructure Components
- **Kubernetes Clusters** - Container orchestration using OTC CCE (Cloud Container Engine)
- **ArgoCD** - GitOps continuous deployment and application lifecycle management
- **Forgejo** - Git repository management and CI/CD pipelines
- **Observability Stack** - Monitoring (Prometheus, Grafana), logging (Loki), and alerting
### Security and Management
- **Keycloak** - Identity and access management (IAM)
- **OpenBao** - Secrets management (Hashicorp Vault fork)
- **External Secrets Operator** - Kubernetes secrets integration
- **Crossplane** - Infrastructure as Code and cloud resource provisioning
### Developer Experience
- **Backstage** - Internal developer portal and service catalog
- **Forgejo Actions** - CI/CD pipeline execution
- **Development Workflows** - GitOps-based inner and outer loop workflows
## Setup and Maintenance
To update or modify the architecture diagrams:
1. Edit the `.c4` files in `resources/likec4/`
2. Regenerate the webcomponent:
```bash
cd resources/likec4
npx likec4 codegen webcomponent \
--webcomponent-prefix likec4 \
--outfile ../../static/js/likec4-webcomponent.js
```
3. Commit both the model changes and the regenerated JavaScript file
For more information, see the [LikeC4 Integration Guide]({{< ref "/docs/architecture/setup" >}}).

View file

@ -0,0 +1,291 @@
---
title: "LikeC4 Setup Guide"
linkTitle: "Setup"
weight: 10
description: >
How to set up and use LikeC4 interactive architecture diagrams
---
This guide explains how to set up and use LikeC4 interactive architecture diagrams in this documentation.
## Overview
LikeC4 enables you to create interactive C4 architecture diagrams as code. The diagrams are defined in `.c4` files and compiled into a web component that can be embedded in any HTML page.
## Prerequisites
- Node.js (v18 or later)
- npm or yarn
## Initial Setup
### 1. Install Dependencies
Navigate to the LikeC4 directory and install dependencies:
```bash
cd resources/likec4
npm install
```
### 2. Generate the Web Component
Create the web component that Hugo will load:
```bash
npx likec4 codegen webcomponent \
--webcomponent-prefix likec4 \
--outfile ../../static/js/likec4-webcomponent.js
```
This command:
- Reads all `.c4` files from `models/` and `views/`
- Generates a single JavaScript file with all architecture views
- Outputs to `static/js/likec4-webcomponent.js`
### 3. Verify Integration
The integration should already be configured in:
- `hugo.toml` - Contains `params.likec4.enable = true`
- `layouts/partials/hooks/head-end.html` - Loads CSS and loader script
- `static/css/likec4-styles.css` - Diagram styling
- `static/js/likec4-loader.js` - Dynamic module loader
## Directory Structure
```
resources/likec4/
├── models/ # C4 model definitions
│ ├── components/ # Component models
│ ├── containers/ # Container models
│ ├── context/ # System context
│ └── code/ # Code-level workflows
├── views/ # View definitions
│ ├── deployment/ # Deployment views
│ ├── edp/ # EDP views
│ ├── high-level-concept/ # Conceptual views
│ └── dynamic/ # Process flows
├── package.json # Dependencies
└── INTEGRATION.md # Integration docs
```
## Using in Documentation
### Basic Usage
Add this to any Markdown file:
```html
<div class="likec4-container">
<div class="likec4-header">
Your Diagram Title
</div>
<likec4-view view-id="YOUR-VIEW-ID" browser="true"></likec4-view>
<div class="likec4-loading" id="likec4-loading">
Loading architecture diagram...
</div>
</div>
```
### Available View IDs
To find available view IDs, search the `.c4` files:
```bash
cd resources/likec4
grep -r "view\s\+\w" views/ models/ --include="*.c4"
```
Common views:
- `otc-faas` - OTC FaaS deployment
- `edp` - EDP overview
- `landscape` - Developer landscape
- `edpbuilderworkflow` - Builder workflow
- `keycloak` - Keycloak component
### With Hugo Alert
Combine with Docsy alerts for better UX:
```markdown
<div class="likec4-container">
<div class="likec4-header">
System Architecture
</div>
<likec4-view view-id="otc-faas" browser="true"></likec4-view>
<div class="likec4-loading" id="likec4-loading">
Loading...
</div>
</div>
{{</* alert title="Note" */>}}
Click on components in the diagram to explore the architecture.
{{</* /alert */>}}
```
## Workflow for Changes
### 1. Modify Architecture Models
Edit the `.c4` files in `resources/likec4/`:
```bash
# Edit a model
vi resources/likec4/models/containers/argocd.c4
# Or edit a view
vi resources/likec4/views/deployment/otc/otc-faas.c4
```
### 2. Preview Changes Locally
Use the LikeC4 CLI to preview:
```bash
cd resources/likec4
# Start preview server
npx likec4 start
# Opens browser at http://localhost:5173
```
### 3. Regenerate Web Component
After making changes:
```bash
cd resources/likec4
npx likec4 codegen webcomponent \
--webcomponent-prefix likec4 \
--outfile ../../static/js/likec4-webcomponent.js
```
### 4. Test in Hugo
Start the Hugo development server:
```bash
# From repository root
hugo server -D
# Open http://localhost:1313
```
### 5. Commit Changes
Commit both the model files and the regenerated web component:
```bash
git add resources/likec4/
git add static/js/likec4-webcomponent.js
git commit -m "feat: update architecture diagrams"
```
## Advanced Configuration
### Custom Styling
Modify `static/css/likec4-styles.css` to customize appearance:
```css
.likec4-container {
height: 800px; /* Adjust height */
border-radius: 8px; /* Rounder corners */
}
```
### Multiple Diagrams Per Page
You can include multiple diagrams on a single page:
```html
<!-- First diagram -->
<div class="likec4-container">
<div class="likec4-header">Deployment View</div>
<likec4-view view-id="otc-faas" browser="true"></likec4-view>
<div class="likec4-loading">Loading...</div>
</div>
<!-- Second diagram -->
<div class="likec4-container">
<div class="likec4-header">Component View</div>
<likec4-view view-id="edp" browser="true"></likec4-view>
<div class="likec4-loading">Loading...</div>
</div>
```
### Disable for Specific Pages
Add to page front matter:
```yaml
---
title: "My Page"
params:
disable_likec4: true
---
```
Then update `layouts/partials/hooks/head-end.html`:
```html
{{ if and .Site.Params.likec4.enable (not .Params.disable_likec4) }}
<!-- LikeC4 scripts -->
{{ end }}
```
## Troubleshooting
### Diagram Not Loading
1. **Check browser console** (F12 → Console)
2. **Verify webcomponent exists:**
```bash
ls -lh static/js/likec4-webcomponent.js
```
3. **Regenerate if missing:**
```bash
cd resources/likec4
npm install
npx likec4 codegen webcomponent \
--webcomponent-prefix likec4 \
--outfile ../../static/js/likec4-webcomponent.js
```
### View Not Found
- Check view ID matches exactly (case-sensitive)
- Search for the view in `.c4` files:
```bash
grep -r "view otc-faas" resources/likec4/
```
### Styling Issues
- Clear browser cache (Ctrl+Shift+R)
- Check `static/css/likec4-styles.css` is loaded in browser DevTools → Network
### Build Errors
If LikeC4 codegen fails:
```bash
cd resources/likec4
rm -rf node_modules package-lock.json
npm install
```
## Resources
- [LikeC4 Documentation](https://likec4.dev/)
- [C4 Model](https://c4model.com/)
- [Web Components](https://developer.mozilla.org/en-US/docs/Web/Web_Components)
- [Hugo Documentation](https://gohugo.io/documentation/)
## Migration Notes
This LikeC4 integration was migrated from the edp-doc repository. This repository (`ipceicis-developerframework`) is now the primary source for architecture models.
The edp-doc repository can reference these models via git submodule if needed.

View file

@ -10,25 +10,25 @@ ArgoCD is considered set in stone as the tool to manage the deployment of applic
In general, there are 2 decisions to make:
- What tools should we use to execute the pipeline?
- What tools should we use to compose the pipeline?
* What tools should we use to execute the pipeline?
* What tools should we use to compose the pipeline?
The following use-cases should be considered for this decision:
- **User who wants to manage their own runners (???)**
- User who only wants to use our golden path
- User who wants to use our golden path and add custom actions
- User who wants to use their own templates and import some of our actions
- User who wants to import an existing GitHub repository with a pipeline
* **User who wants to manage their own runners (???)**
* User who only wants to use our golden path
* User who wants to use our golden path and add custom actions
* User who wants to use their own templates and import some of our actions
* User who wants to import an existing GitHub repository with a pipeline
## Considered Options
- Argo Workflows + Events
- Argo Workflows + Events + Additional Composition tool
- Forgejo Actions
- Forgejo Actions + Additional Composition tool
- Dagger (as Engine)
- Shuttle (as Engine)
* Argo Workflows + Events
* Argo Workflows + Events + Additional Composition tool
* Forgejo Actions
* Forgejo Actions + Additional Composition tool
* Dagger (as Engine)
* Shuttle (as Engine)
## Decision Outcome
@ -40,87 +40,87 @@ TBD
**Pro**
- integration with ArgoCD
- ability to trigger additional workflows based on events.
- level of maturity and community support.
* integration with ArgoCD
* ability to trigger additional workflows based on events.
* level of maturity and community support.
**Con**
- Ability to self-host runners?
- way how composition for pipelines works (based on Kubernetes CRDs)
- Templates must be available in the cluster where the pipelines are executed, so any imported templates must be applied into the cluster before the pipeline can be executed and cannot simply reference a repository
- This makes it difficult to import existing templates from other repositories when using self-hosted runners
- This also makes it difficult to use our golden path, or at least we will need to provide a way to import our golden path into the cluster
- This also makes the split of every component has its own repo very difficult
- additional UI to manage the pipeline
- Additional complexity
* Ability to self-host runners?
* way how composition for pipelines works (based on Kubernetes CRDs)
* Templates must be available in the cluster where the pipelines are executed, so any imported templates must be applied into the cluster before the pipeline can be executed and cannot simply reference a repository
* This makes it difficult to import existing templates from other repositories when using self-hosted runners
* This also makes it difficult to use our golden path, or at least we will need to provide a way to import our golden path into the cluster
* This also makes the "every component has its own repo" split very difficult
* additional UI to manage the pipeline
* Additional complexity
### Argo Workflows + Events + Additional Composition tool
**Pro**
- Composability can be offloaded to another tool
* Composability can be offloaded to another tool
**Con**
- All cons of the previous option (except composability)
- Additional complexity by adding another tool
* All cons of the previous option (except composability)
* Additional complexity by adding another tool
### Forgejo Actions
**Pro**
- tight integration with GitHub Actions providing a familiar interface for developers and a vast catalog of actions to choose from
- ability to compose pipelines without relying on another tool
- Self-hosting of runners possible
- every component can have its own repository and use different tools (e.g. written in go, bash, python etc.)
* tight integration with GitHub Actions providing a familiar interface for developers and a vast catalog of actions to choose from
* ability to compose pipelines without relying on another tool
* Self-hosting of runners possible
* every component can have its own repository and use different tools (e.g. written in go, bash, python etc.)
**Con**
- level of maturity - will require additional investments to provide a production-grade system
* level of maturity - will require additional investments to provide a production-grade system
### Forgejo Actions + Additional Tool
**Pro**
- may be possible to use GitHub actions alongside another tool
* may be possible to use GitHub actions alongside another tool
**Con**
- additional complexity by adding another tool
* additional complexity by adding another tool
### Shuttle
**Pro**
- Possibility to clearly define interfaces for pipeline steps
- Relatively simple
* Possibility to clearly define interfaces for pipeline steps
* Relatively simple
**Con**
- basically backed by only one company
- **centralized templates**, so no mechanism for composing pipelines from multiple repositories
* basically backed by only one company
* **centralized templates**, so no mechanism for composing pipelines from multiple repositories
### Dagger
**Pro**
- Pipeline as code
- if it runs it should run anywhere and produce the "same" / somewhat stable results
- build environments are defined within containers / the dagger config. Dagger is the only dependency one has to install on a machine
- DX is extremely nice, especially if you have to debug (image) builds, also type safety due to the ability to code your build in a strong language
- additional tooling, like trivy, is added to a build pipeline with low effort due to containers and existing plugin/wrappers
- you can create complex test environments similar to test containers and docker compose
* Pipeline as code
* if it runs it should run anywhere and produce the "same" / somewhat stable results
* build environments are defined within containers / the dagger config. Dagger is the only dependency one has to install on a machine
* DX is extremely nice, especially if you have to debug (image) builds, also type safety due to the ability to code your build in a strong language
* additional tooling, like trivy, is added to a build pipeline with low effort due to containers and existing plugin/wrappers
* you can create complex test environments similar to test containers and docker compose
**Con**
- relies heavily containers, which might not be available some environments (due to policy etc), it also has an effect on reproducibility and verifiability
- as a dev you need to properly understand containers
- dagger engine has to run privileged locally and/or in the cloud which might be a blocker or at least a big pain in the ...
* relies heavily on containers, which might not be available in some environments (due to policy etc.); it also has an effect on reproducibility and verifiability
* as a dev you need to properly understand containers
* dagger engine has to run privileged locally and/or in the cloud which might be a blocker or at least a big pain in the ...
**Suggestion Patrick**
- dagger is a heavy weight and might not be as productive in a dev workflow as it seems (setup lsp etc)
- it might be too opinionated to force on teams, especially since it is not near mainstream enough, community might be too small
- it feels like dagger gets you 95% of the way, but the remaining 5% are a real struggle
- if we like it, we should check the popularity in the dev community before further considering as it has a direct impact on teams and their preferences
* dagger is a heavyweight and might not be as productive in a dev workflow as it seems (setup lsp etc)
* it might be too opinionated to force on teams, especially since it is not near mainstream enough, community might be too small
* it feels like dagger gets you 95% of the way, but the remaining 5% are a real struggle
* if we like it, we should check the popularity in the dev community before further considering as it has a direct impact on teams and their preferences

View file

@ -1,14 +0,0 @@
---
title: Deployment
weight: 3
---
> **Mantra**:
> 1. Everything as Code.
> 1. Cloud natively deployable everywhere.
> 1. Ramping up and tearing down often is a no-brainer.
> 1. Especially locally (whereby 'locally' means 'under my own control')
## Entwurf (28.8.24)
![Deployment 2024](./deployment.drawio.png)

View file

@ -1,8 +0,0 @@
---
title: POCs
weight: 2
---
## Further ideas for POCs
* see sample apps 'metaphor' in https://docs.kubefirst.io/

View file

@ -1,49 +0,0 @@
+++
title = "Existing Backstage Plugins"
weight = 4
+++
1. **Catalog**:
- Used for managing services and microservices, including registration, visualization, and the ability to track dependencies and relationships between services. It serves as a central directory for all services in an organization.
2. **Docs**:
- Designed for creating and managing documentation, supporting formats such as Markdown. It helps teams organize and access technical and non-technical documentation in a unified interface.
3. **API Docs**:
- Automatically generates API documentation based on OpenAPI specifications or other API definitions, ensuring that your API information is always up to date and accessible for developers.
4. **TechDocs**:
- A tool for creating and publishing technical documentation. It is integrated directly into Backstage, allowing developers to host and maintain documentation alongside their projects.
5. **Scaffolder**:
- Allows the rapid creation of new projects based on predefined templates, making it easier to deploy services or infrastructure with consistent best practices.
6. **CI/CD**:
- Provides integration with CI/CD systems such as GitHub Actions and Jenkins, allowing developers to view build status, logs, and pipelines directly in Backstage.
7. **Metrics**:
- Offers the ability to monitor and visualize performance metrics for applications, helping teams to keep track of key indicators like response times and error rates.
8. **Snyk**:
- Used for dependency security analysis, scanning your codebase for vulnerabilities and helping to manage any potential security risks in third-party libraries.
9. **SonarQube**:
- Integrates with SonarQube to analyze code quality, providing insights into code health, including issues like technical debt, bugs, and security vulnerabilities.
10. **GitHub**:
- Enables integration with GitHub repositories, displaying information such as commits, pull requests, and other repository activity, making collaboration more transparent and efficient.
11. **CircleCI**:
- Allows seamless integration with CircleCI for managing CI/CD workflows, giving developers insight into build pipelines, test results, and deployment statuses.
12. **Kubernetes**:
- Provides tools to manage Kubernetes clusters, including visualizing pod status, logs, and cluster health, helping teams maintain and troubleshoot their cloud-native applications.
13. **Cloud**:
- Includes plugins for integration with cloud providers like AWS and Azure, allowing teams to manage cloud infrastructure, services, and billing directly from Backstage.
14. **OpenTelemetry**:
- Helps with monitoring distributed applications by integrating OpenTelemetry, offering powerful tools to trace requests, detect performance bottlenecks, and ensure application health.
15. **Lighthouse**:
- Integrates Google Lighthouse to analyze web application performance, helping teams identify areas for improvement in metrics like load times, accessibility, and SEO.

View file

@ -0,0 +1,13 @@
---
title: "Documentation (v1 - Legacy)"
linkTitle: "v1 (Legacy)"
weight: 100
description: >
Legacy documentation - archived version of the original content.
---
{{% alert title="Note" color="warning" %}}
This is the legacy documentation (v1). For the latest version, please visit the [current documentation](/docs/).
{{% /alert %}}
This section contains the original documentation that is being migrated to a new structure.

View file

@ -4,7 +4,7 @@ weight: 2
description: The golden paths in the engineers and product development domain
---
## Rationale
## Rationale
The challenge of IPCEI-CIS Developer Framework is to provide value for DTAG customers, and more specifically: for Developers of DTAG customers.
@ -52,11 +52,10 @@ The resulting visualization should look similar like this:
![alt text](fibonacci-app_cpu-spike.png)
## When and how to use the developer framework?
### e.g. an example
.... taken from https://cloud.google.com/blog/products/application-development/common-myths-about-platform-engineering?hl=en
![alt text](image.png)
![alt text](image.png)

View file

Before

Width:  |  Height:  |  Size: 154 KiB

After

Width:  |  Height:  |  Size: 154 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 944 KiB

After

Width:  |  Height:  |  Size: 944 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 160 KiB

After

Width:  |  Height:  |  Size: 160 KiB

Before After
Before After

Binary file not shown.

After

Width:  |  Height:  |  Size: 128 KiB

View file

@ -6,3 +6,10 @@ description: What in terms of components or building blocks is needed in a platf
> This page is in work. Right now we have in the index a collection of links describing and listing typical components and building blocks of platforms. Also we have a growing number of subsections regarding special types of components.
See also:
* https://thenewstack.io/build-an-open-source-kubernetes-gitops-platform-part-1/
* https://thenewstack.io/build-an-open-source-kubernetes-gitops-platform-part-2/
![alt text](7b748ff4-image2-1024x580.png)

View file

@ -47,11 +47,11 @@ Components are the composable and self-contained building blocks for the context
Components must be as small as possible and follow the same concepts of software development and deployment as any other software product. In particular, they must have the following characteristics:
- designed for a single task
- provide a clear and intuitive output
- easy to compose
- easily customizable or interchangeable
- automatically testable
* designed for a single task
* provide a clear and intuitive output
* easy to compose
* easily customizable or interchangeable
* automatically testable
In the EDF components are divided into different categories. Each category contains components that perform similar actions. For example, the `build` category contains components that compile code, while the `deploy` category contains components that automate the management of the artefacts created in a production-like system.

View file

@ -8,4 +8,4 @@ There is no continuous whatever step inbetween ... Gitops is just 'overwriting'
This means whatever quality ensuring steps have to take part before 'overwriting' have to be defined as state changer in the repos, not in the environments.
Conclusio: I think we only have three contexts, or let's say we don't have the context 'continuous delivery'
Conclusio: I think we only have three contexts, or let's say we don't have the context 'continuous delivery'

View file

@ -33,4 +33,4 @@ https://www.getport.io/compare/backstage-vs-port
* [port-vs-backstage-choosing-your-internal-developer-portal](https://medium.com/@vaibhavgupta0702/port-vs-backstage-choosing-your-internal-developer-portal-71c6a6acd979)
* [idp-vs-self-service-portal-a-platform-engineering-showdown](https://thenewstack.io/idp-vs-self-service-portal-a-platform-engineering-showdown)
* [portals-vs-platform-orchestrator](https://humanitec.com/portals-vs-platform-orchestrator)
* [internal-developer-portal-vs-internal-developer-platform](https://www.cortex.io/post/internal-developer-portal-vs-internal-developer-platform)
* [internal-developer-portal-vs-internal-developer-platform](https://www.cortex.io/post/internal-developer-portal-vs-internal-developer-platform)

View file

@ -17,7 +17,7 @@ description: "The new kid on the block since 2023 ist 'Platform Orchestrating':
* cnoe.io
#### Resources
#### Resources
* [CNOE IDPBuilder](https://cnoe.io/docs/reference-implementation/installations/idpbuilder)
* https://github.com/csantanapr/cnoe-examples/tree/main
* https://github.com/csantanapr/cnoe-examples/tree/main

View file

@ -29,8 +29,8 @@ description: An currently uncurated list of references with respect to typical p
| Core Component | Short Description |
| ---- | --- |
| Application Configuration Management | Manage application configuration in a dynamic, scalable and reliable way. |
| Infrastructure Orchestration | Orchestrate your infrastructure in a dynamic and intelligent way depending on the context. |
| Environment Management | Enable developers to create new and fully provisioned environments whenever needed. |
| Deployment Management | Implement a delivery pipeline for Continuous Delivery or even Continuous Deployment (CD). |
| Role-Based Access Control | Manage who can do what in a scalable way. |
| Application Configuration Management | Manage application configuration in a dynamic, scalable and reliable way. |
| Infrastructure Orchestration | Orchestrate your infrastructure in a dynamic and intelligent way depending on the context. |
| Environment Management | Enable developers to create new and fully provisioned environments whenever needed. |
| Deployment Management | Implement a delivery pipeline for Continuous Delivery or even Continuous Deployment (CD). |
| Role-Based Access Control | Manage who can do what in a scalable way. |

View file

@ -5,7 +5,7 @@ description: Theory and general blue prints of the platform engineering discipli
---
## Rationale
## Rationale
IPCEI-CIS Developer Framework is part of a cloud native technology stack. To design the capabilities and architecture of the Developer Framework we need to define the surounding context and internal building blocks, both aligned with cutting edge cloud native methodologies and research results.
@ -16,6 +16,7 @@ In CNCF the discipline of building stacks to enhance the developer experience is
[CNCF first asks](https://tag-app-delivery.cncf.io/whitepapers/platforms/) why we need platform engineering:
> The desire to refocus delivery teams on their core focus and reduce duplication of effort across the organisation has motivated enterprises to implement platforms for cloud-native computing. By investing in platforms, enterprises can:
>
> * Reduce the cognitive load on product teams and thereby accelerate product development and delivery
> * Improve reliability and resiliency of products relying on platform capabilities by dedicating experts to configure and manage them
> * Accelerate product development and delivery by reusing and sharing platform tools and knowledge across many teams in an enterprise
@ -40,7 +41,7 @@ https://humanitec.com/blog/wtf-internal-developer-platform-vs-internal-developer
## Internal Developer Platform
> In IPCEI-CIS right now (July 2024) we are primarily interested in understanding how IDPs are built as one option to implement an IDP is to build it ourselves.
> In IPCEI-CIS right now (July 2024) we are primarily interested in understanding how IDPs are built as one option to implement an IDP is to build it ourselves.
The outcome of the Platform Engineering discipline is - created by the platform engineering team - a so called 'Internal Developer Platform'.
@ -69,4 +70,4 @@ The amount of available IDPs as product is rapidly growing.
## Platform 'Initiatives' aka Use Cases
Cortex is [talking about Use Cases (aka Initiatives):](https://www.youtube.com/watch?v=LrEC-fkBbQo) (or https://www.brighttalk.com/webcast/20257/601901)
![alt text](cortex-use-cases.png)
![alt text](cortex-use-cases.png)

View file

@ -7,14 +7,14 @@ weight = 1
date = '2024-07-30'
+++
## [The Structure of a Successful Internal Developer Platform](https://platformengineering.org/blog/create-your-own-platform-engineering-reference-architectures)
## [The Structure of a Successful Internal Developer Platform](https://platformengineering.org/blog/create-your-own-platform-engineering-reference-architectures)
In a platform reference architecture there are five main planes that make up an IDP:
1. Developer Control Plane – this is the primary configuration layer and interaction point for the platform users. Components include Workload specifications such as Score and a portal for developers to interact with.
2. Integration and Delivery Plane – this plane is about building and storing the image, creating app and infra configs, and deploying the final state. It usually contains a CI pipeline, an image registry, a Platform Orchestrator, and the CD system.
3. Resource Plane – this is where the actual infrastructure exists including clusters, databases, storage or DNS services.
4. Monitoring and Logging Plane – provides real-time metrics and logs for apps and infrastructure.
4. Monitoring and Logging Plane – provides real-time metrics and logs for apps and infrastructure.
5. Security Plane – manages secrets and identity to protect sensitive information, e.g., storing, managing, and securely retrieving API keys and credentials/secrets.
![idp](../idp.webp)
@ -29,12 +29,9 @@ https://github.com/humanitec-architecture
https://humanitec.com/reference-architectures
## Create a reference architecture
[Create your own platform reference architecture](https://platformengineering.org/blog/create-your-own-platform-engineering-reference-architectures)
[Reference arch slide deck](https://docs.google.com/presentation/d/1yAf_FSjiA0bAFukgu5p1DRMvvGGE1fF4KhvZbb7gn2I/edit?pli=1#slide=id.g1ef66f3349b_3_3)

View file

@ -5,12 +5,12 @@ weight: 4
* https://cnoe.io/docs/intro
> The goal for the CNOE framework is to bring together a cohort of enterprises operating at the same scale so that they can navigate their operational technology decisions together, de-risk their tooling bets, coordinate contribution, and offer guidance to large enterprises on which CNCF technologies to use together to achieve the best cloud efficiencies.
### Aussprache
* Englisch Kuh.noo,
* Englisch Kuh.noo,
* also 'Kanu' im Deutschen
@ -26,6 +26,7 @@ See https://cnoe.io/docs/reference-implementation/integrations/reference-impl:
# in a local terminal with docker and kind
idpbuilder create --use-path-routing --log-level debug --package-dir https://github.com/cnoe-io/stacks//ref-implementation
```
### Output
```bash
@ -150,7 +151,7 @@ Data:
USER_PASSWORD : RwCHPvPVMu+fQM4L6W/q-Wq79MMP+3CN-Jeo
```
### login to backstage
### login to backstage
login geht mit den Creds, siehe oben:

View file

Before

Width:  |  Height:  |  Size: 113 KiB

After

Width:  |  Height:  |  Size: 113 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 364 KiB

After

Width:  |  Height:  |  Size: 364 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 208 KiB

After

Width:  |  Height:  |  Size: 208 KiB

Before After
Before After

View file

@ -0,0 +1,269 @@
# Ideen
* Kanban
* product review sessions
# ChatGPT
## Fehlendes Backlog
> welche agilen vorgehensmodelle bzw. best practices gibt es in der software-entwicklung, wenn das product backlog noch nicht da ist. es gibt zwar eine produktvision und auch schon einen PoC, aber das Produkt ist sowohl technisch als auch nutzenseitig immer noch nicht planbar in user stories. daher ist SCRUM nicht machbar. das team ist ca. 8 personen groß, und wir überlegen, einen meilenstein in den nächsten 8 wochen einfach durch tägliches 'PDCA' zu erstellen: ein morgendliches planning, dann in subgruppen ein erforschen, dann im Plenum wieder C (Check) zusammenführen/integrieren, und ein abschliessendes 'A' als Ergebnissicherung, Versionierung, Dokumentation. Was hältst du von diesem Vorgehen, bzw. welche Risiken und Verbesserungsvorschläge hast du? Oder gibt es ein Standard-Vorgehen hierfür?
Dein beschriebenes Szenario – Vision und PoC vorhanden, aber kein ausformuliertes Product Backlog – ist typisch für eine frühe **Explorations- oder Inceptionsphase** in der Produktentwicklung. In dieser Phase ist klassisches **Scrum** tatsächlich oft zu starr, weil es eine gewisse Klarheit über Features und Stories voraussetzt. Das von euch überlegte Vorgehen klingt nach einer sehr pragmatischen und iterativen Methode, die im Kern gut zu den **Lean-Startup-Prinzipien** oder **Design Thinking + Agile Engineering** passt.
## Bewertung eures PDCA-basierten Vorgehens
**Positiv:**
* **Täglicher PDCA-Zyklus** (Plan-Do-Check-Act) sorgt für schnelle Feedbackschleifen.
* **Morgendliches Planning** und **Check-Meeting im Plenum** fördern Transparenz und Selbstorganisation.
* **Subgruppen-Erkundung** erlaubt parallele Experimente.
* **Abschließendes "A"** zur Ergebnissicherung ist essenziell, sonst bleibt es bei "busy work".
**Risiken:**
1. **Fehlende Langfriststruktur:** Ohne grobe übergreifende Richtung (z.B. Meilensteinplan, Zielbild) kann es leicht in ziellosem Explorieren enden.
2. **Uneinheitlicher Erkenntnisgewinn:** Subgruppen könnten redundant oder inkompatibel arbeiten, wenn kein gemeinsames Verständnis besteht.
3. **Dokumentation als „Nachsorge“:** Wenn die Doku erst am Ende passiert, droht Wissensverlust – lieber „Living Docs“ in Echtzeit pflegen.
4. **Stakeholder-Abkopplung:** Ohne explizite Reviews oder Demo-Rituale kann Feedback von außen fehlen.
## Verbesserungsvorschläge
1. **Exploration Backlog oder Hypothesenboard:** Auch ohne klassisches Product Backlog könnt ihr mit einem **Experiment-/Hypothesenboard** (à la Lean Startup) arbeiten. Zum Beispiel:
* Hypothese: „Feature X wird den Use Case Y verbessern.“
* Experiment: „Prototyp bauen und testen mit Nutzergruppe Z.“
* Ergebnis & Learnings dokumentieren.
2. **Wöchentliche Zielsetzungen:** Jeden Montag ein Weekly Planning mit klaren Zielen für die Woche (Outcome-fokussiert, nicht Task-fokussiert).
3. **Integration mit Dual Track Agile:** Das Vorgehen ähnelt dem **Dual-Track-Modell**: ein **Discovery-Track** (Erkunden, Validieren, Prototypisieren) läuft parallel zum späteren **Delivery-Track** (Releases, Sprints). Ihr seid noch voll im Discovery-Track.
4. **Design Studio oder Spike-Zeitfenster:** Strukturiert Zeit für z.B. 2-Tages-Design-Studios oder 1-Tages-Spikes das erhöht die Qualität der Exploration.
5. **Review/Share-Session freitags:** Eine fixe wöchentliche Review-/Demo-Session (auch intern) erhöht die Disziplin und hilft beim „Check“-Teil im PDCA.
## Gängige Standard-Methoden in dieser Phase
* **Design Thinking** (für Nutzerzentrierte Exploration)
* **Lean UX / Lean Startup** (Hypothesengetrieben)
* **Dual Track Agile**
* **Google Design Sprint (5 Tage)** – extrem hilfreich, wenn es um das Testen von Nutzerideen geht
* **Shape Up (Basecamp)** – für nicht-sprintbasiertes Arbeiten mit "Bet" auf 6-Wochen-Projekte
---
**Fazit:**
Euer PDCA-Zyklus ist ein solider, pragmatischer Einstieg in eine unsichere, vorstrukturierte Produktphase. Mit ein paar Ergänzungen (Exploration Backlog, feste Review-Elemente, Hypothesenstruktur) könnt ihr daraus ein effektives Discovery-Vorgehen machen, das den Übergang zu einem klassischen agilen Framework (z.B. Scrum) vorbereitet.
Möchtest du eine konkrete Vorlage für ein Exploration Backlog oder Hypothesenboard für euer Team?
## Architektur vorhanden
> ok, du findest vor allem Tages-Zyklen zu kurz. Ich möchte noch ergänzen, dass wir eine Architektur haben. Das klingt vielleicht paradox, aber es ist tatsächlich so, dass wir das Software-System bereits designed haben, und die Komponenten auch bereits kennen, aber jetzt vor allem die Wege des automatischen Deployments in eine Cloud und die Konfigurations- und Verwaltungsstrukturen unserer Software herausfinden müssen. Das ist das, was sich gerade nicht weiter planen lässt. Kannst du mit diesen Ergänzungen dein Feedback nochmal schärfen?
Danke für die Ergänzung das ändert die Ausgangslage entscheidend und erlaubt eine deutlich präzisere Einordnung.
### Neue Ausgangslage
* Ihr **habt eine Architektur**: Die Komponenten und Schnittstellen sind klar.
* Es gibt **bereits einen PoC** und eine Produktvision.
* Das **Backlog fehlt**, weil ihr gerade die **"nicht-funktionalen" Themen wie Deployment, Cloud-Umgebung, Konfiguration und Betrieb** klärt.
* Das System ist **technisch grob fertig designt**, aber die **Operations- und Deployment-Prozesse** sind unklar und werden explorativ entwickelt.
### Schärferes Feedback
#### 1. **Tages-PDCA-Zyklen wirken in diesem Kontext zu granular**
Bei Infrastruktur-, Deployment- oder Konfigurationsfragen entstehen oft tiefe „Spikes“ (z.B. CI/CD-Pipeline aufsetzen, Helm-Chart modellieren, Secrets Handling evaluieren). Diese brauchen oft mehr als einen Tag bis zum „Check“, weil Abhängigkeiten (z.B. Rechte, Cloud-Zugänge, Testumgebungen) entstehen. Ein **2- bis 3-Tages-Rhythmus** mit klaren Zwischenzielen wäre realistischer, ergänzt um:
* **Daily Standup als Taktgeber**, aber nicht zwangsläufig als vollständiger PDCA-Zyklus.
* **Weekly Planning mit Zielvorgaben und Review-Ritualen**, um Fortschritt messbar zu machen.
#### 2. **Was ihr macht, ist kein Produkt-Delivery, sondern "System Enablement"**
Ihr steckt im Übergang von Architektur zu einem **Infrastructure as Code + Plattform Enablement Track**. Das bedeutet:
* Die „User Stories“ sind keine klassischen Features, sondern z.B.:
* „Als Team möchte ich unsere Software mit einem Befehl deployen können.“
* „Als Betreiber will ich wissen, wie ich Services konfiguriere.“
Das spricht für ein **Infrastructure Kanban Board**, keine Sprints.
#### 3. **Mögliche Vorgehensmodelle / Best Practices**
| Modell / Methode | Warum es jetzt passt |
|------------------------------------------|------------------------------------------------------------------|
| **Kanban für technische Tasks** | Flexible Work-in-Progress-Kontrolle, ideal für Exploration. |
| **Spikes** (aus XP) | Timeboxed Exploration zu offenen Fragen (z.B. Helm, Secrets). |
| **Enabler Epics (SAFe)** | Deployment- und Ops-Fragen formal als "Enabler" fassen. |
| **Team Topologies – Platform Team Light** | Ihr agiert gerade wie ein internes Plattform-Team. |
| **Weekly DevOps Review** | Alle 5–7 Tage Stand der Automatisierung/Doku/Strukturen. |
| **ADRs & Living Docs** | Architectural Decision Records für Deployment-/Ops-Entscheidungen.|
#### 4. **Konkrete Empfehlungen zur Strukturierung**
* **Ziele für die 8 Wochen formulieren** als **"Capabilities"**, z.B.:
* „Greenfield-Clouddeployment in AWS/GCP über GitHub Actions.“
* „Parameterisierbares Helm-Chart mit Doku.“
* „Konfigurierbarkeit über ENV vs. Secrets evaluieren.“
* Dazu **1–2-Wochen-Cadence**, jede Woche ein Review/Checkpoint.
* **Boards mit Task-Kategorien**:
* 🧪 Spike
* 🛠️ Tooling/Automation
* 📦 Infrastruktur/Dienst
* 📓 Dokumentation
* **Ergebnisse sofort versionieren und dokumentieren**, z.B.:
* GitOps Repo-Struktur
* ADRs
* Beispiel-Deployments
---
### Fazit
Ihr seid im **"technisch operativen Enablement"** eures Systems, nicht im Produkt-Delivery. Das rechtfertigt:
* ein **kanban-artiges, flexibles Modell mit timeboxed Reviews**,
* tägliche Abstimmungen, aber nicht zwingend tägliche Zyklen,
* Fokus auf **Enabler-Ziele statt funktionale Stories**.
SCRUM ist hier fehl am Platz, aber euer Ansatz ist sehr nahe an einer **modernen DevOps-orientierten Arbeitsweise**, die z.B. von **Platform Engineering Teams** eingesetzt wird.
Möchtest du ein Beispiel für ein solches Kanban-Board mit passenden Task-Typen und Zielstruktur?
## Kanban
> ja, gerne!
Hier ist ein konkretes Beispiel für ein **Kanban-Board**, das speziell auf eure Situation als „Platform/Enablement“-Team mit einem vorhandenen Architekturbild, aber offenem Deployment- und Betriebsdesign zugeschnitten ist.
---
## 🎯 **Zielstruktur (übergeordnet für 8 Wochen)**
Diese formuliert ihr am besten als **Capabilities** Fähigkeiten, die das System nachher haben soll:
| Capability ID | Zielbeschreibung |
|---------------|---------------------------------------------------------------------------------------------|
| C1 | Anwendung kann automatisiert in Cloudumgebung X deployed werden (inkl. Secrets Mgmt) |
| C2 | Konfiguration des Systems ist externalisiert und dokumentiert (ENV, YAML, o. ä.) |
| C3 | Monitoring- und Logging-Infrastruktur ist einsatzbereit und dokumentiert |
| C4 | Dev- und Test-Umgebungen sind klar getrennt und automatisch provisionierbar |
| C5 | Alle Plattformentscheidungen (z.B. Helm vs. Kustomize) sind versioniert und begründet |
Diese Capabilities bilden **Spalten** oder **Swimlanes** im Board (wenn euer Tool das unterstützt, z.B. GitHub Projects, Jira oder Trello mit Labels).
---
## 🗂️ **Kanban-Board-Spalten (klassisch)**
| Spalte | Zweck |
|------------------|-----------------------------------------------------------|
| 🔍 Backlog | Ideen, Hypothesen, Tasks priorisiert nach Capabilities |
| 🧪 In Exploration | Aktive Spikes, Proofs, technische Evaluierungen |
| 🛠️ In Progress | Umsetzung mit konkretem Ziel |
| ✅ Review / Check | Funktionsprüfung, internes Review |
| 📦 Done | Abgeschlossen, dokumentiert, ggf. in Repo |
---
## 🏷️ **Task-Typen (Labels oder Emojis zur Kennzeichnung)**
| Symbol / Label | Typ | Beispiel |
|------------------|-----------------------------|--------------------------------------------------------------------------|
| 🧪 Spike | Technische Untersuchung | „Untersuche ArgoCD vs. Flux für GitOps Deployment“ |
| 📦 Infra | Infrastruktur | „Provisioniere dev/test/stage in GCP mit Terraform“ |
| 🔐 Secrets | Sicherheitsrelevante Aufgabe| „Design für Secret-Handling mit Sealed Secrets“ |
| 📓 Docs | Dokumentation | „README für Developer Setup schreiben“ |
| 🧰 Tooling | CI/CD, Pipelines, Linter | „GitHub Action für Build & Deploy schreiben“ |
| 🔁 Entscheidung | Architekturentscheidung | „ADR: Helm vs. Kustomize für Service Deployment“ |
---
## 🧩 Beispielhafte Tasks für Capability „C1 Deployment automatisieren“
| Task | Typ | Status |
|----------------------------------------------|----------|----------------|
| Write GitHub Action for Docker image push | 🧰 Tooling | 🛠️ In Progress |
| Evaluate ArgoCD vs. Flux | 🧪 Spike | ✅ Review |
| Create Helm Chart for Service A | 📦 Infra | 🔍 Backlog |
| Create ADR: Deployment-Toolentscheidung | 🔁 Entscheidung | 🔍 Backlog |
| Add deployment doc in Wiki | 📓 Docs | 🔍 Backlog |
---
## 📅 Cadence-Empfehlung
* **Daily Standup (15min)**: Was läuft, was blockiert? Pull statt Push!
* **Weekly Planning (Montag)**: Priorisieren und Backlog-Grooming (welche Capabilities sind diese Woche dran?)
* **Weekly Review (Freitag)**: Erkenntnisse, Entscheidungen und Dokumentation präsentieren
* Optional: **Retrospektive alle 2 Wochen**, falls das Team Energie hat
---
Wenn du magst, kann ich dir ein Beispiel-Board als Markdown-Vorlage oder als CSV-Datei zum Import in Trello, GitHub Projects, Jira etc. machen. Wäre das hilfreich?
## Living Docs
> kannst du noch etwas mehr zu 'LivingDocs' sagen?
Ja, gerne! **"Living Docs"** (bzw. **Living Documentation**) ist ein Konzept aus der agilen Softwareentwicklung und DevOps, das sich auf **aktuelle, direkt aus dem System oder aus dem Arbeitsprozess generierte Dokumentation** bezieht. Ziel ist es, dass die Dokumentation *nicht veraltet*, sondern so eng mit Code und Systemzustand gekoppelt ist, dass sie sich (teilweise) selbst aktualisiert oder sich mit dem Entwicklungsprozess mitbewegt.
---
## 🔍 **Was macht "Living Documentation" aus?**
| Merkmal | Beschreibung |
|----------------------------------|------------------------------------------------------------------------------|
| 🔄 Automatisch erzeugt oder aktualisiert | Generiert aus Code, Tests, Pipelines, ADRs oder Deployments |
| 📌 Versionskontrolliert | Im Git abgelegt, gemeinsam mit dem Code gepflegt |
| 🧑‍💻 Entwicklernah | Entwickelt sich mit dem Code weiter keine Trennung zwischen "Docs" und Dev |
| 📈 Änderbar & nachvollziehbar | Jede Änderung an Code/Doku hat einen Commit & Kontext |
| 📚 Mehrwert für alle Beteiligten| Richtet sich an Devs, Ops, PMs oder andere Teams nicht nur "für später" |
---
## 🧰 Typische Formen von Living Docs
| Typ | Beschreibung & Tools |
|----------------------|----------------------------------------------------------------------------------------|
| **Architecture Decision Records (ADRs)** | Markdown-Dateien im Repo (z.B. `docs/adr/001-helm-vs-kustomize.md`) |
| **Code-Doku aus Source** | Mit Tools wie JSDoc, TypeDoc, Sphinx, Doxygen, etc. |
| **API-Doku** | Automatisch aus Code oder OpenAPI (Swagger) generiert |
| **Test-Doku (z.B. BDD)** | z.B. Gherkin-Style: `Given/When/Then`-Spezifikationen als Dokumentation |
| **Monitoring & Deployment-Status** | z.B. Dashboards mit Infrastructure-Doku (Grafana, Backstage, Argo UI) |
| **DevDocs im Repo** | z.B. `README.md`, `CONTRIBUTING.md`, `how-to/`, `docs/`, `adr/` |
| **Playbooks / Runbooks** | Schritt-für-Schritt-Anleitungen für Betrieb oder Störungen |
---
## ✅ **Best Practices**
1. **Nutze Markdown + Git:** Einfach, versionierbar, lesbar → ADRs, Runbooks, HowTos.
2. **Dokumentiere Entscheidungen bewusst:** Lieber knappe, saubere ADRs als keine Doku.
3. **Verknüpfe Doku mit Code / CI/CD:** Beispiel: Build-Tool erzeugt automatisch API-Doku oder ADR-Lint-Checks.
4. **Verlinke Logs, Dashboards, Monitoring:** z.B. in einem `system-overview.md`.
5. **Behandle Doku wie Code:** Reviewpflichtig, Pull Request basiert, CI-checkbar.
---
## 🛠️ Tools & Frameworks
| Zweck | Tools (Beispiele) |
|------------------------|-------------------------------------------------------------------------|
| ADRs | [`adr-tools`](https://github.com/npryce/adr-tools), einfaches Markdown |
| Markdown-Doku | MkDocs, Docusaurus, GitBook |
| API-Doku | Swagger/OpenAPI, Redocly |
| Infrastruktur-Doku | Backstage (Spotify), Terraform-docs |
| Doku-Automatisierung | GitHub Actions, pre-commit Hooks, CI-Checks |
---
## 🧠 Denkweise: Doku = Gespräch in Zeit
> "Dokumentation ist kein Nachtrag, sondern ein asynchrones Gespräch mit deinem zukünftigen Ich oder dem nächsten Teammitglied."
Gerade in eurem Kontext (technisches Enablement, Deployment, Betrieb) ist das **entscheidungsorientierte, leichtgewichtige Festhalten von Kontext** Gold wert. ADRs und `docs/runbooks/`-Verzeichnisse sind ein idealer Startpunkt.
---
Möchtest du eine Vorlage für ein ADR oder ein Beispielverzeichnis für eine sinnvolle Living-Docs-Struktur im Repo?

View file

@ -39,6 +39,7 @@ velero install \
3. Delete `credentials.ini`, it is not needed anymore (a secret has been created in the cluster).
4. Create a schedule to back up the relevant resources in the cluster:
```
velero schedule create devfw-bootstrap --schedule="23 */2 * * *" "--include-namespaces=forgejo"
```
@ -48,6 +49,7 @@ velero schedule create devfw-bootstrap --schedule="23 */2 * * *" "--include-name
You can now use Velero to create backups, restore them, or perform other operations. Please refer to the [Velero Documentation](https://velero.io/docs/main/backup-reference/).
To list all currently available backups:
```
velero backup get
```

View file

@ -5,10 +5,12 @@ description: The 5-step storyflow of this Onboarding chapter
---
{{% pageinfo color="info" %}}
## Summary
This onboarding section is for you when you are new to IPCEI-CIS subproject 'Edge Developer Framework (EDF)' and you want to know about
* its context to 'Platform Engineering'
This onboarding section is for you when you are new to IPCEI-CIS subproject 'Edge Developer Framework (EDF)' and you want to know about
* its context to 'Platform Engineering'
* and why we think it's the stuff we need to care about in the EDF
{{% /pageinfo %}}
@ -41,9 +43,7 @@ Please do not think this story and the underlying assumptions are carved in ston
## Your role as 'Framework Engineer' in the Domain Architecture
Please be aware of the following domain and task structure of our mission:
Please be aware of the following domain and task structure of our mission:
![](./conclusio/images/modern.png)

View file

@ -5,10 +5,11 @@ description: Driving requirements for a platform
---
{{% pageinfo color="info" %}}
## Summary
The 'Edge Developer Framework' is both the project and the product we are working for. Out of the leading 'Portfolio Document'
we derive requirements which are ought to be fulfilled by Platform Engineering.
The 'Edge Developer Framework' is both the project and the product we are working for. Out of the leading 'Portfolio Document'
we derive requirements which are ought to be fulfilled by Platform Engineering.
**This is our claim!**
@ -26,6 +27,7 @@ e. Development of DTAG/TSI Edge Developer Framework
* Goal: All developed innovations must be accessible to developer communities in a **highly user-friendly and easy way**
### Development of DTAG/TSI Edge Developer Framework (p.14)
| capability | major novelties |||
| -- | -- | -- | -- |
| e.1. Edge Developer full service framework (SDK + day1 +day2 support for edge installations) | Adaptive CI/CD pipelines for heterogeneous edge environments | Decentralized and self healing deployment and management | edge-driven monitoring and analytics |
@ -34,22 +36,23 @@ e. Development of DTAG/TSI Edge Developer Framework
### DTAG objectives & contributions (p.27)
DTAG will also focus on developing an easy-to-use **Edge Developer framework for software
DTAG will also focus on developing an easy-to-use **Edge Developer framework for software
developers** to **manage the whole lifecycle of edge applications**, i.e. for **day-0-, day-1- and up to day-2-
operations**. With this DTAG will strongly enable the ecosystem building for the entire IPCEI-CIS edge to
cloud continuum and ensure openness and accessibility for anyone or any company to make use and
further build on the edge to cloud continuum. Providing the use of the tool framework via an open-source approach will further reduce entry barriers and enhance the openness and accessibility for anyone or
operations**. With this DTAG will strongly enable the ecosystem building for the entire IPCEI-CIS edge to
cloud continuum and ensure openness and accessibility for anyone or any company to make use and
further build on the edge to cloud continuum. Providing the use of the tool framework via an open-source approach will further reduce entry barriers and enhance the openness and accessibility for anyone or
any organization (see innovations e.).
### WP Deliverables (p.170)
e.1 Edge developer full-service framework
This tool set and related best practices and guidelines will **adapt, enhance and further innovate DevOps principles** and
their related, necessary supporting technologies according to the specific requirements and constraints associated with edge or edge cloud development, in order to keep the healthy and balanced innovation path on both sides,
This tool set and related best practices and guidelines will **adapt, enhance and further innovate DevOps principles** and
their related, necessary supporting technologies according to the specific requirements and constraints associated with edge or edge cloud development, in order to keep the healthy and balanced innovation path on both sides,
the (software) development side and the operations side in the field of DevOps.
{{% pageinfo color="info" %}}
### What comes next?
[Next](../platforming/) we'll see how these requirements seem to be fulfilled by platforms!

View file

@ -7,17 +7,18 @@ description: DevOps is dead - long live next level DevOps in platforms
{{% pageinfo color="info" %}}
## Summary
Since 2010 we have DevOps. This brings increasing delivery speed and efficiency at scale.
But next we got high 'cognitive loads' for developers and production congestion due to engineering lifecycle complexity.
Since 2010 we have DevOps. This brings increasing delivery speed and efficiency at scale.
But next we got high 'cognitive loads' for developers and production congestion due to engineering lifecycle complexity.
So we need on top of DevOps an instrumentation to ensure and enforce speed, quality, and security in modern, cloud native software development. This instrumentation is called 'golden paths' in internal developer platforms (IDPs).
{{% /pageinfo %}}
## History of Platform Engineering
Let's start with a look into the history of platform engineering. A good starting point is [Humanitec](https://humanitec.com/), as they nowadays are one of the biggest players (['the market leader in IDPs.'](https://internaldeveloperplatform.org/#how-we-curate-this-site)) in platform engineering.
Let's start with a look into the history of platform engineering. A good starting point is [Humanitec](https://humanitec.com/), as they nowadays are one of the biggest players (['the market leader in IDPs.'](https://internaldeveloperplatform.org/#how-we-curate-this-site)) in platform engineering.
They create lots of [beautiful articles and insights](https://humanitec.com/blog), their own [platform products](https://humanitec.com/products/) and [basic concepts for the platform architecture](https://humanitec.com/platform-engineering) (we'll meet this later on!).
@ -51,7 +52,7 @@ There is a CNCF working group which provides the definition of [Capabilities of
### Platform Engineering Team
Or, in another illustration for the platform as a developer service interface, which also defines the **'Platform Engineering Team'** in between:
Or, in another illustration for the platform as a developer service interface, which also defines the **'Platform Engineering Team'** in between:
<img src="./platform-self-services.webp" width="600" alt="https://medium.com/@bijit211987/what-is-platform-engineering-and-how-it-reduce-cognitive-load-on-developers-ac7805603925">
@ -70,7 +71,7 @@ First of all some important wording to motivate the important term 'internal dev
[Capabilities of platforms](https://tag-app-delivery.cncf.io/whitepapers/platforms/#capabilities-of-platforms)
### Ecosystems in InternalDeveloperPlatform
### Ecosystems in InternalDeveloperPlatform
Build or buy - this is also in platform engineering a much-debated discussion, which one of the oldest players answers like this with some opinionated internal capability structuring:
@ -78,6 +79,7 @@ Build or buy - this is also in pltaform engineering a tweaked discussion, which
{{% pageinfo color="info" %}}
### What comes next?
[Next](../orchestrators/) we'll see how these concepts got structured!
@ -87,7 +89,7 @@ Build or buy - this is also in pltaform engineering a tweaked discussion, which
### Digital Platform definition from [What we **call** a Platform](https://martinfowler.com/articles/talk-about-platforms.html)
> Words are hard, it seems. Platform is just about the most ambiguous term we could use for an approach that is super-important for increasing delivery speed and efficiency at scale. Hence the title of this article, here is what Ive been talking about most recently.
> Words are hard, it seems. Platform is just about the most ambiguous term we could use for an approach that is super-important for increasing delivery speed and efficiency at scale. Hence the title of this article, here is what Ive been talking about most recently.
\
Definitions for software and hardware platforms abound, generally describing an operating environment upon which applications can execute and which provides reusable capabilities such as file systems and security.
\

View file

@ -5,9 +5,11 @@ description: Next level platforming is orchestrating platforms
---
{{% pageinfo color="info" %}}
## Summary
When defining and setting up platforms, two intrinsic problems arise next:
When defining and setting up platforms, two intrinsic problems arise next:
1. it is not declarative and automated
2. it is not, or at least not easily, changeable
@ -33,10 +35,11 @@ https://humanitec.com/reference-architectures
<img src="./platform-architectures.webp" width="600" alt="https://humanitec.com/blog/aws-azure-and-gcp-open-source-reference-architectures-to-start-your-mvp">
> Hint: There is a [slides tool provided by McKinsey](https://platformengineering.org/blog/create-your-own-platform-engineering-reference-architectures) to set up your own platform design based on the reference architecture
> Hint: There is a [slides tool provided by McKinsey](https://platformengineering.org/blog/create-your-own-platform-engineering-reference-architectures) to set up your own platform design based on the reference architecture
{{% pageinfo color="info" %}}
### What comes next?
[Next](../cnoe/) we'll see how we are going to do platform orchestration with CNOE!
@ -50,4 +53,3 @@ You remember the [capability mappings from the time before orchestration](../pla
<img src="./platform-tooling-humanitec-platform-report-2024.PNG" width="600" alt="https://humanitec.com/whitepapers/state-of-platform-engineering-report-volume-2 Whitepaper_ State of Platform Engineering Report.pdf">

View file

@ -5,6 +5,7 @@ description: Our top candidate for a platform orchestrator
---
{{% pageinfo color="info" %}}
## Summary
In late 2023 platform orchestration arose - the discipline of declaratively defining, building, orchestrating and reconciling building blocks of (digital) platforms.
@ -17,6 +18,7 @@ Thus we were looking for open source means for platform orchestrating and found
## Requirements for an Orchestrator
When we want to set up a [complete platform](../platforming/platforms-def.drawio.png) we expect to have
* a **schema** which defines the platform, its resources and internal behaviour
* a **dynamic configuration or templating mechanism** to provide a concrete specification of a platform
* a **deployment mechanism** to deploy and reconcile the platform
@ -55,6 +57,7 @@ There are already some example stacks:
{{% pageinfo color="info" %}}
### What comes next?
[Next](../cnoe-showtime/) we'll see how a CNOE stacked Internal Developer Platform is deployed on you local laptop!

View file

Before

Width:  |  Height:  |  Size: 120 KiB

After

Width:  |  Height:  |  Size: 120 KiB

Before After
Before After

View file

@ -5,9 +5,10 @@ description: CNOE hands on
---
{{% pageinfo color="info" %}}
## Summary
CNOE is a 'Platform Engineering Framework' (Danger: Our wording!) - it is open source and locally runnable.
CNOE is a 'Platform Engineering Framework' (Danger: Our wording!) - it is open source and locally runnable.
It consists of the orchestrator 'idpbuilder' and both of some predefined building blocks and also some predefined platform configurations.
@ -87,7 +88,7 @@ It's an important feature of idpbuilder that it will set up on an existing clust
That's why we here first create the kind cluster `localdev` itself:
```bash
```bash
cat << EOF | kind create cluster --name localdev --config=-
# Kind kubernetes release images https://github.com/kubernetes-sigs/kind/releases
kind: Cluster
@ -137,7 +138,7 @@ kube-system kube-scheduler-localdev-control-plane 1/1 Ru
local-path-storage local-path-provisioner-6f8956fb48-6fvt2 1/1 Running 0 15s
```
### First run: Start with core applications, 'core package'
### First run: Start with core applications, 'core package'
Now we run idpbuilder the first time:
@ -149,7 +150,7 @@ ib create --use-path-routing
#### Output
##### idpbuilder log
##### idpbuilder log
```bash
stl@ubuntu-vpn:~/git/mms/idpbuilder$ ib create --use-path-routing
@ -243,7 +244,7 @@ Data:
username : giteaAdmin
```
In ArgoCD you will see the deployed three applications of the core package:
In ArgoCD you will see the deployed three applications of the core package:
![alt text](image-1.png)
@ -302,7 +303,7 @@ drwxr-xr-x 4 stl stl 4096 Jul 29 10:57 ..
Now we run idpbuilder the second time with `-p basic/package1`
##### idpbuilder log
##### idpbuilder log
```bash
stl@ubuntu-vpn:~/git/mms/cnoe-stacks$ ib create --use-path-routing -p basic/package1
@ -572,9 +573,10 @@ Next wait a bit until Gitops does its magic and our 'wanted' state in the repo g
![alt text](image-15.png)
{{% pageinfo color="info" %}}
### What comes next?
The showtime of CNOE high level behaviour and usage scenarios is now finished. We set up an initial IDP and used a Backstage golden path to init and deploy a simple application.
[Last but not least](../conclusio/) we want to sum up the whole way from DevOps to 'Frameworking' (is this the correct wording???)
[Last but not least](../conclusio/) we want to sum up the whole way from DevOps to 'Frameworking' (is this the correct wording???)
{{% /pageinfo %}}

View file

@ -9,10 +9,10 @@ docker commit likec4 likec4
docker run -it --rm --user node -v $PWD:/app -p 5173:5173 likec4 bash
// as root
npx playwright install-deps
npx playwright install-deps
npx playwright install
npm install likec4
// render
node@e20899c8046f:/app/content/en/docs/project/onboarding$ ./node_modules/.bin/likec4 export png -o ./images .
node@e20899c8046f:/app/content/en/docs/project/onboarding$ ./node_modules/.bin/likec4 export png -o ./images .

Some files were not shown because too many files have changed in this diff Show more