# mirror of https://github.com/spring-projects/spring-petclinic.git
# synced 2026-01-19 01:51:11 +00:00
name: Enhanced Java Application Pipeline with Metrics and Energy Monitoring

on:
  push:
    branches: [ pipeline-optimization ]
  pull_request:
    branches: [ pipeline-optimization ]

jobs:
  build-with-metrics:
    runs-on: ubuntu-latest
    timeout-minutes: 60

    services:
      prometheus:
        image: prom/prometheus:latest
        ports:
          - "9090:9090"
        options: >-
          --health-cmd "wget -q -O- http://localhost:9090/-/healthy || exit 1"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 3

      pushgateway:
        image: prom/pushgateway:latest
        ports:
          - "9091:9091"
        options: >-
          --health-cmd "wget -q -O- http://localhost:9091/-/healthy || exit 1"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 3

    steps:
      - uses: actions/checkout@v4

      - name: Setup directories and tools
        run: |
          set -eo pipefail

          # Create the metrics directory structure
          mkdir -p metrics/system
          mkdir -p metrics/power
          mkdir -p metrics/performance

          # Install the required system packages
          sudo apt-get update
          sudo apt-get install -y powerstat linux-tools-common linux-tools-generic python3-pip

          # Install PowerAPI and pandas
          pip3 install powerapi pandas

          # Verify the installations (a failing grep fails the step via pipefail)
          python3 --version
          pip3 list | grep powerapi
          pip3 list | grep pandas

      - name: Cache Maven packages
        # NOTE(review): setup-java below also enables `cache: maven`, which makes
        # this explicit cache largely redundant — consider dropping one of the two.
        uses: actions/cache@v4
        with:
          path: ~/.m2
          key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
          restore-keys: ${{ runner.os }}-m2

      - name: Start monitoring
        id: start-monitoring
        run: |
          set -eo pipefail

          # Record the pipeline start time (nanoseconds since epoch)
          date +%s%N > metrics/pipeline_start_time.txt

          # Collect the initial system metrics
          echo "=== Initial System Resources ===" > metrics/system/initial_metrics.txt
          top -b -n 1 >> metrics/system/initial_metrics.txt

          echo "=== Initial Memory Usage ===" > metrics/system/initial_memory.txt
          free -m >> metrics/system/initial_memory.txt

          echo "=== Initial Disk Usage ===" > metrics/system/initial_disk.txt
          df -h >> metrics/system/initial_disk.txt

      - name: Set up JDK 17
        uses: actions/setup-java@v4
        with:
          java-version: '17'
          # 'adopt' is deprecated in setup-java; Eclipse Temurin is the
          # successor distribution of AdoptOpenJDK.
          distribution: 'temurin'
          cache: maven

      - name: Build with Maven
        id: build
        timeout-minutes: 15
        env:
          MAVEN_OPTS: "-Xmx2048m -XX:+TieredCompilation -XX:TieredStopAtLevel=1"
        run: |
          set -eo pipefail

          start_time=$(date +%s%N)

          # Collect the pre-build metrics
          free -m > metrics/system/pre_build_memory.txt

          # Optimized build. The exit code is captured with `|| ...` because
          # under `set -e` a plain failing command would abort the script
          # before the timing metrics below are recorded.
          build_status=0
          ./mvnw -B verify \
            -Dmaven.test.skip=true \
            -Dcheckstyle.skip=true \
            -T 1C || build_status=$?

          end_time=$(date +%s%N)

          # Collect the post-build metrics
          free -m > metrics/system/post_build_memory.txt

          # Record the build time in milliseconds
          echo "BUILD_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
          echo "$((($end_time - $start_time)/1000000))" > metrics/performance/build_time.txt

          exit $build_status

      - name: Run tests
        id: test
        if: success()
        timeout-minutes: 20
        run: |
          set -eo pipefail

          start_time=$(date +%s%N)

          # Collect the pre-test metrics
          free -m > metrics/system/pre_test_memory.txt

          # Run the tests; capture the exit code so the metrics below are
          # still recorded when the test run fails.
          test_status=0
          ./mvnw test -T 1C || test_status=$?

          end_time=$(date +%s%N)

          # Collect the post-test metrics
          free -m > metrics/system/post_test_memory.txt

          # Record the test time in milliseconds
          echo "TEST_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
          echo "$((($end_time - $start_time)/1000000))" > metrics/performance/test_time.txt

          exit $test_status

      - name: Build Docker image
        id: docker-build
        if: success()
        timeout-minutes: 10
        run: |
          set -eo pipefail

          start_time=$(date +%s%N)

          # Collect the pre-docker metrics
          free -m > metrics/system/pre_docker_memory.txt
          df -h > metrics/system/pre_docker_disk.txt

          # Docker build. `--no-cache` forces a full rebuild — presumably so
          # the measured duration is not skewed by layer caching; confirm this
          # is intentional before removing it. Exit code captured so the
          # metrics below are still written on failure.
          build_status=0
          docker build -t app:latest -f .devcontainer/Dockerfile . --no-cache || build_status=$?

          end_time=$(date +%s%N)

          # Collect the post-docker metrics
          free -m > metrics/system/post_docker_memory.txt
          df -h > metrics/system/post_docker_disk.txt

          # Record the Docker build time in milliseconds
          echo "DOCKER_BUILD_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
          echo "$((($end_time - $start_time)/1000000))" > metrics/performance/docker_time.txt

          # Record the image size (empty when the build failed)
          docker images app:latest --format "{{.Size}}" > metrics/performance/docker_image_size.txt

          exit $build_status

      - name: Collect and analyze metrics
        if: always()
        run: |
          set -eo pipefail

          # Collect the final system metrics
          echo "=== Final System Resources ===" > metrics/system/final_metrics.txt
          top -b -n 1 >> metrics/system/final_metrics.txt || echo "Failed to collect top metrics"

          echo "=== Final Memory Usage ===" > metrics/system/final_memory.txt
          free -m >> metrics/system/final_memory.txt || echo "Failed to collect memory metrics"

          echo "=== Final Disk Usage ===" > metrics/system/final_disk.txt
          df -h >> metrics/system/final_disk.txt || echo "Failed to collect disk metrics"

          # Mark the end of the pipeline
          date +%s%N > metrics/pipeline_end_time.txt

          # Analyze the stage durations. The heredoc delimiter is quoted so
          # the shell does not expand $... or backticks inside the Python code.
          python3 << 'EOF'
          import pandas as pd

          def read_time_file(filename):
              """Return the duration (ms) stored in filename, or 0 if missing/invalid."""
              try:
                  with open(filename, 'r') as f:
                      return float(f.read().strip())
              except (OSError, ValueError):
                  return 0

          # Collect the per-stage durations (milliseconds)
          times = {
              'build': read_time_file('metrics/performance/build_time.txt'),
              'test': read_time_file('metrics/performance/test_time.txt'),
              'docker': read_time_file('metrics/performance/docker_time.txt')
          }

          # Write the performance report
          with open('metrics/performance/summary.txt', 'w') as f:
              f.write("Pipeline Performance Summary\n")
              f.write("==========================\n\n")

              total_time = sum(times.values())

              for stage, duration in times.items():
                  percentage = (duration / total_time * 100) if total_time > 0 else 0
                  f.write(f"{stage.capitalize()} Stage:\n")
                  f.write(f"Duration: {duration/1000:.2f} seconds\n")
                  f.write(f"Percentage of total time: {percentage:.1f}%\n\n")

              f.write(f"Total Pipeline Duration: {total_time/1000:.2f} seconds\n")

          # Write the metrics as CSV
          pd.DataFrame([times]).to_csv('metrics/performance/times.csv', index=False)
          EOF

      - name: Export metrics to Prometheus
        if: always()
        timeout-minutes: 5
        run: |
          set -eo pipefail

          # Push one gauge sample to the Pushgateway service container.
          # Does nothing when the value is empty (e.g. the stage was skipped).
          function export_metric() {
            local metric_name=$1
            local metric_value=$2
            local stage=$3

            if [ -n "$metric_value" ]; then
              echo "${metric_name}{stage=\"${stage}\",project=\"java-app\"} ${metric_value}" | \
                curl --retry 3 --max-time 10 --silent --show-error \
                  --data-binary @- http://localhost:9091/metrics/job/pipeline-metrics || \
                echo "Failed to export metric ${metric_name}"
            fi
          }

          # Export the stage durations
          export_metric "pipeline_duration_ms" "${BUILD_TIME}" "build"
          export_metric "pipeline_duration_ms" "${TEST_TIME}" "test"
          export_metric "pipeline_duration_ms" "${DOCKER_BUILD_TIME}" "docker"

          # Export the final memory usage (used bytes from `free`)
          mem_usage=$(free -b | grep Mem: | awk '{print $3}')
          export_metric "memory_usage_bytes" "$mem_usage" "final"

      - name: Save metrics
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: pipeline-metrics
          path: metrics/
          retention-days: 90
          if-no-files-found: warn

      - name: Cleanup
        if: always()
        run: |
          docker system prune -af
          rm -rf node_exporter*