dev #20250613-01

Gilles Mouchet 2025-06-13 17:07:45 +02:00
parent 232a151b85
commit b3eefd65a2
9 changed files with 855 additions and 24 deletions

View File

@ -48,3 +48,8 @@ LOKI_GEN_LOG_DIR=<path_to_your_loki_data_dir>
# promtail
# conf dir promtail.yaml (e.g. /home/docker/promtail)
PROMTAIL_CONF_DIR=<path_to_your_promtail_conf_dir>
#------------------------------------------------
# flog
# flog docker image
#FLOG_DOCKER_IMG=mingrammer/flog # (amd64)
#FLOG_DOCKER_IMG=gmouchet/flog # (arm64)

View File

@ -556,3 +556,625 @@ scrape_configs:
✅ Startup
docker-compose up -d
CPU/MEM ISSUE
When you see 100% CPU and memory usage with Loki and Promtail, there are several common causes. Here is how to analyse and resolve the problem.
🔍 Analysis of possible causes
1. Too many logs generated by flog
Problem: flog is configured with --number=10 and --delay=100ms, which potentially means 10 logs every 100ms, i.e. 100 logs/second.
Impact: this can overwhelm promtail, which tries to parse and push a very large number of entries to loki.
2. Promtail spends too much CPU parsing the logs
The block:
pipeline_stages:
  - json:
      expressions:
        http_method: 'method'
        http_status: "status"
  - labels:
      http_method:
      http_status:
is very costly if the logs are large and poorly structured (or if the fields are not always present).
3. No resource limits
None of the containers appear to have Docker resource limits, so they can saturate your system.
✅ Concrete recommendations
🧹 A. Reduce the load generated by flog
Modify the configuration to reduce the throughput:
command:
  - --loop
  - --format=json
  - --number=1       # reduced to 1 log/sec
  - --delay=1000ms   # one entry per second
⚙️ B. Add resource limits to the heavy containers
Example for promtail:
promtail:
  ...
  deploy:
    resources:
      limits:
        cpus: '0.50'
        memory: 256M
      reservations:
        cpus: '0.25'
        memory: 128M
Do the same for loki, flog and, if needed, grafana.
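To confirm that Compose actually applied these limits (older Compose versions only honour deploy.resources outside Swarm when run with --compatibility), you can inspect the running container. A minimal sketch, assuming the stack is running from this project and using the promtail service as an example:
# Show the memory limit (bytes) and CPU quota (NanoCpus) applied to the promtail container
docker inspect --format 'Mem={{.HostConfig.Memory}} NanoCpus={{.HostConfig.NanoCpus}}' \
  $(docker compose ps -q promtail)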
🔍 C. Monitor the log volume and errors
Add a volume so you can inspect Promtail's state and errors:
volumes:
  - /var/log:/var/log
  - ${PROMTAIL_CONF_DIR}/config.yml:/etc/promtail/config.yml
  - /tmp/promtail-log:/tmp/promtail-log
Then, in Promtail:
positions:
  filename: /tmp/promtail-log/positions.yaml
🧪 D. Test without pipeline_stages to check the impact
To see whether JSON parsing is the culprit, temporarily comment out:
# pipeline_stages:
#   - json:
#       expressions:
#         http_method: 'method'
#         http_status: "status"
#   - labels:
#       http_method:
#       http_status:
📊 Monitoring tools
Run docker stats to identify, in real time, which container consumes the most resources.
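For a one-off snapshot limited to this stack's containers, a minimal sketch (it assumes you run it from the compose project directory so docker compose ps resolves the right containers):
# Snapshot of CPU / memory usage for the containers of the current compose project
docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" \
  $(docker compose ps -q)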
✅ In summary
Action | Goal
Reduce the flog throughput | Lower the load on Promtail
Limit the Promtail/Loki resources | Keep the machine from saturating
Temporarily disable pipeline_stages | Measure the impact of JSON parsing
Analyse the Promtail logs | Check whether there are errors
Would you like me to provide a corrected docker-compose.yaml file with these adjustments?
You said:
yes
ChatGPT said:
Here is a corrected docker-compose.yaml file with:
✅ Reduced throughput for the log generator (flog)
✅ Resource limits for loki, promtail and flog
✅ A directory for promtail's positions file
✅ JSON parsing temporarily disabled in promtail.yml (re-enable it once things are stable)
✅ Modified docker-compose.yaml:
networks:
  loki:
    name: loki-net

services:
  init:
    image: busybox
    user: root
    entrypoint: >
      /bin/sh -c "
      chown 472:root /var/lib/grafana;
      chown 101:root /etc/nginx/conf.d/default.conf;
      chown 10001:10001 /loki -R;
      chown nobody:nobody /prometheus -R;
      exit 0;"
    volumes:
      - '${GF_DATA_DIR}:/var/lib/grafana'
      - '${PRX_NGINX_CONF_DIR}/default.conf:/etc/nginx/conf.d/default.conf'
      - '${PROM_DATA_DIR}:/prometheus'
      - '${LOKI_DATA_DIR}:/loki'
    networks:
      - loki

  proxy:
    image: nginxinc/nginx-unprivileged
    volumes:
      - ${PRX_NGINX_CONF_DIR}/default.conf:/etc/nginx/conf.d/default.conf
      - ${PRX_CERTS_DIR}:/etc/nginx/certs
    ports:
      - "443:8443"
    networks:
      - loki
    restart: always

  minio:
    image: minio/minio:RELEASE.2025-03-12T18-04-18Z
    restart: always
    volumes:
      - ${MINIO_DATA_ROOT_DIR}:/data
    ports:
      - "9000:9000"
      - "9001:9001"
    environment:
      MINIO_ROOT_USER: ${MINIO_ACCESS_KEY}
      MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY}
      MINIO_BROWSER_REDIRECT_URL: ${MINIO_REDIRECT_URL}
    command: server /data --console-address ":9001"
    networks:
      - loki

  minio-cli:
    image: minio/mc
    depends_on:
      - minio
    entrypoint: >
      /bin/sh -c "tail -f /dev/null"
    volumes:
      - ${MINIO_CONF_CLI_DIR}/.mc:/root/.mc
    restart: unless-stopped
    networks:
      - loki

  grafana:
    image: grafana/grafana-enterprise
    ports:
      - "3000:3000"
    volumes:
      - '${GF_DATA_DIR}:/var/lib/grafana'
    environment:
      - GF_SECURITY_ADMIN_USER=${GF_ADMIN_USER}
      - GF_SECURITY_ADMIN_PASSWORD=${GF_ADMIN_PASS}
      - GF_SERVER_ROOT_URL=${GF_ROOT_URL}
    restart: unless-stopped
    networks:
      - loki

  prometheus:
    image: prom/prometheus
    volumes:
      - ${PROM_CONF_DIR}/prometheus.yml:/etc/prometheus/prometheus.yml
      - ${PROM_DATA_DIR}:/prometheus
    ports:
      - 9090:9090
    restart: unless-stopped
    networks:
      - loki

  node-exporter:
    image: prom/node-exporter
    networks:
      - loki
    ports:
      - 9100:9100
    restart: unless-stopped

  loki:
    image: grafana/loki:latest
    ports:
      - 3100:3100
    volumes:
      - ${LOKI_CONF_DIR}/local-config.yaml:/etc/loki/local-config.yaml
      - ${LOKI_DATA_DIR}:/loki
    restart: unless-stopped
    networks:
      - loki
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 512M
        reservations:
          cpus: '0.5'
          memory: 256M

  promtail:
    image: grafana/promtail:latest
    volumes:
      - ${PROMTAIL_CONF_DIR}/config.yml:/etc/promtail/config.yml
      - ${LOKI_GEN_LOG_DIR}:/tmp/
      - /var/log/:/var/log/
      - /tmp/promtail-log:/tmp/promtail-log
    restart: unless-stopped
    networks:
      - loki
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 256M
        reservations:
          cpus: '0.25'
          memory: 128M

  log-generator:
    image: mingrammer/flog
    command:
      - --loop
      - --format=json
      - --number=1       # reduced to 1 line/sec
      - --delay=1000ms   # 1 s pause between each log
      - --output=/tmp/loki-gen-log/generated-logs.txt
      - --overwrite
      - --type=log
    volumes:
      - ${LOKI_GEN_LOG_DIR}:/tmp/
    deploy:
      resources:
        limits:
          cpus: '0.25'
          memory: 128M
        reservations:
          cpus: '0.1'
          memory: 64M
✅ Modified promtail.yml (JSON pipeline disabled):
server:
  http_listen_port: 9080
  grpc_listen_port: 0

positions:
  filename: /tmp/promtail-log/positions.yaml

clients:
  - url: http://loki:3100/loki/api/v1/push

scrape_configs:
  - job_name: system
    static_configs:
      - targets:
          - localhost
        labels:
          job: varlogs
          __path__: /var/log/*log

  - job_name: generated-logs
    static_configs:
      - targets:
          - localhost
        labels:
          job: generated-logs
          __path__: /tmp/loki-gen-log/generated-logs.txt
    # Temporarily disabled for the performance test
    # pipeline_stages:
    #   - json:
    #       expressions:
    #         http_method: 'method'
    #         http_status: "status"
    #   - labels:
    #       http_method:
    #       http_status:
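To check that Promtail is actually shipping these logs, you can query Loki's HTTP API directly. A quick sketch, assuming Loki is reachable on localhost:3100 as exposed in the compose file:
# Count the log lines received for the generated-logs job over the last 5 minutes
curl -sG "http://localhost:3100/loki/api/v1/query" \
  --data-urlencode 'query=count_over_time({job="generated-logs"}[5m])'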
Would you also like a Bash script that automates startup and checks the load?
You said:
yes
ChatGPT said:
Here is a Bash script that will:
Start your containers with docker compose up
Wait briefly for the containers to come up
Take a CPU/memory snapshot with docker stats
Warn if a container exceeds the defined thresholds
✅ start-and-monitor.sh
#!/bin/bash
set -e

COMPOSE_FILE="docker-compose.yaml"
MAX_CPU=80        # CPU threshold in %
MAX_MEM_MB=500    # Memory threshold in MB

echo "🔄 Starting the containers with Docker Compose..."
docker compose -f "$COMPOSE_FILE" up -d

echo "⏳ Waiting for the containers to start..."
sleep 10

echo "📦 Containers started:"
docker compose ps

echo "⏱ Taking a resource usage snapshot..."
docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" > /tmp/docker_stats_snapshot.txt

echo "📊 Result:"
cat /tmp/docker_stats_snapshot.txt

echo ""
echo "🚨 Checking thresholds (CPU > ${MAX_CPU}% or MEM > ${MAX_MEM_MB}MB)..."
while read -r line; do
  NAME=$(echo "$line" | awk '{print $1}')
  CPU=$(echo "$line" | awk '{print $2}' | sed 's/%//')
  MEM_RAW=$(echo "$line" | awk '{print $3}')
  # docker stats reports MiB/GiB, so match both MiB/GiB and MB/GB
  MEM_UNIT=$(echo "$MEM_RAW" | grep -oE '[KMG]i?B')
  MEM_VAL=$(echo "$MEM_RAW" | grep -oE '^[0-9.]+')
  if [[ $MEM_UNIT == G* ]]; then
    MEM_MB=$(echo "$MEM_VAL * 1024" | bc)
  else
    MEM_MB=$MEM_VAL
  fi
  CPU_EXCEED=$(echo "$CPU > $MAX_CPU" | bc)
  MEM_EXCEED=$(echo "$MEM_MB > $MAX_MEM_MB" | bc)
  if [[ "$CPU_EXCEED" -eq 1 || "$MEM_EXCEED" -eq 1 ]]; then
    echo "❌ $NAME exceeds the limits (CPU=${CPU}%, MEM=${MEM_MB}MB)"
  fi
done < <(tail -n +2 /tmp/docker_stats_snapshot.txt)  # skip header
echo "✅ Monitoring done."
🔧 Instructions
Save this script as start-and-monitor.sh
Make it executable:
chmod +x start-and-monitor.sh
Run it:
./start-and-monitor.sh
Would you also like me to generate a ready-to-import Grafana Loki/Promtail dashboard?
You said:
yes
ChatGPT said:
Here is a ready-to-import Grafana Loki/Promtail dashboard that lets you:
View the logs per job (varlogs, generated-logs)
Count the logs per http_status
Display log volumes in real time
Search for errors (level=error)
✅ 1. Dashboard JSON to import into Grafana
Save the following content in a file named:
grafana-loki-dashboard.json
{
  "uid": "loki-log-monitor",
  "title": "Loki & Promtail Logs",
  "timezone": "browser",
  "schemaVersion": 37,
  "version": 1,
  "refresh": "5s",
  "panels": [
    {
      "type": "logs",
      "title": "🔍 Recent logs - All Jobs",
      "datasource": "Loki",
      "targets": [
        {
          "expr": "{job=~\"varlogs|generated-logs\"}",
          "refId": "A"
        }
      ],
      "gridPos": { "x": 0, "y": 0, "w": 24, "h": 10 }
    },
    {
      "type": "graph",
      "title": "📊 Log volume by job (lines/sec)",
      "datasource": "Loki",
      "targets": [
        {
          "expr": "rate({job=~\"varlogs|generated-logs\"}[1m])",
          "refId": "B"
        }
      ],
      "yaxes": [
        {
          "label": "Logs/sec",
          "format": "short"
        },
        {
          "format": "short"
        }
      ],
      "gridPos": { "x": 0, "y": 10, "w": 12, "h": 8 }
    },
    {
      "type": "stat",
      "title": "🚨 Detected errors (status >= 500)",
      "datasource": "Loki",
      "targets": [
        {
          "expr": "count_over_time({job=\"generated-logs\", http_status=~\"5..\"}[5m])",
          "refId": "C"
        }
      ],
      "gridPos": { "x": 12, "y": 10, "w": 12, "h": 8 }
    },
    {
      "type": "barchart",
      "title": "📌 Breakdown by http_status",
      "datasource": "Loki",
      "targets": [
        {
          "expr": "sum by (http_status) (count_over_time({job=\"generated-logs\"}[1m]))",
          "refId": "D"
        }
      ],
      "gridPos": { "x": 0, "y": 18, "w": 24, "h": 8 }
    }
  ]
}
📥 2. Import into Grafana
Open Grafana: http://localhost:3000
Go to Dashboards > New > Import
Click Upload JSON file and select grafana-loki-dashboard.json
Choose the Loki data source
Click Import
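If you prefer to script the import instead of using the UI, Grafana's HTTP API accepts the dashboard JSON wrapped in a small payload. A sketch, assuming jq is installed, Grafana listens on localhost:3000 and the admin credentials come from your .env (GF_ADMIN_USER/GF_ADMIN_PASS):
# Wrap the dashboard JSON in the payload expected by the Grafana API and import it
jq '{dashboard: ., overwrite: true}' grafana-loki-dashboard.json | \
  curl -s -X POST "http://localhost:3000/api/dashboards/db" \
    -H "Content-Type: application/json" \
    -u "${GF_ADMIN_USER}:${GF_ADMIN_PASS}" \
    -d @-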
🧪 Expected result
You will see:
All the Promtail logs live
Aggregated stats per job
Detection of HTTP 5xx errors
A histogram per http_status
Would you also like a Prometheus dashboard to monitor the CPU/MEM usage of your containers?
You said:
yes
ChatGPT said:
Perfect! Here is a ready-to-import Grafana Prometheus dashboard to monitor:
🧠 Memory used per Docker container
🔥 CPU per container
⚠️ Alerts on high consumption
📊 Graphical summary per container name (container_label_com_docker_compose_service)
✅ 1. Prometheus Docker containers dashboard JSON
Save this file as:
grafana-prometheus-containers.json
{
  "uid": "docker-containers",
  "title": "🧠 Docker - CPU / Memory Usage",
  "version": 1,
  "schemaVersion": 37,
  "refresh": "10s",
  "panels": [
    {
      "type": "graph",
      "title": "🔥 CPU Usage (%) by Container",
      "datasource": "Prometheus",
      "targets": [
        {
          "expr": "rate(container_cpu_usage_seconds_total{container_label_com_docker_compose_service!=\"\", image!=\"\"}[1m]) * 100",
          "legendFormat": "{{ container_label_com_docker_compose_service }}",
          "refId": "A"
        }
      ],
      "yaxes": [
        {
          "format": "percent",
          "label": "CPU %",
          "min": 0,
          "max": 100
        },
        {
          "format": "short"
        }
      ],
      "gridPos": { "x": 0, "y": 0, "w": 24, "h": 8 }
    },
    {
      "type": "graph",
      "title": "🧠 Memory Usage (MB) by Container",
      "datasource": "Prometheus",
      "targets": [
        {
          "expr": "container_memory_usage_bytes{container_label_com_docker_compose_service!=\"\", image!=\"\"} / 1024 / 1024",
          "legendFormat": "{{ container_label_com_docker_compose_service }}",
          "refId": "B"
        }
      ],
      "yaxes": [
        {
          "format": "megabytes",
          "label": "Memory (MB)",
          "min": 0
        },
        {
          "format": "short"
        }
      ],
      "gridPos": { "x": 0, "y": 8, "w": 24, "h": 8 }
    },
    {
      "type": "stat",
      "title": "🚨 Containers with >80% CPU",
      "datasource": "Prometheus",
      "targets": [
        {
          "expr": "count((rate(container_cpu_usage_seconds_total{container_label_com_docker_compose_service!=\"\", image!=\"\"}[1m]) * 100) > 80)",
          "refId": "C"
        }
      ],
      "gridPos": { "x": 0, "y": 16, "w": 12, "h": 4 }
    },
    {
      "type": "stat",
      "title": "⚠️ Containers with >500MB RAM",
      "datasource": "Prometheus",
      "targets": [
        {
          "expr": "count((container_memory_usage_bytes{container_label_com_docker_compose_service!=\"\", image!=\"\"} / 1024 / 1024) > 500)",
          "refId": "D"
        }
      ],
      "gridPos": { "x": 12, "y": 16, "w": 12, "h": 4 }
    }
  ]
}
📥 2. Import into Grafana
Go to Grafana (http://localhost:3000)
Menu Dashboards > Import
Click Upload JSON file
Select grafana-prometheus-containers.json
Choose the Prometheus data source
Click Import
✅ Prometheus prerequisites
Make sure prometheus.yml actually scrapes the Docker container metrics: the container_* metrics used above are exposed by cAdvisor, while Node Exporter only provides host-level metrics.
Example for Node Exporter:
scrape_configs:
  - job_name: 'node'
    static_configs:
      - targets: ['node-exporter:9100']
For cAdvisor (required for the container_* panels above):
  - job_name: 'cadvisor'
    static_configs:
      - targets: ['cadvisor:8080']
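cAdvisor itself is not part of the compose file above. A minimal sketch for running it on the stack's network so Prometheus can reach it at cadvisor:8080 (the mounts follow the standard cAdvisor documentation, trimmed; the network name loki-net comes from the compose file):
# Run cAdvisor attached to the stack's network
docker run -d --name=cadvisor --network=loki-net -p 8080:8080 \
  -v /:/rootfs:ro \
  -v /var/run:/var/run:ro \
  -v /sys:/sys:ro \
  -v /var/lib/docker/:/var/lib/docker:ro \
  gcr.io/cadvisor/cadvisor:latest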

View File

@ -195,6 +195,7 @@ docker exec -it gmo-loki-proxy-1 /bin/bash -c "curl -H \"Accept: application/jso
- 1860 - Node Exporter Full
- 3662 - Prometheus 2.0 Overview
- 13502 - MinIO Dashboard
- 13639 - Loki - Log analysis
### [1.0.0] - 2025-06-08
#### Added

View File

@ -23,7 +23,7 @@ scrape_configs:
- localhost
labels:
job: generated-logs
__path__: /tmp/loki-gen-log/generated-logs.txt
__path__: /tmp/generated-logs.txt
pipeline_stages:
- json:
expressions:

View File

@ -1,3 +1,8 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 8080 default_server;
server_name _;
@ -26,11 +31,23 @@ server {
}
#------- GRAFANA -------
location /grafana/ {
proxy_pass http://grafana:3000/;
location /grafana {
proxy_set_header Host $host;
proxy_pass http://grafana:3000/;
rewrite ^/grafana/(.*) /$1 break;
}
# Proxy Grafana Live WebSocket connections.
location /grafana/api/live/ {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_pass http://grafana:3000;
rewrite ^/grafana/(.*) /$1 break;
}
#------- MINIO UI -------
location /minio/ui/ {
rewrite ^/minio/ui/(.*) /$1 break;

View File

@ -128,13 +128,25 @@ storage_config:
insecure: true
s3forcepathstyle: true
compactor:
working_directory: /loki/compactor
#shared_store: s3
#compactor:
# working_directory: /loki/compactor
# #shared_store: s3
#limits_config:
# reject_old_samples: true
# reject_old_samples_max_age: 168h
limits_config:
max_cache_freshness_per_query: '10m'
reject_old_samples: true
reject_old_samples_max_age: 168h
reject_old_samples_max_age: 30m
ingestion_rate_mb: 10
ingestion_burst_size_mb: 20
# parallelize queries in 15min intervals
split_queries_by_interval: 15m
volume_enabled: true
#chunk_store_config:
# max_look_back_period: 0s
@ -143,6 +155,7 @@ table_manager:
retention_deletes_enabled: true
retention_period: 336h # 14 days
ruler:
storage:
type: local
@ -154,3 +167,24 @@ ruler:
kvstore:
store: memberlist
enable_api: true
query_range:
# make queries more cache-able by aligning them with their step intervals
align_queries_with_step: true
max_retries: 5
parallelise_shardable_queries: true
cache_results: true
frontend:
log_queries_longer_than: 5s
compress_responses: true
max_outstanding_per_tenant: 2048
query_scheduler:
max_outstanding_requests_per_tenant: 1024
querier:
query_ingesters_within: 2h
compactor:
working_directory: /tmp/compactor

View File

@ -24,13 +24,13 @@ services:
- loki
#------- container with DNS tools (nslookup, tcptraceroute, etc.). Use only for debugging
# dns-tools:
# image: jonlabelle/network-tools
# command: tail -f /dev/null
# stdin_open: true
# tty: true
# networks:
# - loki
dns-tools:
image: jonlabelle/network-tools
command: tail -f /dev/null
stdin_open: true
tty: true
networks:
- loki
#-------proxy (nginx)
proxy:
@ -134,33 +134,57 @@ services:
restart: unless-stopped
networks:
- loki
deploy:
resources:
limits:
cpus: '0.50'
memory: 256M
reservations:
cpus: '0.25'
memory: 128M
#------- promtail
promtail:
image: grafana/promtail:latest
volumes:
- ${PROMTAIL_CONF_DIR}/config.yml:/etc/promtail/config.yml
- ${LOKI_GEN_LOG_DIR}:/tmp/
- ${LOKI_GEN_LOG_DIR}:/tmp
- /var/log/:/var/log/
restart: unless-stopped
networks:
- loki
deploy:
resources:
limits:
cpus: '0.50'
memory: 256M
reservations:
cpus: '0.25'
memory: 128M
#------- flog generated log
log-generator:
image: mingrammer/flog
#image: gmouchet/flog
image: ${FLOG_DOCKER_IMG}
command:
- --loop
- --format=json
- --number=10 # number of log lines to generate per second
- --delay=100ms # delay between log lines
- --number=5 # number of log lines to generate per second (delay=1000ms)
- --delay=1000ms # delay between log lines
#- --output=/var/log/generated-logs.txt
- --output=/tmp/loki-gen-log/generated-logs.txt
- --output=/tmp/generated-logs.txt
- --overwrite
- --type=log
#- --bytes=50000
volumes:
- ${LOKI_GEN_LOG_DIR}:/tmp/
deploy:
resources:
limits:
cpus: '0.50'
memory: 256M
reservations:
cpus: '0.25'
memory: 128M
#volumes:
# grafana-storage:
# prometheus:

manage.sh (new executable file, 113 lines)
View File

@ -0,0 +1,113 @@
#!/bin/bash
############################################################
# Description: Script to manage the Docker stack
#
# Author: Gilles Mouchet (gilles.mouchet@gmail.com)
# Creation Date: 12-06-2025
# Version: 1.0
# Install: If applicable - Install instruction or
# see README.md
# Usage: If applicable - usage or see README.md
# Changelog:
# V1.0 - dd-Mmm-2025 - GMo
# Added
# - Creation of script from scratch
#
############################################################
#-----------------------------------------------------------------
# Doesn't change anything from here
#-----------------------------------------------------------------
version="v1.0-rc1"
progName="./$(/bin/basename $0)"
containers=(minio grafana proxy titi)
#-----------------------------------------------------------------
# Functions
#-----------------------------------------------------------------
# display help
function print_usage {
# print
/bin/cat << EOF
Usage: $progName [options]
Options:
create Create container
start Start stack '${COMPOSE_PROJECT_NAME}'
stop Stop stack '${COMPOSE_PROJECT_NAME}'
down Stop stack '${COMPOSE_PROJECT_NAME}' and delete containers
restart Restart stack '${COMPOSE_PROJECT_NAME}'
version,-v,--version Display script version
help,-h,--help Display this help
Examples:
To start stack '${COMPOSE_PROJECT_NAME}'
$progName start
To stop stack '${COMPOSE_PROJECT_NAME}'
$progName stop
To create a new stack
$progName create
$progName start
EOF
}
# read .env if exist
if [ ! -f .env ]; then
echo "file '.env' doesn't exist!"
echo "See README.md"
exit
fi
. .env
#if [ ! -z "$1" ]; then
#print_usage
# exit 0
#fi
case "$1" in
create)
docker compose create
;;
start)
for cont_item in "${containers[@]}"; do
echo "Start container $cont_item"
docker compose up -d $cont_item --remove-orphans
echo $?
done
;;
stop)
docker compose stop
;;
restart)
docker compose restart
;;
down)
docker compose down
;;
-h|--help|help)
print_usage
exit 0
;;
-v|--version|version)
echo $version
;;
*)
#echo "${progName}: invalid option -- '$1'!"
#echo -e "Try '$progName help' for more information.\n"
print_usage
exit 0
;;
esac
# Colors
VERT="\e[32m"
ROUGE="\e[31m"
NORMAL="\e[0m"
## Initial message
#echo "Starting the container..."
#
## Run the command
#if docker compose start toto > /dev/null 2>&1; then
#echo -e "Container start: ${VERT}OK${NORMAL}"
#else
#echo -e "Container start: ${ROUGE}notOK${NORMAL}"
#fi

View File

@ -1,5 +1,8 @@
#!/bin/bash
containers=(minio grafana proxy)
containers_no_start=(dns-tools minio-cli)
if [ ! -f .env ]; then
echo "file '.env' doesn't exist!"
echo "See README.md"
@ -29,7 +32,7 @@ COMPOSE_PROJECT_NAME = ${COMPOSE_PROJECT_NAME}
LOKI_GEN_LOG_DIR = ${LOKI_GEN_LOG_DIR}
PROMTAIL_CONF_DIR = ${PROMTAIL_CONF_DIR}
EOF
echo "create folders"
sudo mkdir -p ${PRX_CERTS_DIR}
sudo mkdir -p ${PRX_NGINX_CONF_DIR}
sudo mkdir -p ${MINIO_DATA_ROOT_DIR}
@ -41,10 +44,22 @@ sudo mkdir -p ${LOKI_CONF_DIR}
sudo mkdir -p ${LOKI_DATA_DIR}
sudo mkdir -p ${LOKI_GEN_LOG_DIR}
sudo mkdir -p ${PROMTAIL_CONF_DIR}
echo "down"
docker compose down
echo "copy config file"
sudo cp ./config/default.conf ${PRX_NGINX_CONF_DIR}/.
sudo cp ./config/prometheus.yml ${PROM_CONF_DIR}/.
sudo cp ./config/local-config.yaml ${LOKI_CONF_DIR}/.
sudo cp ./config/config.yml ${PROMTAIL_CONF_DIR}/.
docker compose up -d --remove-orphans
#docker compose up -d --remove-orphans
echo "create container"
docker compose create
for cont_item in "${containers[@]}"; do
echo "Start container $cont_item"
docker compose up -d $cont_item --remove-orphans
done
#docker compose stop minio-client
#docker compose stop log-generator