Update common/Jenkinsfile.k8s.j2

This commit is contained in:
2026-03-19 13:22:56 +09:00
parent d3ea978375
commit 3e63e99809

View File

@@ -5,66 +5,69 @@ pipeline {
parameters { parameters {
string(name: 'BRANCH_NAME', defaultValue: '', description: '빌드할 브랜치명 (비우면 main)') string(name: 'BRANCH_NAME', defaultValue: '', description: '빌드할 브랜치명 (비우면 main)')
string(name: 'GIT_COMMIT_HASH', defaultValue: '', description: '빌드할 커밋 해시(비우면 최신 커밋 기준)') string(name: 'GIT_COMMIT_HASH', defaultValue: '', description: '빌드할 커밋 해시 (비우면 최신 커밋)')
} }
environment { environment {
// 고정 값(노드 생성 시점에 Jinja로 박힘)
NODE_ID = '{{ node_id }}' NODE_ID = '{{ node_id }}'
NODE_TYPE = '{{ node_type }}' NODE_TYPE = '{{ node_type }}'
NAMESPACE = '{{ namespace }}' NAMESPACE = '{{ namespace }}'
REGISTRY = '{{ regi }}' REGISTRY = '{{ regi }}'
platform_namespace = '{{ platform_namespace }}' PLATFORM_NAMESPACE = '{{ platform_namespace }}'
// ✅ imagePullSecret 이름도 "메소드에서 전달"되도록 Jinja로 박힘
// (create_node_project_files(extra_context={"image_pull_secret":"..."}) 로 전달)
IMAGE_PULL_SECRET = '{{ image_pull_secret | default("acr-pull") }}' IMAGE_PULL_SECRET = '{{ image_pull_secret | default("acr-pull") }}'
} }
stages { stages {
// ──────────────────────────────────────────────
// Stage 1. Checkout
// - 브랜치 / 커밋 해시 기준으로 소스코드 체크아웃
// - 이미지 경로 / release 이름 등 이후 stage 에서 사용할 환경변수 세팅
// ──────────────────────────────────────────────
stage('Checkout') { stage('Checkout') {
steps { steps {
container('jnlp') { container('jnlp') {
sh '''
set -e
git fetch --all || true
'''
script { script {
def checkoutBranch = params.BRANCH_NAME?.trim() ? params.BRANCH_NAME.trim() : 'main' sh 'git fetch --all || true'
def checkoutBranch = params.BRANCH_NAME?.trim() ?: 'main'
def commitHash = params.GIT_COMMIT_HASH?.trim() def commitHash = params.GIT_COMMIT_HASH?.trim()
if (commitHash) { if (commitHash) {
sh """ sh "git fetch origin ${checkoutBranch}"
git fetch origin ${checkoutBranch} sh "git checkout ${checkoutBranch}"
git checkout ${checkoutBranch} sh "git checkout ${commitHash}"
git checkout ${commitHash}
"""
} else { } else {
sh """ sh "git fetch origin ${checkoutBranch}"
git fetch origin ${checkoutBranch} sh "git checkout ${checkoutBranch}"
git checkout ${checkoutBranch}
"""
commitHash = sh(script: "git rev-parse --short HEAD", returnStdout: true).trim() commitHash = sh(script: "git rev-parse --short HEAD", returnStdout: true).trim()
} }
env.BRANCH_NAME = checkoutBranch env.BRANCH_NAME = checkoutBranch
env.GIT_COMMIT_HASH = commitHash env.GIT_COMMIT_HASH = commitHash
env.RELEASE_NAME = "node-${NODE_ID}-${checkoutBranch}"
env.RELEASE_NAME = "node-${NODE_ID}-${env.BRANCH_NAME}" env.IMAGE_REPO = "${REGISTRY}/node.${NODE_ID}.${checkoutBranch}"
env.IMAGE_REPO = "${REGISTRY}/node.${NODE_ID}.${env.BRANCH_NAME}" env.IMAGE_TAG = commitHash
env.IMAGE_TAG = env.GIT_COMMIT_HASH
env.FULL_IMAGE = "${env.IMAGE_REPO}:${env.IMAGE_TAG}" env.FULL_IMAGE = "${env.IMAGE_REPO}:${env.IMAGE_TAG}"
echo "NODE_ID=${NODE_ID}"
echo "BRANCH_NAME=${env.BRANCH_NAME}"
echo "GIT_COMMIT_HASH=${env.GIT_COMMIT_HASH}"
echo "RELEASE_NAME=${env.RELEASE_NAME}" echo "RELEASE_NAME=${env.RELEASE_NAME}"
echo "FULL_IMAGE=${env.FULL_IMAGE}" echo "FULL_IMAGE=${env.FULL_IMAGE}"
echo "IMAGE_PULL_SECRET=${env.IMAGE_PULL_SECRET}" echo "IMAGE_PULL_SECRET=${IMAGE_PULL_SECRET}"
} }
} }
} }
} }
stage('Build Image (Kaniko)') { // ──────────────────────────────────────────────
// Stage 2. Build
// - Kaniko 로 Docker 이미지 빌드 후 레지스트리에 푸시
// - 실패 시 로그에 pip install / SyntaxError 등 원인 출력
// - retry(3): 레지스트리 일시적 네트워크 오류 대비
// ──────────────────────────────────────────────
stage('Build') {
steps { steps {
container('kaniko') { container('kaniko') {
retry(3) { retry(3) {
@@ -80,21 +83,33 @@ pipeline {
} }
} }
} }
post {
failure {
echo '[Build] ★ BUILD FAILED ★'
echo '[Build] Kaniko 빌드 실패. 위 로그에서 원인을 확인하세요.'
echo '[Build] 주요 원인: Dockerfile 오류 / pip install 실패 / SyntaxError / ModuleNotFoundError'
}
}
} }
stage('Deploy (Manifests)') { // ──────────────────────────────────────────────
// Stage 3. Deploy
// - K8s manifest 렌더링 후 kubectl apply
// - imagePullSecret 없으면 platform 네임스페이스에서 복제
// - rollout status 로 배포 완료 대기 (최대 10분)
// - spec 변경 없으면 rollout restart 실행
// ──────────────────────────────────────────────
stage('Deploy') {
steps { steps {
container('tooling') { container('tooling') {
sh ''' sh '''
set -euo pipefail set -euo pipefail
# ✅ B) imagePullSecret 복제 (platform -> ${NAMESPACE}) # imagePullSecret 복제 (platform → ${NAMESPACE})
# secret 이름도 IMAGE_PULL_SECRET로 통일
if [ -n "${IMAGE_PULL_SECRET}" ] && ! kubectl -n "${NAMESPACE}" get secret "${IMAGE_PULL_SECRET}" >/dev/null 2>&1; then if [ -n "${IMAGE_PULL_SECRET}" ] && ! kubectl -n "${NAMESPACE}" get secret "${IMAGE_PULL_SECRET}" >/dev/null 2>&1; then
echo "[INFO] Copying imagePullSecret '${IMAGE_PULL_SECRET}' platform -> ${NAMESPACE} ..." echo "[Deploy] imagePullSecret '${IMAGE_PULL_SECRET}' 복제 중 (platform → ${NAMESPACE})"
{% raw %} {% raw %}
kubectl -n "${platform_namespace}" get secret "${IMAGE_PULL_SECRET}" -o go-template='{{ index .data ".dockerconfigjson" }}' > .dockercfg.b64 kubectl -n "${PLATFORM_NAMESPACE}" get secret "${IMAGE_PULL_SECRET}" -o go-template='{{ index .data ".dockerconfigjson" }}' > .dockercfg.b64
{% endraw %} {% endraw %}
base64 -d .dockercfg.b64 > .dockerconfigjson base64 -d .dockercfg.b64 > .dockerconfigjson
kubectl -n "${NAMESPACE}" create secret generic "${IMAGE_PULL_SECRET}" \ kubectl -n "${NAMESPACE}" create secret generic "${IMAGE_PULL_SECRET}" \
@@ -103,62 +118,206 @@ pipeline {
rm -f .dockercfg.b64 .dockerconfigjson rm -f .dockercfg.b64 .dockerconfigjson
fi fi
# ✅ C) 배포 전 Deployment generation 저장 # 배포 전 현재 generation 저장 (spec 변경 감지용)
PREV_GEN="" PREV_GEN=""
if kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" >/dev/null 2>&1; then if kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" >/dev/null 2>&1; then
PREV_GEN="$(kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" -o jsonpath='{.metadata.generation}' 2>/dev/null || true)" PREV_GEN="$(kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" -o jsonpath='{.metadata.generation}' 2>/dev/null || true)"
fi fi
echo "[INFO] PREV_GEN=${PREV_GEN:-<none>} deploy=${RELEASE_NAME}" echo "[Deploy] PREV_GEN=${PREV_GEN:-<none>}"
# ✅ D) repo의 manifests 템플릿을 렌더링 후 apply # manifest 렌더링 (envsubst 로 배포마다 바뀌는 값 주입)
# - k8s/*.yaml 은 Jinja로 이미 한번 렌더된 상태(노드 생성 시점)
# - 여기서는 배포마다 바뀌는 값(RELEASE_NAME, FULL_IMAGE, BRANCH_NAME)만 envsubst로 주입
rm -rf .rendered && mkdir -p .rendered rm -rf .rendered && mkdir -p .rendered
# envsubst 치환 대상 화이트리스트
export RELEASE_NAME FULL_IMAGE BRANCH_NAME export RELEASE_NAME FULL_IMAGE BRANCH_NAME
for f in k8s/*.yaml; do for f in k8s/*.yaml; do
echo "[INFO] render $f" echo "[Deploy] rendering $f"
envsubst '${RELEASE_NAME} ${FULL_IMAGE} ${BRANCH_NAME}' < "$f" > ".rendered/$(basename "$f")" envsubst '${RELEASE_NAME} ${FULL_IMAGE} ${BRANCH_NAME}' < "$f" > ".rendered/$(basename "$f")"
done done
echo "---- Rendered files ----" echo "[Deploy] --- Rendered files ---"
ls -al .rendered ls -al .rendered
kubectl -n "${NAMESPACE}" apply -f .rendered/ kubectl -n "${NAMESPACE}" apply -f .rendered/
# ✅ E) 롤아웃 대기(Deployment가 이 이름으로 생성된다는 전제) # rollout 완료 대기 (최대 10분)
echo "[Deploy] rollout status 대기 중..."
kubectl -n "${NAMESPACE}" rollout status deploy "${RELEASE_NAME}" --timeout=10m kubectl -n "${NAMESPACE}" rollout status deploy "${RELEASE_NAME}" --timeout=10m
# ✅ F) generation 비교 → 안 바뀌면 rollout restart # spec 변경 없으면 rollout restart
POST_GEN="$(kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" -o jsonpath='{.metadata.generation}' 2>/dev/null || true)" POST_GEN="$(kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" -o jsonpath='{.metadata.generation}' 2>/dev/null || true)"
echo "[INFO] POST_GEN=${POST_GEN:-<none>} deploy=${RELEASE_NAME}" echo "[Deploy] POST_GEN=${POST_GEN:-<none>}"
if [ -n "${PREV_GEN}" ] && [ -n "${POST_GEN}" ] && [ "${PREV_GEN}" = "${POST_GEN}" ]; then if [ -n "${PREV_GEN}" ] && [ -n "${POST_GEN}" ] && [ "${PREV_GEN}" = "${POST_GEN}" ]; then
echo "[INFO] Deployment spec unchanged (generation=${POST_GEN}). Running rollout restart..." echo "[Deploy] Spec 변경 없음 (generation=${POST_GEN}). rollout restart 실행"
kubectl -n "${NAMESPACE}" rollout restart deploy "${RELEASE_NAME}" kubectl -n "${NAMESPACE}" rollout restart deploy "${RELEASE_NAME}"
kubectl -n "${NAMESPACE}" rollout status deploy "${RELEASE_NAME}" --timeout=10m kubectl -n "${NAMESPACE}" rollout status deploy "${RELEASE_NAME}" --timeout=10m
else else
echo "[INFO] Deployment spec changed (or first install). Skip rollout restart." echo "[Deploy] Spec 변경 감지 또는 최초 배포. rollout restart skip"
fi fi
echo '--- STATUS ---' echo "[Deploy] --- 배포 후 리소스 상태 ---"
kubectl -n "${NAMESPACE}" get deploy,po,svc -l watcher.nodeId="${NODE_ID}",watcher.role="${NODE_TYPE}" || true kubectl -n "${NAMESPACE}" get deploy,po,svc \
-l watcher.nodeId="${NODE_ID}",watcher.role="${NODE_TYPE}" || true
'''
}
}
post {
failure {
container('tooling') {
echo '[Deploy] ★ DEPLOY FAILED ★'
sh '''
echo "[Deploy] --- Deployment 상태 ---"
kubectl -n "${NAMESPACE}" describe deploy "${RELEASE_NAME}" || true
echo "[Deploy] --- Pod 이벤트 ---"
kubectl -n "${NAMESPACE}" get events \
--field-selector involvedObject.name="${RELEASE_NAME}" \
--sort-by='.lastTimestamp' || true
''' '''
} }
} }
} }
} }
// ──────────────────────────────────────────────
// Stage 4. Verify
// - rollout 완료 후 Pod 실제 정상 여부 최종 검증
// - Ready condition + restartCount 확인
// - CrashLoopBackOff 즉시 감지
// - 성공 시: Pod 기동 로그 첫 30줄 출력
// - 실패 시: Pod 로그 마지막 50줄 + describe 출력 → Poller 가 수집
// ──────────────────────────────────────────────
stage('Verify') {
steps {
container('tooling') {
sh '''
set -euo pipefail
MAX_WAIT=120
INTERVAL=5
ELAPSED=0
echo "[Verify] Pod 상태 확인 시작 (최대 ${MAX_WAIT}s)"
while [ $ELAPSED -lt $MAX_WAIT ]; do
POD_NAME="$(kubectl -n "${NAMESPACE}" get po \
-l app.kubernetes.io/name="${RELEASE_NAME}" \
--sort-by=.metadata.creationTimestamp \
-o jsonpath='{.items[-1].metadata.name}' 2>/dev/null || true)"
if [ -z "$POD_NAME" ]; then
echo "[Verify] elapsed=${ELAPSED}s | Pod 아직 없음, 대기 중..."
sleep $INTERVAL
ELAPSED=$((ELAPSED + INTERVAL))
continue
fi
PHASE="$(kubectl -n "${NAMESPACE}" get po "${POD_NAME}" \
-o jsonpath='{.status.phase}' 2>/dev/null || true)"
READY="$(kubectl -n "${NAMESPACE}" get po "${POD_NAME}" \
-o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null || true)"
RESTART_COUNT="$(kubectl -n "${NAMESPACE}" get po "${POD_NAME}" \
-o jsonpath='{.status.containerStatuses[0].restartCount}' 2>/dev/null || echo '0')"
WAITING_REASON="$(kubectl -n "${NAMESPACE}" get po "${POD_NAME}" \
-o jsonpath='{.status.containerStatuses[0].state.waiting.reason}' 2>/dev/null || true)"
echo "[Verify] elapsed=${ELAPSED}s | Pod=${POD_NAME} | Phase=${PHASE} | Ready=${READY} | Restarts=${RESTART_COUNT} | WaitReason=${WAITING_REASON}"
# CrashLoopBackOff 즉시 실패 처리
if [ "${WAITING_REASON}" = "CrashLoopBackOff" ]; then
echo "[Verify] ★ VERIFY FAILED ★ CrashLoopBackOff 감지"
echo "[Verify] --- Pod 로그 (마지막 50줄) ---"
kubectl -n "${NAMESPACE}" logs "${POD_NAME}" --tail=50 || true
echo "[Verify] --- 이전 컨테이너 로그 ---"
kubectl -n "${NAMESPACE}" logs "${POD_NAME}" --previous --tail=50 || true
echo "[Verify] --- Pod describe ---"
kubectl -n "${NAMESPACE}" describe po "${POD_NAME}" || true
exit 1
fi
# Phase Failed 즉시 실패 처리
if [ "${PHASE}" = "Failed" ]; then
echo "[Verify] ★ VERIFY FAILED ★ Pod Phase=Failed"
echo "[Verify] --- Pod 로그 (마지막 50줄) ---"
kubectl -n "${NAMESPACE}" logs "${POD_NAME}" --tail=50 || true
echo "[Verify] --- Pod describe ---"
kubectl -n "${NAMESPACE}" describe po "${POD_NAME}" || true
exit 1
fi
# restartCount > 0 이면 불안정 판정
if [ "${RESTART_COUNT}" -gt 0 ]; then
echo "[Verify] ★ VERIFY FAILED ★ 재시작 감지 (restartCount=${RESTART_COUNT})"
echo "[Verify] --- Pod 로그 (마지막 50줄) ---"
kubectl -n "${NAMESPACE}" logs "${POD_NAME}" --tail=50 || true
echo "[Verify] --- 이전 컨테이너 로그 ---"
kubectl -n "${NAMESPACE}" logs "${POD_NAME}" --previous --tail=50 || true
echo "[Verify] --- Pod describe ---"
kubectl -n "${NAMESPACE}" describe po "${POD_NAME}" || true
exit 1
fi
# Ready=True 확인 → 최종 성공
if [ "${READY}" = "True" ]; then
echo "[Verify] ✅ VERIFY SUCCESS"
echo "[Verify] Pod=${POD_NAME} | Phase=${PHASE} | Ready=${READY} | Restarts=${RESTART_COUNT}"
echo "[Verify] --- Pod 기동 로그 (첫 30줄) ---"
kubectl -n "${NAMESPACE}" logs "${POD_NAME}" | head -30 || true
echo "[Verify] --- 최종 리소스 상태 ---"
kubectl -n "${NAMESPACE}" get deploy,po,svc \
-l watcher.nodeId="${NODE_ID}",watcher.role="${NODE_TYPE}" || true
exit 0
fi
sleep $INTERVAL
ELAPSED=$((ELAPSED + INTERVAL))
done
# 타임아웃
echo "[Verify] ★ VERIFY FAILED ★ 타임아웃 (${MAX_WAIT}s 초과)"
echo "[Verify] --- Pod 로그 (마지막 50줄) ---"
kubectl -n "${NAMESPACE}" logs "${POD_NAME}" --tail=50 || true
echo "[Verify] --- Pod describe ---"
kubectl -n "${NAMESPACE}" describe po "${POD_NAME}" || true
exit 1
'''
}
}
post { post {
failure {
container('tooling') {
echo '[Verify] ★ VERIFY FAILED - post ★'
sh '''
echo "[Verify] --- Deployment 최종 상태 ---"
kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" -o wide || true
echo "[Verify] --- Pod 최종 상태 ---"
kubectl -n "${NAMESPACE}" get po \
-l app.kubernetes.io/name="${RELEASE_NAME}" -o wide || true
'''
}
}
}
}
}
post {
success {
echo '✅ DEPLOY SUCCESS'
echo "NODE_ID=${NODE_ID} | BRANCH=${env.BRANCH_NAME} | COMMIT=${env.GIT_COMMIT_HASH}"
}
failure {
echo '★ DEPLOY FAILED ★'
echo "NODE_ID=${NODE_ID} | BRANCH=${env.BRANCH_NAME} | COMMIT=${env.GIT_COMMIT_HASH}"
}
always { always {
container('tooling') { container('tooling') {
sh ''' sh '''
set +e echo "--- FINAL SUMMARY ---"
echo '--- FINAL SUMMARY ---'
kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" -o wide || true kubectl -n "${NAMESPACE}" get deploy "${RELEASE_NAME}" -o wide || true
kubectl -n "${NAMESPACE}" get po -l app.kubernetes.io/name="${RELEASE_NAME}" -o wide || true kubectl -n "${NAMESPACE}" get po \
-l app.kubernetes.io/name="${RELEASE_NAME}" -o wide || true
kubectl -n "${NAMESPACE}" get svc "${RELEASE_NAME}" -o wide || true kubectl -n "${NAMESPACE}" get svc "${RELEASE_NAME}" -o wide || true
''' '''
} }