chore: add current provisioning state before migration

This commit is contained in:
Jesús Pérez 2025-09-22 23:11:41 +01:00
parent a9703b4748
commit 50745b0f22
660 changed files with 88126 additions and 0 deletions

View file

@@ -0,0 +1,4 @@
# Print the ceph-version label of each deployment belonging to the Rook
# cluster, one line per deployment, sorted by version string.
export ROOK_CLUSTER_NAMESPACE=rook-ceph
# Alternative kept for reference: collapse to unique versions only.
#kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | sort | uniq
# Quote the namespace expansions so the commands survive unusual values.
kubectl -n "$ROOK_CLUSTER_NAMESPACE" get deployment -l "rook_cluster=$ROOK_CLUSTER_NAMESPACE" -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version} {.metadata.name}{"\n"}{end}' | sort

View file

@@ -0,0 +1,10 @@
#!/bin/bash
# Report Rook/Ceph upgrade progress:
#   1) the Ceph image currently run by the mon-b pod,
#   2) replicas requested/updated/ready plus rook-version per deployment,
#   3) job completion status with rook-version labels.
ROOK_CLUSTER_NAMESPACE=rook-ceph
# Resolve the mon-b pod name; 'head -n 1' guards against multiple matches
# (e.g. a terminating pod alongside its replacement).
POD_NAME=$(kubectl -n "$ROOK_CLUSTER_NAMESPACE" get pod -o custom-columns=name:.metadata.name --no-headers | grep rook-ceph-mon-b | head -n 1)
kubectl -n "$ROOK_CLUSTER_NAMESPACE" get pod "${POD_NAME}" -o jsonpath='{.spec.containers[0].image}'
kubectl -n "$ROOK_CLUSTER_NAMESPACE" get deployments -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}'
kubectl -n "$ROOK_CLUSTER_NAMESPACE" get jobs -o jsonpath='{range .items[*]}{.metadata.name}{" \tsucceeded: "}{.status.succeeded}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}'

View file

@@ -0,0 +1,3 @@
#!/bin/bash
# List container images referenced by manifests in the current directory:
# grep " image:" lines, drop commented entries and files starting with "_",
# keep only real registry paths (containing "/"), print "file: image" pairs.
# fgrep/egrep are deprecated aliases — use grep -F / grep -E instead; '--'
# protects against filenames that begin with a dash.
grep -F " image:" -- * 2>/dev/null | grep -E -v "# " | grep -E -v "^_" | grep "/" | awk '{print $1" "$3}' | sort -u

View file

@@ -0,0 +1,6 @@
#!/bin/bash
# Query quay.io for the most recently modified ceph image tags matching a
# version prefix.
# Usage: ./script [tag-filter]    (defaults to "v16")
URL="https://quay.io/api/v1/repository/ceph/ceph/tag/?onlyActiveTags=false&limit=10"
TAG="${1:-v16}"
# Pass TAG through 'jq --arg' instead of splicing it into the jq program:
# the original '"'$TAG'"' interpolation broke on spaces/quotes and allowed
# arbitrary jq-code injection.
curl -s "$URL" | jq --arg tag "$TAG" '.tags | sort_by(.last_modified) | reverse | [.[] | select(.name | contains($tag))]'

View file

@@ -0,0 +1,3 @@
# Deploy the Rook CRDs, common resources, and the operator itself, then list
# the pods in the rook-ceph namespace to confirm the operator is starting.
kubectl create --filename crds.yaml --filename common.yaml --filename operator.yaml
kubectl get pod --namespace rook-ceph

View file

@@ -0,0 +1,6 @@
# Print the manual recipe for clearing a rook-ceph namespace stuck in
# Terminating state. The quoted delimiter keeps the text literal, exactly as
# the original single-quoted echo did.
cat <<'EOF'

RUN kubectl get namespace rook-ceph -o json > rook-ceph.json
Remove "finalizers in spec"
RUN: kubectl replace --raw "/api/v1/namespaces/rook-ceph/finalize" -f rook-ceph.json

EOF

View file

@@ -0,0 +1,3 @@
#!/bin/bash
# Show each pod name together with its container image lines for the
# rook-ceph namespace.
kubectl describe pods --namespace rook-ceph | grep -e "^Name: " -e "Image: "

View file

@@ -0,0 +1,4 @@
# Patch the CephCluster resource to roll the cluster onto a new Ceph image.
# The CephCluster object shares its name with the namespace here.
ROOK_CLUSTER_NAMESPACE=rook-ceph
NEW_CEPH_IMAGE='quay.io/ceph/ceph:v17.2.6-20230410'
# Quote the expansions; the -p payload already interpolates NEW_CEPH_IMAGE
# inside escaped JSON.
kubectl -n "$ROOK_CLUSTER_NAMESPACE" patch CephCluster "$ROOK_CLUSTER_NAMESPACE" --type=merge -p "{\"spec\": {\"cephVersion\": {\"image\": \"$NEW_CEPH_IMAGE\"}}}"

View file

@@ -0,0 +1,51 @@
#!/bin/bash
#
# Show (or update) the Ceph image used by the Rook-managed cluster.
#
# Usage:
#   script            one-shot report of the ceph images pods are running
#   script -w         watch the report
#   script update     patch CephCluster to NEW_CEPH_IMAGE, then watch rollout
#
# Image catalogue: https://quay.io/repository/ceph/ceph?tab=tags
#
# Previously used images, kept for reference:
#NEW_CEPH_IMAGE="ceph/ceph:v14.2.2-20190722"
#NEW_CEPH_IMAGE="ceph/ceph:v14.2.8-20200305"
#NEW_CEPH_IMAGE="ceph/ceph:v15.2.0-20200324"
#NEW_CEPH_IMAGE="ceph/ceph:v15.2.1-20200410"
#NEW_CEPH_IMAGE="ceph/ceph:v15.2.2-20200519"
#NEW_CEPH_IMAGE="ceph/ceph:v15.2.3-20200530"
#NEW_CEPH_IMAGE="ceph/ceph:v15.2.4-20200630"
#NEW_CEPH_IMAGE="ceph/ceph:v15.2.5-20200916"
##NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.4-20210514"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.5-20210708"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.6-20210926"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.6-20210927"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.7"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.7-20220303"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.7-20220317"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v17.1.0-20220317"
# cluster.yaml
# Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
# Future versions such as `pacific` would require this to be set to `true`.
# Do not set to true in production.
# allowUnsupported: false
# NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.7-20220317"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v16.2.10"
#NEW_CEPH_IMAGE="quay.io/ceph/ceph:v17.2.6-20230410"
NEW_CEPH_IMAGE="quay.io/ceph/ceph:v18.2.0-20230912"
# NOTE: a dead duplicate 'export ROOK_SYSTEM_NAMESPACE="rook-ceph-system"'
# that was immediately overwritten has been removed.
export ROOK_SYSTEM_NAMESPACE="rook-ceph"
export ROOK_NAMESPACE="rook-ceph"
CLUSTER_NAME="$ROOK_NAMESPACE" # change if your cluster name is not the Rook namespace
RUNNER=""
# ${1:-} keeps the test safe when no argument is given.
[ "${1:-}" == "-w" ] && RUNNER="watch" && shift
if [ "${1:-}" == "update" ] ; then
  [ -z "$RUNNER" ] && RUNNER="watch"
  kubectl -n "$ROOK_NAMESPACE" patch CephCluster "$CLUSTER_NAME" --type=merge \
     -p "{\"spec\": {\"cephVersion\": {\"image\": \"$NEW_CEPH_IMAGE\"}}}"
fi
# Count the ceph images currently referenced by pods in the Rook namespace.
CMD="kubectl -n $ROOK_NAMESPACE describe pods | grep 'Image:.*ceph/ceph' | sort | uniq -c"
#CMD="kubectl -n $ROOK_NAMESPACE describe pods | grep 'Image:.*ceph/ceph'"
if [ -z "$RUNNER" ] ; then
  eval "$CMD"
else
  # watch(1) joins its arguments and runs them through 'sh -c', so pass the
  # whole pipeline as a single quoted string instead of word-splitting it.
  "$RUNNER" "$CMD"
fi

View file

@@ -0,0 +1,2 @@
# Historical one-liners for pinning the Rook operator image to a specific
# release; kept commented out for reference when manually rolling the
# operator forward or back.
#kubectl -n rook-ceph-system set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.1.8
#kubectl -n $ROOK_SYSTEM_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.0.4

View file

@@ -0,0 +1,3 @@
# Watch rollout progress of every deployment belonging to the Rook cluster:
# replicas requested/updated/ready plus the rook-version label.
export ROOK_CLUSTER_NAMESPACE=rook-ceph
# NOTE: the original also passed a trailing '-o wide', which conflicts with
# '-o jsonpath' (the last -o flag wins), suppressing the custom columns; it
# has been removed so the jsonpath output actually prints.
watch --exec kubectl -n "$ROOK_CLUSTER_NAMESPACE" get deployments -l "rook_cluster=$ROOK_CLUSTER_NAMESPACE" -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}'

View file

@@ -0,0 +1 @@
# Refresh the rook-ceph pod listing every two seconds.
watch --interval 2 "kubectl get pods --namespace rook-ceph"