SSH_KEY_PUBLIC=~/.ssh/id_rsa.pub
SSH_KEY_PRIVATE=~/.ssh/id_rsa
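# If you don't already have a key pair at these paths, generate one first
# (a minimal sketch using OpenSSH's ssh-keygen; no passphrase here for
# brevity, adjust key type and size to your own policy):
[ -f "${SSH_KEY_PRIVATE}" ] || ssh-keygen -t rsa -b 4096 -f "${SSH_KEY_PRIVATE}" -N ""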
AZURE_RG=test
AZURE_AKS_CLUSTER_NAME=test
AKS_AGENT_PREFIX=sbox
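# Check current node status; note the INTERNAL-IP and KERNEL-VERSION columns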
kubectl get nodes --output wide
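# Look up the auto-generated resource group that holds the node VMs
# (typically named MC_<resource group>_<cluster name>_<region>)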
CLUSTER_RESOURCE_GROUP=$(az aks show \
--resource-group ${AZURE_RG} \
--name ${AZURE_AKS_CLUSTER_NAME} \
--query nodeResourceGroup \
--output tsv)
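# List the VMs backing the cluster nodes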
az vm list \
--resource-group ${CLUSTER_RESOURCE_GROUP} \
--output table
# Also make a note of the chosen node's internal IP address (shown in the
# INTERNAL-IP column of the kubectl get nodes --output wide output above)
CLUSTER_NODE= # Pick one from the list above and put its value here
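# If you prefer, capture the internal IP with kubectl's jsonpath support
# (a convenience sketch; NODE_IP just records the address you will type
# into the ssh command later, and assumes the node name matches the VM name):
NODE_IP=$(kubectl get node ${CLUSTER_NODE} \
-o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
# Install your public key on the node's azureuser account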
az vm user update \
--resource-group ${CLUSTER_RESOURCE_GROUP} \
--name ${CLUSTER_NODE} \
--username azureuser \
--ssh-key-value ${SSH_KEY_PUBLIC}
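# Drain the node so its workloads are rescheduled onto other nodes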
kubectl drain ${CLUSTER_NODE} \
--ignore-daemonsets \
--delete-emptydir-data # formerly --delete-local-data (renamed in kubectl v1.20)
# WAIT FOR ALL EVICTED PODS TO BE RESCHEDULED AND READY!
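# One way to keep an eye on that across all namespaces:
watch "kubectl get pods --all-namespaces"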
# In a new terminal window, run:
# kubectl v1.18+ creates a bare pod by default; on older releases add
# --generator=run-pod/v1 (the flag has since been removed)
kubectl run -it --rm aks-ssh \
--image=debian \
--labels="app=basshtion"
# Back in the first terminal
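# Copy the private key into the pod so it can SSH to the node
# (the debian image's working directory is /, so the key lands at /id_rsa)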
kubectl cp ${SSH_KEY_PRIVATE} aks-ssh:/id_rsa
# Over in the second terminal window,
# which should now be running a Debian shell inside the aks-ssh pod
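# Lock down the key permissions and install an SSH client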
chmod 600 id_rsa && \
apt-get update && \
apt-get install openssh-client -y
ssh -i id_rsa azureuser@<IP address noted above>
# This drops you into a shell on the node itself
# If a reboot is pending, the login banner includes a line like:
# *** System restart required ***
sudo reboot
# You will be kicked out of the node
# and returned to the Debian shell
# Press Ctrl+D to exit that shell, which terminates the aks-ssh pod
# (it was started with --rm, so it is cleaned up automatically)
# Back in the first terminal
# Keep an eye on the node's KERNEL-VERSION column until it changes
watch "kubectl get nodes -o wide"
# Finally uncordon the node
kubectl uncordon ${CLUSTER_NODE}
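# Confirm the node reports Ready and is accepting workloads again
kubectl get node ${CLUSTER_NODE}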