Sometimes, for automation that runs kubectl, you may need a service-account-based login rather than a user login. To set that up you need the required cluster access and a suitable ~/.kube/config file.
Here is how you can generate one. This is an example for AWS EKS cluster.
wget https://raw.githubusercontent.com/jayeshmahajan/k8s-utility/master/serviceaccount.sh
#!/bin/bash
#
# Create a Kubernetes service account, bind it to a ClusterRole, and
# emit a standalone kubeconfig file for it (AWS EKS example).
#
# Run in the context of the target account (e.g. dev):
#   ./serviceaccount.sh ClusterName CustomUser My_Env
set -euo pipefail

if [[ $# -ne 3 ]]; then
  echo "Usage: $0 <cluster-name> <service-account-name> <environment>" >&2
  exit 2
fi

_clustername=$1
_username_=$2
_env_=$3
export ROLE="cluster-admin"
export NS="kube-system"

echo "create service account ${_username_} for env ${_env_}"
kubectl create sa "${_username_}" -n "${NS}"

echo "Bind SA ${_username_} with ClusterRole ${ROLE} for environment ${_env_}"
kubectl create clusterrolebinding "${_username_}" \
  --serviceaccount="${NS}:${_username_}" \
  --clusterrole="${ROLE}"

# NOTE(review): relies on the auto-created SA token secret. Kubernetes 1.24+
# no longer auto-creates these; on such clusters create a token Secret first.
SECRET_NAME=$(kubectl get sa "${_username_}" -n "${NS}" -o json | jq -r .secrets[0].name)
# '--decode' works with both GNU (Linux) and BSD (macOS) base64; '-D' is macOS-only.
TOKEN=$(kubectl get secrets "${SECRET_NAME}" -n "${NS}" -o json | jq -r .data.token | base64 --decode)
CA=$(kubectl get secrets "${SECRET_NAME}" -n "${NS}" -o json | jq -r '.data | .["ca.crt"]')
SERVER=$(aws eks describe-cluster --name "${_clustername}" | jq -r .cluster.endpoint)

# Kubeconfig must be properly nested YAML: user/cluster/context details sit
# under their list items (the flat layout produces an invalid config).
cat <<EOF > "${_username_}-${_env_}.yaml"
apiVersion: v1
kind: Config
users:
- name: ${_username_}
  user:
    token: ${TOKEN}
clusters:
- cluster:
    certificate-authority-data: ${CA}
    server: ${SERVER}
  name: ${_username_}
contexts:
- context:
    cluster: ${_username_}
    user: ${_username_}
  name: ${_username_}
current-context: ${_username_}
EOF

echo "Created kubeconfig ${_username_}-${_env_}.yaml"
bash serviceaccount.sh ClusterName ServiceAccount Environment
kubectl get nodes --kubeconfig ServiceAccount-Environment.yaml # replace the yaml file with the one that's generated as part of the output.