This project is a product of team Enigma for the 2020 Capstone Project Assignment of "COS301 - Software Engineering" at the University of Pretoria.
- For more information about the team, check out our website!
- For our other related project modules, see our API and Mobile repositories.
Note: This repository holds configuration files only and as such does not contain all of the information regarding our project and its progress. To view more about our project, please navigate to either our API or Mobile repository.
A centralized repository for the Truckin-IT Docker images and Kubernetes cluster configuration files
Please be aware that this repository is mirrored from GitLab → GitHub. If you are viewing this repository on GitHub, we suggest navigating to the GitLab repository instead.
This repository contains the YAML configuration files and documentation required to deploy and maintain the Truckin-IT backend system on a Kubernetes (k8s) cluster.
The main purpose of this document is to provide detailed instructions on how to set up a k8s cluster that can then be used to host the Truckin-IT backend system.
Note: The steps found in this documentation make a number of simplifying assumptions (such as using a Debian-based image for the cluster nodes). This by no means implies that the system cannot be deployed in a different manner.
Note: All commands should be run on the control-plane/master node unless otherwise stipulated.
Kubeadm is used to create the Kubernetes (k8s) cluster which runs all the microservices used in the system.
- Update the system
sudo apt update && sudo apt upgrade -y
- Install initial apt packages
sudo apt install -y git nmap zsh apt-transport-https curl gnupg2 docker.io vim
- Install oh-my-zsh
sh -c "$(wget https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh -O -)"
chsh -s /bin/zsh
- Enable bridged network traffic
sudo modprobe br_netfilter
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
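To confirm the settings took effect, the two flags can be read back; both should print 1 (a quick check, not part of the original steps):
# Read back the bridge netfilter flags set above
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables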
- Enable Docker
sudo systemctl enable --now docker
- Install Kubeadm
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt update
sudo apt install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
- Restart Kubelet
sudo systemctl daemon-reload
sudo systemctl restart kubelet
- Create cluster
sudo kubeadm init
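Depending on the pod network installed later, it may be necessary to declare the pod CIDR up front. A sketch, assuming Calico's default pool of 192.168.0.0/16 (this flag is not part of the original steps):
# Optional: pass the pod network CIDR so it matches the Calico manifest applied later
sudo kubeadm init --pod-network-cidr=192.168.0.0/16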
- Copy kube config
# Skip this copy (or merge the files manually) if your user already has a .kube/config set up
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# The contents of this .kube/config need to be copied to every machine that will be
# used to develop and deploy from
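One way to distribute the config is scp; the user and host below are hypothetical placeholders:
# Ensure the target directory exists, then copy (placeholder user/host)
ssh dev-user@dev-machine 'mkdir -p ~/.kube'
scp ~/.kube/config dev-user@dev-machine:~/.kube/config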
- Example kube config with multiple clusters (switch with kubectl config use-context)
apiVersion: v1
clusters:
- name: cluster-truckinit
  cluster:
    certificate-authority-data: LS0<REDACTED>
    server: https://<REDACTED>
- name: cluster-local
  cluster:
    certificate-authority-data: LS0<REDACTED>
    server: https://127.0.0.1:16443
contexts:
- name: context-truckinit
  context:
    cluster: cluster-truckinit
    user: user-truckinit
- name: context-local
  context:
    cluster: cluster-local
    user: user-local
users:
- name: user-truckinit
  user:
    auth-provider:
      config:
        access-token: ya29<REDACTED>
        cmd-args: config config-helper --format=json
        cmd-path: /usr/lib/google-cloud-sdk/bin/gcloud
        expiry: "2020-06-16T13:52:11Z"
        expiry-key: '{.credential.token_expiry}'
        token-key: '{.credential.access_token}'
      name: gcp
- name: user-local
  user:
    username: admin
    password: N1R<REDACTED>
current-context: context-truckinit
kind: Config
preferences: {}
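With a config like the one above in place, contexts can be listed and switched as follows:
# List all available contexts (the current one is marked with an asterisk)
kubectl config get-contexts
# Switch to the local cluster defined above
kubectl config use-context context-local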
- Install a Pod Network
# Required for communication between pods.
# Cluster DNS (CoreDNS) will not start up before this network is installed
# Using Calico (recommended by Kubeadm)
kubectl apply -f https://docs.projectcalico.org/v3.14/manifests/calico.yaml
# Verify that the installation was successful by checking that CoreDNS has started and is running
kubectl get pods --all-namespaces
- Enable scheduling on the control-plane node (can be skipped when deploying a multi-node cluster)
# Allows pods to be scheduled on the current master/control-plane node
kubectl taint nodes --all node-role.kubernetes.io/master-
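To confirm the taint is gone, inspect the node; an empty Taints field means pods can now be scheduled on it:
# Check the remaining taints on the node(s)
kubectl describe node | grep Taints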
- Install Helm package manager
# Note: this fetches the canary (development) build of Helm; a tagged release can be substituted
wget https://get.helm.sh/helm-canary-linux-amd64.tar.gz
mkdir tmp-helm
mv helm-*.tar.gz tmp-helm/
cd tmp-helm
tar xvf helm-*.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/
cd ../
rm -rf tmp-helm
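A quick sanity check that the binary is installed and on the PATH:
# Should print the Helm client version
helm version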
- Allow the required ports through the firewall
# Control-Plane Node(s)
# Port Range    Purpose                   Used By
# 6443          Kubernetes API server     All
# 2379-2380     etcd server client API    kube-apiserver, etcd
# 10250         Kubelet API               Self, Control Plane
# 10251         kube-scheduler            Self (no need to expose)
# 10252         kube-controller-manager   Self (no need to expose)
# Worker Node(s) (including the control-plane node if untainted)
# Port Range    Purpose                   Used By
# 10250         Kubelet API               Self, Control Plane
# 30000-32767   NodePort Services         All
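How these ports are opened depends on the firewall in use. A sketch for a control-plane node, assuming ufw (adjust for worker nodes as per the table above):
sudo ufw allow 6443/tcp
sudo ufw allow 2379:2380/tcp
sudo ufw allow 10250/tcp
# NodePort range, only needed on nodes that serve NodePort traffic
sudo ufw allow 30000:32767/tcp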
- Add nodes to the cluster
# Follow steps 1, 2, 3, 4, and 10 (on the new node)
# Get join command (on control-plane node)
kubeadm token create --print-join-command
# Join node to cluster using command above (on new node)
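The printed join command has roughly the following shape; all values below are placeholders produced by the command above:
# Run on the new node
sudo kubeadm join <control-plane-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>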
- Install MetalLB
# MetalLB provides load-balancer implementations for bare-metal clusters
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/metallb.yaml
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
- Install Nginx-ingress controller
kubectl create namespace ingress-nginx
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx/
helm install ingress-nginx ingress-nginx/ingress-nginx -n ingress-nginx
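The controller is exposed as a LoadBalancer service; its external IP will show as pending until the MetalLB configmap (deployed further below) is applied:
# Check whether the ingress controller has received an external IP
kubectl get svc -n ingress-nginx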
- Install Cert-Manager
# Used to generate Let's Encrypt certificates for the ingress
kubectl create namespace cert-manager
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager \
--version v0.16.1 \
--set installCRDs=true
- Install Loki
helm repo add loki https://grafana.github.io/loki/charts
helm repo update
helm upgrade --install loki loki/loki-stack
- Install Litmus (optional, used for chaos testing)
kubectl apply -f https://litmuschaos.github.io/litmus/litmus-operator-v1.7.0.yaml
# Verify that the chaos operator is running
kubectl get pods -n litmus
- Install Generic Chaos Experiment (optional, used for chaos testing)
kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.7.0?file=charts/generic/experiments.yaml
- Create Secrets Configurations
cd secrets
# Option 1: Set up new secrets from the example template (then fill in the real values)
cp .secrets-example.yml ./secrets.yml
# Option 2: Copy existing secrets
cp /tmp/secrets.yml ./
- Deploy Secrets
kubectl apply -f secrets/secrets.yml
- Deploy ConfigMaps
kubectl apply -f configmaps/
- Create persistent volumes
kubectl apply -f volumes/postgres-persistent-volume-prod.yml
- Create deployments, services, and persistent volume claims
kubectl apply -f deployments/
- Deploy MetalLB Configmap
# Set the node(s) IP address range in the configmap first (see the example below)
kubectl apply -f metallb/configmap.yml
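For reference, a minimal layer2 configuration for MetalLB v0.9 looks as follows; the address range is a placeholder and must be replaced with addresses reachable on the nodes' network:
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      # Placeholder range; use free addresses from the cluster's own subnet
      - 192.168.1.240-192.168.1.250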
- Create cluster issuer
kubectl apply -f cert-manager/cluster-issuer.yml
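The repository's cluster-issuer.yml is authoritative; for orientation, an ACME ClusterIssuer for cert-manager v0.16 typically looks like the sketch below (the name and email are placeholders):
apiVersion: cert-manager.io/v1beta1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    # Let's Encrypt production endpoint
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com
    privateKeySecretRef:
      # Secret that will store the ACME account key
      name: letsencrypt-prod
    solvers:
    - http01:
        ingress:
          class: nginx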
- Deploy Ingress
kubectl apply -f ingress/ingress.yml
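Likewise, ingress/ingress.yml is the source of truth; a sketch of an ingress wired to the nginx controller and a cert-manager cluster issuer (the host, service name, and issuer name are assumptions):
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: truckin-it
  annotations:
    kubernetes.io/ingress.class: nginx
    # Ask cert-manager to provision a certificate using the cluster issuer
    cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
  tls:
  - hosts:
    - api.example.com
    secretName: api-example-com-tls
  rules:
  - host: api.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: api
          servicePort: 80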
- Create Litmus Service Accounts (optional, used for chaos testing)
kubectl apply -f litmus/rbac.yml
- Create Litmus Chaos Engine (optional, used for chaos testing)
kubectl apply -f litmus/chaos-engine.yml