Skip to content

Commit

Permalink
Add v3
Browse files Browse the repository at this point in the history
  • Loading branch information
mquhuy committed Jul 24, 2023
1 parent de5e4b9 commit e9c1fc6
Show file tree
Hide file tree
Showing 59 changed files with 4,656 additions and 0 deletions.
12 changes: 12 additions & 0 deletions Support/Multitenancy/ironic-env/v3/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Generated artifacts and downloaded tooling for the v3 multi-ironic setup.
*.xml
*.json
*.log
# Client wrapper generated by 05-apply-manifests.sh
ironicclient.sh
_clouds_yaml/*
# Binaries downloaded by 01-vm-setup.sh
kubectl
minikube-linux-amd64
# NOTE(review): presumably produced by generate_unique_nodes.sh — confirm
macaddrs
uuids
sushy-tools-conf/*
logs/*
bmc-*.yaml
111 changes: 111 additions & 0 deletions Support/Multitenancy/ironic-env/v3/01-vm-setup.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
#!/bin/bash
# Host setup: installs KVM/libvirt and tooling, defines the libvirt
# 'provisioning' and 'baremetal' networks, creates the matching
# NetworkManager bridge connections, and installs minikube + kubectl.
# Must run as root (invoked via sudo from Init-environment.sh).
set -e
# Install kvm for minikube plus supporting tools.
dnf -y install qemu-kvm libvirt virt-install net-tools podman firewalld
systemctl enable --now libvirtd
systemctl start firewalld
systemctl enable firewalld
# Create provisioning network (plain bridge, dnsmasq cache disabled).
cat <<EOF >provisioning.xml
<network
xmlns:dnsmasq='http://libvirt.org/schemas/network/dnsmasq/1.0'>
<dnsmasq:options>
<!-- Risk reduction for CVE-2020-25684, CVE-2020-25685, and CVE-2020-25686. See: https://access.redhat.com/security/vulnerabilities/RHSB-2021-001 -->
<dnsmasq:option value="cache-size=0"/>
</dnsmasq:options>
<name>provisioning</name>
<bridge name='provisioning'/>
<forward mode='bridge'></forward>
</network>
EOF

# Create baremetal network (NAT, static DHCP leases for node-0/node-1).
cat <<EOF >baremetal.xml
<network xmlns:dnsmasq='http://libvirt.org/schemas/network/dnsmasq/1.0'>
<name>baremetal</name>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='baremetal' stp='on' delay='0'/>
<domain name='ostest.test.metalkube.org' localOnly='yes'/>
<dns>
<forwarder domain='apps.ostest.test.metalkube.org' addr='127.0.0.1'/>
</dns>
<ip address='192.168.111.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.111.20' end='192.168.111.60'/>
<host mac='00:5c:52:31:3b:9c' name='node-0' ip='192.168.111.20'>
<lease expiry='60' unit='minutes'/>
</host>
<host mac='00:5c:52:31:3b:ad' name='node-1' ip='192.168.111.21'>
<lease expiry='60' unit='minutes'/>
</host>
</dhcp>
</ip>
<dnsmasq:options>
<dnsmasq:option value='cache-size=0'/>
</dnsmasq:options>
</network>
EOF
# Define, start and autostart both networks.
virsh net-define baremetal.xml
virsh net-start baremetal
virsh net-autostart baremetal

virsh net-define provisioning.xml
virsh net-start provisioning
virsh net-autostart provisioning
# Overwrite the connection profile. The previous 'tee -a' appended, which
# duplicated the whole profile every time this script was re-run (the
# baremetal profile below already used plain 'tee').
tee /etc/NetworkManager/system-connections/provisioning.nmconnection <<EOF
[connection]
id=provisioning
type=bridge
interface-name=provisioning
[bridge]
stp=false
[ipv4]
address1=172.22.0.1/24
method=manual
[ipv6]
addr-gen-mode=eui64
method=disabled
EOF

chmod 600 /etc/NetworkManager/system-connections/provisioning.nmconnection
nmcli con load /etc/NetworkManager/system-connections/provisioning.nmconnection
nmcli con up provisioning

tee /etc/NetworkManager/system-connections/baremetal.nmconnection <<EOF
[connection]
id=baremetal
type=bridge
interface-name=baremetal
autoconnect=true
[bridge]
stp=false
[ipv6]
addr-gen-mode=stable-privacy
method=ignore
EOF

chmod 600 /etc/NetworkManager/system-connections/baremetal.nmconnection
nmcli con load /etc/NetworkManager/system-connections/baremetal.nmconnection
nmcli con up baremetal

# Install minikube (pinned to v1.25.2).
curl -LO https://storage.googleapis.com/minikube/releases/v1.25.2/minikube-linux-amd64
install minikube-linux-amd64 /usr/local/bin/minikube
# Install kubectl (latest stable release).
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

# Install Helm — disabled. NOTE(review): the wget URL below contains an
# unresolved "$(unknown)" placeholder and must be fixed before re-enabling.
# helm_api="https://api.github.com/repos/helm/helm/releases"
# curl -sL "${helm_api}" > helm_releases.txt
# helm_release_tag="$(cat helm_releases.txt | jq -r ".[].tag_name" | head -n 1 )"
# rm -f helm_releases.txt
# filename="helm-${helm_release_tag}-linux-amd64.tar.gz"
# wget -O "$filename.tar.gz" "https://get.helm.sh/$(unknown)"
# tar -xf "$filename.tar.gz"
# install -o root -g root -m 0755 linux-amd64/helm /usr/local/bin/helm
# rm -rf "$filename.tar.gz" linux-amd64 minikube-linux-amd64 kubectl
16 changes: 16 additions & 0 deletions Support/Multitenancy/ironic-env/v3/02-configure-minikube.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
#!/bin/bash
# Pre-create the minikube VM with the kvm2 driver, retrying until
# 'minikube start' succeeds, then stop it so later scripts can reconfigure it.
set -e
minikube config set driver kvm2
minikube config set memory 4096
sudo usermod --append --groups libvirt "$(whoami)"
while true; do
  minikube_error=0
  minikube start --insecure-registry 172.22.0.1:5000 || minikube_error=1
  if [[ $minikube_error -eq 0 ]]; then
    break
  fi
  # Clean up the failed attempt before retrying.
  sudo su -l -c 'minikube delete --all --purge' "${USER}"
  # The default libvirt bridge may linger; ignore the error if it is absent.
  # (Was 'sudo ip link delete virbr0 | true' — a pipe into true, clearly a
  # typo for the '||' fallback used here.)
  sudo ip link delete virbr0 || true
done
minikube stop

31 changes: 31 additions & 0 deletions Support/Multitenancy/ironic-env/v3/04-start-minikube.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
#!/bin/bash
set -e

# Bring minikube up with the local insecure registry enabled.
minikube start --insecure-registry 172.22.0.1:5000

# Inside the minikube VM: create the 'ironicendpoint' bridge and attach eth2.
sudo su -l -c "minikube ssh sudo brctl addbr ironicendpoint" "${USER}"
sudo su -l -c "minikube ssh sudo ip link set ironicendpoint up" "${USER}"
sudo su -l -c "minikube ssh sudo brctl addif ironicendpoint eth2" "${USER}"

sleep 10

IRONIC_DATA_DIR="${IRONIC_DATA_DIR:-/opt/metal3/ironic/}"

# Assign one provisioning IP per ironic instance to the bridge.
read -ra endpoint_ips <<< "${IRONIC_ENDPOINTS}"
for endpoint_ip in "${endpoint_ips[@]}"; do
  sudo su -l -c "minikube ssh sudo ip addr add ${endpoint_ip}/24 dev ironicendpoint" "${USER}"
done

# sudo su -l -c "minikube mount $IRONIC_DATA_DIR:/mnt"
# Firewall rules for the services the environment exposes.
tcp_public_ports=(8000 80 9999 6385 5050 6180 53 5000)
udp_libvirt_ports=(69 547 546 68 67 5353 6230 6231 6232 6233 6234 6235 9999)
for p in "${tcp_public_ports[@]}"; do
  sudo firewall-cmd --zone=public --add-port="${p}/tcp"
done
for p in "${udp_libvirt_ports[@]}"; do
  sudo firewall-cmd --zone=libvirt --add-port="${p}/udp"
done
sudo firewall-cmd --zone=libvirt --add-port=9999/tcp

# One extra TCP port per sushy-tools instance (8001..8000+N_SUSHY).
for (( idx = 1; idx <= ${N_SUSHY:-5}; idx++ )); do
  sushy_port=$(( 8000 + idx ))
  sudo firewall-cmd --zone=public --add-port="${sushy_port}/tcp"
  sudo firewall-cmd --zone=libvirt --add-port="${sushy_port}/tcp"
done
64 changes: 64 additions & 0 deletions Support/Multitenancy/ironic-env/v3/05-apply-manifests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
#!/bin/bash
# Deploy cert-manager, install the ironic helm chart (one replica per
# endpoint in IRONIC_ENDPOINTS), apply the BMO TLS kustomization, and set
# up a containerized "baremetal" client wrapper.
set -e

kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml

kubectl -n cert-manager wait --for=condition=available deployment/cert-manager-webhook --timeout=300s
kubectl -n cert-manager wait --for=condition=available deployment/cert-manager-cainjector --timeout=300s
kubectl -n cert-manager wait --for=condition=available deployment/cert-manager --timeout=300s

if [[ ! -f ~/.ssh/id_rsa.pub ]]; then
  # Generate the key at the exact path read below. The previous plain
  # 'ssh-keygen -t ed25519' wrote ~/.ssh/id_ed25519 and left
  # ~/.ssh/id_rsa.pub still missing, so the 'cat' below failed on fresh hosts.
  ssh-keygen -t ed25519 -f ~/.ssh/id_rsa -N ''
fi
# Install ironic. Use global substitution ('//') so EVERY space becomes a
# comma: the previous '${IRONIC_ENDPOINTS/ /\,}' replaced only the FIRST
# space, silently dropping all but the first two endpoints from the list.
helm install ironic ironic --set sshKey="$(cat ~/.ssh/id_rsa.pub)" --set ironicReplicas="{${IRONIC_ENDPOINTS// /,}}" --wait

kustomize build "$HOME/baremetal-operator/config/tls" | kubectl apply -f -

ironic_client="ironicclient.sh"
openstack_dir="${PWD}/_clouds_yaml"
rm -rf "${openstack_dir}"
mkdir -p "${openstack_dir}"
cp /opt/metal3-dev-env/ironic/certs/ironic-ca.pem "${openstack_dir}/ironic-ca.crt"
cat << EOT >"${openstack_dir}/clouds.yaml"
clouds:
  metal3:
    auth_type: none
    baremetal_endpoint_override: https://172.22.0.2:6385
    baremetal_introspection_endpoint_override: https://172.22.0.2:5050
    verify: false
EOT

# Long-lived client container; the wrapper script below execs into it.
sudo podman run --net=host --tls-verify=false \
  --name openstack-client \
  --detach \
  --entrypoint='["/bin/sleep", "inf"]' \
  -v "${openstack_dir}:/etc/openstack" \
  -e OS_CLOUD="${OS_CLOUD:-metal3}" \
  "172.22.0.1:5000/localimages/ironic-client"


# Generate the wrapper. Unescaped expansions (${openstack_dir}) are resolved
# NOW; escaped ones (\$1, \$@) are evaluated when the wrapper runs.
cat << EOT >"${ironic_client}"
#!/bin/bash
if [ -d "${openstack_dir}" ]; then
MOUNTDIR="${openstack_dir}"
else
echo "cannot find ${openstack_dir}"
exit 1
fi
# Allow invoking the wrapper as 'baremetal <cmd>' or just '<cmd>'.
if [ "\${1:-}" == "baremetal" ] ; then
shift 1
fi
# shellcheck disable=SC2086
sudo podman exec openstack-client /usr/bin/baremetal "\$@"
EOT

sudo chmod a+x "${ironic_client}"
sudo ln -sf "$PWD/${ironic_client}" "/usr/local/bin/baremetal"
25 changes: 25 additions & 0 deletions Support/Multitenancy/ironic-env/v3/Init-environment.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#!/bin/bash
# Entry point: builds the patched sushy-tools image, provisions the host,
# starts minikube and the fake-node containers, deploys ironic/BMO, then
# creates one workload cluster per node.
set -e
# On interrupt/exit, kill the whole process group (background helpers too).
trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
__dir__=$(realpath "$(dirname "$0")")
# Provides N_NODES, IRONIC_ENDPOINTS, batch sizes, etc.
. ./config.sh
# This is temporarily required since https://review.opendev.org/c/openstack/sushy-tools/+/875366 has not been merged.
./build-sushy-tools-image.sh
sudo ./01-vm-setup.sh
./02-configure-minikube.sh
sudo ./handle-images.sh
./generate_unique_nodes.sh
./start_containers.sh
./04-start-minikube.sh
./05-apply-manifests.sh
kubectl -n baremetal-operator-system wait --for=condition=available deployment/baremetal-operator-controller-manager --timeout=300s
kubectl create ns metal3
clusterctl init --infrastructure=metal3
./start_fake_etcd.sh
python create_nodes.py
# rm -f /tmp/test-hosts.yaml
# ./produce-available-hosts.sh > /tmp/test-hosts.yaml
# One cluster per node, created sequentially (N_NODES comes from config.sh).
for i in $(seq 1 "${N_NODES}"); do
  sleep 20
  ./create-clusters-v2.sh
done
55 changes: 55 additions & 0 deletions Support/Multitenancy/ironic-env/v3/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# Multiple ironics setup

## Purposes
- This setup is a part of the study to deploy multiple instances of `ironic-conductor` to increase provisioning capacity.
- It makes use of the new [IPA simulating tool](https://review.opendev.org/c/openstack/sushy-tools/+/875366), which allows simulating inspection and provisioning for multiple bare-metal nodes without the need for real hardware.
- One purpose of this study is to investigate whether the current `ironic` pod could be divided into smaller parts, and whether `ironic` is able to scale.

## Requirements

- Machine: `4c / 16gb / 100gb`
- OS: `CentOS9-20220330`

## Configuration
- Configs can be set in `config.sh`:
- `N_NODES`: Number of nodes to create and inspect
- `NODE_CREATE_BATCH_SIZE`: Number of nodes to create at one time before starting inspection.
- `NODE_INSPECT_BATCH_SIZE`: The size of the batch of nodes that are inspected together at one time.
- `IRONIC_ENDPOINTS`: The endpoints of ironics to use, separated by spaces. The number of endpoints put in here equals the number of ironics that will be used.

- Example config:
```
N_NODES=1000
NODE_CREATE_BATCH_SIZE=100
NODE_INSPECT_BATCH_SIZE=30
IRONIC_ENDPOINTS="172.22.0.2 172.22.0.3 172.22.0.4 172.22.0.5"
```

This config means that there will be, in total, 1000 (fake) nodes created in batches of 100, from which batches of 30 will be inspected together. In detail:
- The first 100 nodes are created (nodes `fake1` to `fake100`).
- The first 30 of the newly created nodes (`fake1` to `fake30`) are enrolled and inspected.
- The second batch of 30 nodes (`fake31` to `fake60`) is enrolled and inspected. This is repeated until all 100 nodes are enrolled and either inspected or marked `inspect failed`.
- The second batch of 100 nodes is created (nodes `fake101` to `fake200`).
- etc.

## Results

- The `ironic` pod used in `metal3-dev-env`, which consists of several containers, was split into smaller pods that run separately, as follows:
- First pod: consists of `ironic` and `ironic-httpd` containers.
- Second pod: consists of `dnsmasq` and `ironic-inspector` containers.
- Third pod: consists of `mariadb` container.

The `ironic` entity can be scaled up by deploying more instances of the first pod (a.k.a. `ironic` and `ironic-httpd`)

- Ironic cannot recover from `mariadb` failure:
```
baremetal node list
(pymysql.err.ProgrammingError) (1146, "Table 'ironic.nodes' doesn't exist")
[SQL: SELECT nodes.created_at, nodes.updated_at, nodes.version, nodes.id, nodes.uuid, nodes.instance_uuid, nodes.name, nodes.chassis_id, nodes.power_state, nodes.provision_state, nodes.driver, nodes.conductor_group, nodes.maintenance, nodes.owner, nodes.l
essee, nodes.allocation_id
FROM nodes ORDER BY nodes.id ASC
LIMIT %(param_1)s]
[parameters: {'param_1': 1000}]
(Background on this error at: https://sqlalche.me/e/14/f405) (HTTP 500)
```

48 changes: 48 additions & 0 deletions Support/Multitenancy/ironic-env/v3/build-sushy-tools-image.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#!/bin/bash
# Build a sushy-tools container image including the (not yet merged)
# IPA-simulation patch: https://review.opendev.org/c/openstack/sushy-tools/+/875366
#
# Abort on any failure: without this, a failed cherry-pick or wheel build
# previously fell through and podman built an image from a stale/missing wheel.
set -e
# SUSHYTOOLS_DIR="$HOME/sushy-tools-19"
SUSHYTOOLS_DIR="$HOME/sushy-tools"
rm -rf "$SUSHYTOOLS_DIR"
git clone https://opendev.org/openstack/sushy-tools.git "$SUSHYTOOLS_DIR"
cd "$SUSHYTOOLS_DIR" || exit
git fetch https://review.opendev.org/openstack/sushy-tools refs/changes/66/875366/25 && git cherry-pick FETCH_HEAD

pip3 install build
python3 -m build

cd dist || exit
# Pick up the built wheel via globbing instead of parsing 'ls' output.
wheels=(./*.whl)
WHEEL_FILENAME="${wheels[0]}"
echo "$WHEEL_FILENAME"

cd ..

# Dockerfile: ${WHEEL_FILENAME} is expanded now, at generation time.
cat <<EOF > "${SUSHYTOOLS_DIR}/Dockerfile"
# Use the official Centos image as the base image
FROM ubuntu:22.04
# Install necessary packages
RUN apt update -y && \
apt install -y python3 python3-pip python3-venv && \
apt clean all
WORKDIR /opt
# RUN python3 setup.py install
# Copy the application code to the container
COPY dist/${WHEEL_FILENAME} .
RUN pip3 install ${WHEEL_FILENAME}
ENV FLASK_DEBUG=1
RUN mkdir -p /root/sushy
# Set the default command to run when starting the container
# CMD ["python3", "app.py"]
# CMD ["sleep", "infinity"]
CMD ["sushy-emulator", "-i", "::", "--config", "/root/sushy/conf.py"]
EOF

sudo podman build -t 127.0.0.1:5000/localimages/sushy-tools .
# rm -rf "$SUSHYTOOLS_DIR"
Loading

0 comments on commit e9c1fc6

Please sign in to comment.