OpenShiftP-247: Remove Squid Proxy #77

Draft · wants to merge 2 commits into main
Showing changes from all commits
2 changes: 1 addition & 1 deletion README.md
@@ -55,7 +55,7 @@ Note, the `destroy` command removes the Node resource, removes the NFS deploymen

There are some important points to mention:

1. The Power Bastion node uses a https proxy to forward requests to the Cluster's internal api load balancer. This setting is configured in /etc/environment on the Power Bastion.
1. The Power Bastion no longer uses an HTTPS proxy to forward requests to the Cluster's internal API load balancer; this setting has been removed.
2. NFS is used as the storage provider across nodes.

## Running Automation from another IBMCloud VPC
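Reviewer note: a quick way to confirm the new behavior on a provisioned Power Bastion (a sketch, assuming ssh access to the bastion and a valid kubeconfig):

```sh
# The proxy entry should be gone from /etc/environment, and oc should
# reach the cluster's internal API load balancer directly.
grep '^HTTPS_PROXY=' /etc/environment || echo "no proxy entry (expected)"
unset HTTPS_PROXY
oc get nodes -l kubernetes.io/arch=ppc64le
```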
2 changes: 0 additions & 2 deletions ansible/post/files/approve_and_issue.sh
@@ -28,8 +28,6 @@ fi

# Setting values for variables
IDX=0
export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
echo "HTTPS_PROXY is set to $HTTPS_PROXY"

READY_COUNT=$(oc get nodes -l kubernetes.io/arch=ppc64le | grep "${MACHINE_PREFIX}" | grep -v NotReady | grep -c Ready)

1 change: 0 additions & 1 deletion ansible/post/files/cicd_hold_while_updating.sh
@@ -14,7 +14,6 @@ echo "Cluster Operator is: "
oc get co

echo "$(date -u --rfc-3339=seconds) - Waiting for clusteroperators to complete"
export HTTPS_PROXY="http://${PROXY_SERVER}:3128"

oc wait clusteroperator.config.openshift.io \
--for=condition=Available=True \
1 change: 0 additions & 1 deletion ansible/post/files/destroy-nfs-deployment.sh
@@ -15,5 +15,4 @@ PROXY_SERVER="${2}"
NFS_NAMESPACE="${3}"

echo "Removing the Deployment for the NFS storage class. Please ensure that you have taken backup of NFS server."
export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
oc delete deployment ${NFS_DEPLOYMENT} -n ${NFS_NAMESPACE}
1 change: 0 additions & 1 deletion ansible/post/files/destroy-workers.sh
@@ -23,7 +23,6 @@ IDX=0
while [ "$IDX" -lt "$COUNT" ]
do
echo "Removing the Worker: ${NAME_PREFIX}-worker-${IDX}"
export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
oc delete node ${NAME_PREFIX}-worker-${IDX} || true
IDX=$(($IDX + 1))
done
1 change: 0 additions & 1 deletion ansible/post/files/remove-worker-taints.sh
@@ -18,7 +18,6 @@ IDX=0
while [ "$IDX" -lt "$COUNT" ]
do
echo "Removing the taint for Worker: ${NAME_PREFIX}-worker-${IDX}"
export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
oc adm taint node ${NAME_PREFIX}-worker-${IDX} node.cloudprovider.kubernetes.io/uninitialized- \
|| true
IDX=$(($IDX + 1))
1 change: 0 additions & 1 deletion ansible/post/files/wait_on_power_nodes.sh
@@ -16,7 +16,6 @@ POWER_COUNT="${2}"
IDX=0
while [ "$IDX" -lt "121" ]
do
export HTTPS_PROXY="http://${PROXY_SERVER}:3128"

echo "Try number: ${IDX}"
echo "List of Power Workers: "
20 changes: 4 additions & 16 deletions ansible/post/tasks/main.yml
@@ -15,16 +15,8 @@
- kubernetes
- openshift

- name: Populate /etc/environment
lineinfile:
path: "/etc/environment"
state: present
regexp: "^HTTPS_PROXY="
line: "HTTPS_PROXY=http://{{ nfs_server }}:3128"
become: yes

- name: Fetch the list of projects
shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc get projects"
shell: "oc get projects"
register: projects_list
until: projects_list.stdout.find("openshift-kube-apiserver") != -1
retries: 20
@@ -33,13 +25,11 @@
- name: Create powervm-rmc project
when: projects_list.stdout.find(project_name) == -1
shell: |
export HTTPS_PROXY=http://{{ nfs_server }}:3128
oc apply -f ../files/rmc-ns.yml

- name: Create powervm-rmc serviceaccount
kubernetes.core.k8s:
state: present
proxy: "http://{{ nfs_server }}:3128"
definition:
api_version: v1
kind: ServiceAccount
@@ -48,16 +38,15 @@
namespace: "{{ project_name }}"

- name: Add privileged scc to powervm-rmc serviceaccount
shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc adm policy add-scc-to-user -z {{ rmc_name }} privileged -n {{ project_name }}"
shell: "oc adm policy add-scc-to-user -z {{ rmc_name }} privileged -n {{ project_name }}"

- name: Deploy powervm-rmc DaemonSet
kubernetes.core.k8s:
state: present
proxy: "http://{{ nfs_server }}:3128"
definition: "{{ lookup('template', '../templates/rsct-daemonset.yml.j2') }}"

- name: Get Nodes with ppc64le architecture
shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc get nodes -l kubernetes.io/arch=ppc64le | awk '(NR>1) { print $1 }'"
shell: "oc get nodes -l kubernetes.io/arch=ppc64le | awk '(NR>1) { print $1 }'"
register: node_names

- name: Print Node names with ppc64le architecture
@@ -67,7 +56,6 @@
- name: Add labels defined in node_labels to ppc64le Nodes
kubernetes.core.k8s:
state: present
proxy: "http://{{ nfs_server }}:3128"
kind: Node
name: "{{ item }}"
definition:
@@ -78,4 +66,4 @@
- name: Cleanup events in openshift-etcd for cicd
when: cicd == "true"
shell: |
export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc delete events --all=true -n openshift-etcd
oc delete events --all=true -n openshift-etcd
8 changes: 0 additions & 8 deletions ansible/support/tasks/main.yml
@@ -129,11 +129,3 @@
name: "httpd"
state: restarted
daemon_reload: yes

- name: Wait for port to become open on the vpc support machine
ansible.builtin.wait_for:
host: "{{ vpc_support_server_ip }}"
port: 3128
delay: 30
timeout: 600

12 changes: 3 additions & 9 deletions ansible/support/tasks/nfs_provisioner.yml
@@ -9,39 +9,33 @@
- name: Create nfs-provisioner Project
kubernetes.core.k8s:
state: present
proxy: "http://{{ nfs_server }}:3128"
definition: "{{ lookup('template', '../templates/nfs-ns.yml') }}"

- name: Create RBAC for nfs-provisioner
kubernetes.core.k8s:
state: present
proxy: "http://{{ nfs_server }}:3128"
definition: "{{ lookup('template', '../templates/nfs-rbac.yml') }}"

- name: Setup Authorization
shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc adm policy add-scc-to-user hostmount-anyuid system:serviceaccount:nfs-provisioner:nfs-client-provisioner"
shell: "oc adm policy add-scc-to-user hostmount-anyuid system:serviceaccount:nfs-provisioner:nfs-client-provisioner"

- name: Create Deployment for nfs-provisioner
kubernetes.core.k8s:
state: present
proxy: "http://{{ nfs_server }}:3128"
definition: "{{ lookup('template', '../templates/nfs-deployment.yml.j2') }}"

- name: Create Storage Class for nfs
kubernetes.core.k8s:
state: present
proxy: "http://{{ nfs_server }}:3128"
definition: "{{ lookup('template', '../templates/nfs-sc.yml') }}"

- name: Ensure nfs-provisioner Pod is up and running fine
shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc get pods -n nfs-provisioner"
shell: "oc get pods -n nfs-provisioner"
register: pod_output
until: pod_output.stdout.find("Running") != -1
retries: 10
delay: 30

- name: Remove the block storage as the default.
shell: |
export HTTPS_PROXY=http://{{ nfs_server }}:3128 \
&& oc patch storageclass ibmc-vpc-block-10iops-tier -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'

oc patch storageclass ibmc-vpc-block-10iops-tier -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
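Reviewer note: a follow-up check for the storage class patch (a sketch, not part of the change):

```sh
# The "(default)" marker should no longer appear next to
# ibmc-vpc-block-10iops-tier after the patch above.
oc get storageclass
```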
1 change: 0 additions & 1 deletion main.tf
@@ -168,7 +168,6 @@ module "worker" {
name_prefix = local.name_prefix
powervs_service_instance_id = var.powervs_service_instance_id
powervs_dhcp_network_id = module.pvs_prepare.powervs_dhcp_network_id
powervs_dhcp_network_name = module.pvs_prepare.powervs_dhcp_network_name
powervs_bastion_name = module.pvs_prepare.powervs_bastion_name
processor_type = var.processor_type
rhcos_image_id = module.pvs_prepare.rhcos_image_id
13 changes: 0 additions & 13 deletions modules/1_vpc_support/2_security_groups/vpc_sgs_supp.tf
@@ -36,19 +36,6 @@ resource "ibm_is_security_group_rule" "supp_vm_sg_ssh_all" {
}
}

# Dev Note: The rules apply to powervs instances to connect to the api-int
# allow all incoming network traffic on port 3128
resource "ibm_is_security_group_rule" "squid_vm_sg_ssh_all" {
group = ibm_is_security_group.supp_vm_sg.id
direction = "inbound"
remote = var.powervs_machine_cidr

tcp {
port_min = 3128
port_max = 3128
}
}

# allow all incoming network traffic on port 53
resource "ibm_is_security_group_rule" "supp_vm_sg_supp_all" {
group = ibm_is_security_group.supp_vm_sg.id
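Reviewer note: with both the rule and the Squid service removed, nothing should answer on 3128 from the PowerVS side. A minimal probe (a sketch; SUPPORT_IP is a placeholder for the support VSI address):

```sh
# Expect a timeout or refusal now that the port-3128 rule is gone.
timeout 5 bash -c "cat </dev/null >/dev/tcp/${SUPPORT_IP}/3128" \
  && echo "3128 still reachable (unexpected)" \
  || echo "3128 closed (expected)"
```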
14 changes: 2 additions & 12 deletions modules/1_vpc_support/4_vsi/templates/cloud-init.yaml.tpl
@@ -6,7 +6,6 @@ packages:
- httpd
- mod_ssl
- nfs-utils
- squid
write_files:
- path: /tmp/named-conf-edit.sed
permissions: '0640'
@@ -21,18 +20,9 @@ write_files:
permissions: '0640'
content: |
/export *(rw)
- path: /etc/squid/squid.conf
permissions: '0640'
content: |
acl localnet src 10.0.0.0/8
acl localnet src 172.16.0.0/12
acl localnet src 192.168.0.0/16
http_access deny !localnet
http_port 3128
coredump_dir /var/spool/squid
runcmd:
- export MYIP=`hostname -I`; sed -i.bak "s/MYIP/$MYIP/" /tmp/named-conf-edit.sed
- sed -i.orig -f /tmp/named-conf-edit.sed /etc/named.conf
- systemctl enable named.service nfs-server squid
- systemctl start named.service nfs-server squid
- systemctl enable named.service nfs-server
- systemctl start named.service nfs-server
- mkdir -p /export && chmod -R 777 /export
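Reviewer note: on a freshly provisioned support VSI, only named and nfs-server should now be enabled (a quick check, assuming a systemd-based image):

```sh
systemctl is-enabled named nfs-server   # both should print "enabled"
systemctl status squid 2>/dev/null || echo "squid not installed (expected)"
```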
1 change: 0 additions & 1 deletion modules/2_pvs_prepare/bastion/variables.tf
@@ -29,6 +29,5 @@ variable "bastion_public_network_id" {}
variable "bastion_public_network_name" {}
variable "bastion_public_network_cidr" {}
variable "powervs_network_id" {}
variable "powervs_network_name" {}
variable "powervs_network_cidr" {}
variable "vpc_support_server_ip" {}
4 changes: 0 additions & 4 deletions modules/2_pvs_prepare/existing_network/outputs.tf
@@ -19,10 +19,6 @@ output "powervs_dhcp_network_id" {
value = local.server.network_id
}

output "powervs_dhcp_network_name" {
value = local.server.network_name
}

output "powervs_dhcp_service" {
value = local.server
}
4 changes: 0 additions & 4 deletions modules/2_pvs_prepare/network/outputs.tf
@@ -20,10 +20,6 @@ output "powervs_dhcp_network_id" {
value = ibm_pi_dhcp.new_dhcp_service.network_id
}

output "powervs_dhcp_network_name" {
value = ibm_pi_dhcp.new_dhcp_service.network_name
}

output "powervs_dhcp_service" {
value = ibm_pi_dhcp.new_dhcp_service
}
5 changes: 0 additions & 5 deletions modules/2_pvs_prepare/outputs.tf
@@ -23,11 +23,6 @@ output "powervs_dhcp_network_id" {
value = var.override_network_name != "" ? module.existing_network[0].powervs_dhcp_network_id : module.network[0].powervs_dhcp_network_id
}

output "powervs_dhcp_network_name" {
depends_on = [module.network]
value = var.override_network_name != "" ? var.override_network_name : module.network[0].powervs_dhcp_network_name
}

output "rhcos_image_id" {
depends_on = [module.images]
value = module.images.rhcos_image_id
1 change: 0 additions & 1 deletion modules/2_pvs_prepare/pvs_prepare.tf
@@ -78,7 +78,6 @@ module "bastion" {
bastion_public_network_name = var.override_network_name != "" ? module.existing_network[0].bastion_public_network_name : module.network[0].bastion_public_network_name
bastion_public_network_cidr = var.override_network_name != "" ? module.existing_network[0].bastion_public_network_cidr : module.network[0].bastion_public_network_cidr
powervs_network_id = var.override_network_name != "" ? module.existing_network[0].powervs_dhcp_network_id : module.network[0].powervs_dhcp_network_id
powervs_network_name = var.override_network_name != "" ? module.existing_network[0].powervs_dhcp_network_name : module.network[0].powervs_dhcp_network_name
powervs_network_cidr = var.powervs_machine_cidr
private_key_file = var.private_key_file
public_key = module.keys.pvs_pubkey_name
10 changes: 0 additions & 10 deletions modules/4_pvs_support/pvs_support.tf
@@ -134,7 +134,6 @@ resource "null_resource" "config_login" {

provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.vpc_support_server_ip}:3128"
oc login \
"${var.openshift_api_url}" -u "${var.openshift_user}" -p "${var.openshift_pass}" --insecure-skip-tls-verify=true
EOF
@@ -157,7 +156,6 @@ resource "null_resource" "disable_etcd_defrag" {

provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.vpc_support_server_ip}:3128"
outval=$(oc get configmap etcd-disable-defrag -n openshift-etcd-operator)
if [ -z "$outval" ]
then
@@ -209,7 +207,6 @@ resource "null_resource" "config_csi" {
# scheduler.alpha.kubernetes.io/node-selector: kubernetes.io/arch=amd64
provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.vpc_support_server_ip}:3128"
oc annotate --kubeconfig /root/.kube/config ns openshift-cluster-csi-drivers \
scheduler.alpha.kubernetes.io/node-selector=kubernetes.io/arch=amd64
EOF
@@ -232,8 +229,6 @@ resource "null_resource" "adjust_mtu" {
# we previously supported OpenShiftSDN since it's deprecation we have removed it from automation.
provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.vpc_support_server_ip}:3128"

EXISTING_MTU=$(oc get network cluster -o json | jq -r .status.clusterNetworkMTU)

if [ $EXISTING_MTU != ${var.cluster_network_mtu} ]
@@ -265,7 +260,6 @@ resource "null_resource" "keep_dns_on_vpc" {
# Dev Note: put the dns nodes on the VPC machines
provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.vpc_support_server_ip}:3128"
oc patch dns.operator/default -p '{ "spec" : {"nodePlacement": {"nodeSelector": {"kubernetes.io/arch" : "amd64"}}}}' --type merge
EOF
]
@@ -286,7 +280,6 @@ resource "null_resource" "keep_imagepruner_on_vpc" {
# Dev Note: put the image pruner nodes on the VPC machines
provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.vpc_support_server_ip}:3128"
oc patch imagepruner/cluster -p '{ "spec" : {"nodeSelector": {"kubernetes.io/arch" : "amd64"}}}' --type merge -v=1
EOF
]
@@ -308,7 +301,6 @@ resource "null_resource" "set_routing_via_host" {

provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.vpc_support_server_ip}:3128"
if [ "$(oc get Network.config cluster -o jsonpath='{.status.networkType}')" == "OVNKubernetes" ]
then
oc patch network.operator/cluster --type merge -p \
@@ -333,8 +325,6 @@ resource "null_resource" "wait_on_mcp" {
# Dev Note: added hardening to the MTU wait, we wait for the condition and then fail
provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.vpc_support_server_ip}:3128"

echo "-diagnostics-"
oc get network cluster -o yaml | grep -i mtu
oc get mcp
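Reviewer note: one upgrade consideration, not covered by this diff — bastions provisioned before this change may still carry the old proxy line in /etc/environment (the task that wrote it is removed above, but nothing deletes the existing entry). A hypothetical one-off cleanup:

```sh
# Drop the stale proxy entry so new shells stop exporting it
# (path per the README note above).
sudo sed -i.bak '/^HTTPS_PROXY=/d' /etc/environment
```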
1 change: 0 additions & 1 deletion modules/5_worker/variables.tf
@@ -9,7 +9,6 @@ variable "key_name" {}
variable "name_prefix" {}
variable "powervs_service_instance_id" {}
variable "powervs_dhcp_network_id" {}
variable "powervs_dhcp_network_name" {}
variable "powervs_dhcp_service" {}
variable "powervs_bastion_name" {}
variable "processor_type" {}
5 changes: 0 additions & 5 deletions modules/6_post/post.tf
@@ -65,7 +65,6 @@ resource "null_resource" "remove_workers" {
when = destroy
on_failure = continue
inline = [<<EOF
export HTTPS_PROXY="http://${self.triggers.vpc_support_server_ip}:3128"
oc login \
"${self.triggers.openshift_api_url}" -u "${self.triggers.openshift_user}" -p "${self.triggers.openshift_pass}" --insecure-skip-tls-verify=true

@@ -136,7 +135,6 @@ resource "null_resource" "debug_and_remove_taints" {

provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${var.nfs_server}:3128"
echo "[All Nodes]"
oc get nodes -owide
echo ""
@@ -181,7 +179,6 @@ resource "null_resource" "remove_nfs_deployment" {
when = destroy
on_failure = continue
inline = [<<EOF
export HTTPS_PROXY="http://${self.triggers.vpc_support_server_ip}:3128"
oc login \
"${self.triggers.openshift_api_url}" -u "${self.triggers.openshift_user}" -p "${self.triggers.openshift_pass}" --insecure-skip-tls-verify=true

@@ -220,8 +217,6 @@ resource "null_resource" "cicd_etcd_login" {

provisioner "remote-exec" {
inline = [<<EOF
export HTTPS_PROXY="http://${self.triggers.vpc_support_server_ip}:3128"

echo "[INSTALL ibmcloud]"
curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
ibmcloud plugin install is -f
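Reviewer note: to confirm no stragglers reference the proxy anywhere in the tree, a repo-wide sweep (a sketch, assuming GNU grep):

```sh
# Any hit here would mean a leftover Squid/proxy reference this PR missed.
grep -RInE 'HTTPS_PROXY|squid|:3128' --exclude-dir=.git . || echo "clean"
```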