diff --git a/README.md b/README.md
index 13881fd..3a535af 100644
--- a/README.md
+++ b/README.md
@@ -55,7 +55,7 @@ Note, the `destroy` command removes the Node resource, removes the NFS deployment
 
 There are some important points to mention:
 
-1. The Power Bastion node uses a https proxy to forward requests to the Cluster's internal api load balancer. This setting is configured in /etc/environment on the Power Bastion.
+1. The Power Bastion no longer uses an HTTPS proxy to forward requests to the Cluster's internal API load balancer; the `HTTPS_PROXY` setting is no longer written to `/etc/environment` on the Power Bastion.
 2. NFS is used as the storage provider across nodes.
 
 ## Running Automation from another IBMCloud VPC
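With the proxy gone, `oc` on the Power Bastion must reach the cluster's internal API load balancer directly. A minimal sanity check from the bastion, assuming `KUBECONFIG` already points at the cluster (these commands are illustrative, not part of this change):

    # Clear any proxy variables left over from a previous deployment,
    # then confirm the API is reachable without a proxy.
    unset HTTPS_PROXY HTTP_PROXY https_proxy http_proxy
    oc get clusterversion
    oc get nodes -l kubernetes.io/arch=ppc64le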
diff --git a/ansible/post/files/approve_and_issue.sh b/ansible/post/files/approve_and_issue.sh
index da378e6..9d35fbf 100644
--- a/ansible/post/files/approve_and_issue.sh
+++ b/ansible/post/files/approve_and_issue.sh
@@ -28,8 +28,6 @@ fi
 
 # Setting values for variables
 IDX=0
-export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
-echo "HTTPS_PROXY is set to $HTTPS_PROXY"
 
 READY_COUNT=$(oc get nodes -l kubernetes.io/arch=ppc64le | grep "${MACHINE_PREFIX}" | grep -v NotReady | grep -c Ready)
 
diff --git a/ansible/post/files/cicd_hold_while_updating.sh b/ansible/post/files/cicd_hold_while_updating.sh
index e8fd8a0..b133df6 100644
--- a/ansible/post/files/cicd_hold_while_updating.sh
+++ b/ansible/post/files/cicd_hold_while_updating.sh
@@ -14,7 +14,6 @@ echo "Cluster Operator is: "
 oc get co
 
 echo "$(date -u --rfc-3339=seconds) - Waiting for clusteroperators to complete"
-export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
 
 oc wait clusteroperator.config.openshift.io \
   --for=condition=Available=True \
diff --git a/ansible/post/files/destroy-nfs-deployment.sh b/ansible/post/files/destroy-nfs-deployment.sh
index e391a01..c64c97d 100644
--- a/ansible/post/files/destroy-nfs-deployment.sh
+++ b/ansible/post/files/destroy-nfs-deployment.sh
@@ -15,5 +15,4 @@ PROXY_SERVER="${2}"
 NFS_NAMESPACE="${3}"
 
 echo "Removing the Deployment for the NFS storage class. Please ensure that you have taken backup of NFS server."
-export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
 oc delete deployment ${NFS_DEPLOYMENT} -n ${NFS_NAMESPACE}
diff --git a/ansible/post/files/destroy-workers.sh b/ansible/post/files/destroy-workers.sh
index 85853a7..2001606 100644
--- a/ansible/post/files/destroy-workers.sh
+++ b/ansible/post/files/destroy-workers.sh
@@ -23,7 +23,6 @@ IDX=0
 while [ "$IDX" -lt "$COUNT" ]
 do
   echo "Removing the Worker: ${NAME_PREFIX}-worker-${IDX}"
-  export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
   oc delete node ${NAME_PREFIX}-worker-${IDX} || true
   IDX=$(($IDX + 1))
 done
diff --git a/ansible/post/files/remove-worker-taints.sh b/ansible/post/files/remove-worker-taints.sh
index ec5f5af..a5f5f34 100644
--- a/ansible/post/files/remove-worker-taints.sh
+++ b/ansible/post/files/remove-worker-taints.sh
@@ -18,7 +18,6 @@ IDX=0
 while [ "$IDX" -lt "$COUNT" ]
 do
   echo "Removing the taint for Worker: ${NAME_PREFIX}-worker-${IDX}"
-  export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
   oc adm taint node ${NAME_PREFIX}-worker-${IDX} node.cloudprovider.kubernetes.io/uninitialized- \
     || true
   IDX=$(($IDX + 1))
diff --git a/ansible/post/files/wait_on_power_nodes.sh b/ansible/post/files/wait_on_power_nodes.sh
index 9961525..01bf653 100644
--- a/ansible/post/files/wait_on_power_nodes.sh
+++ b/ansible/post/files/wait_on_power_nodes.sh
@@ -16,7 +16,6 @@ POWER_COUNT="${2}"
 IDX=0
 while [ "$IDX" -lt "121" ]
 do
-  export HTTPS_PROXY="http://${PROXY_SERVER}:3128"
   echo "Try number: ${IDX}"
 
   echo "List of Power Workers: "
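All six scripts above now call `oc` directly instead of exporting `HTTPS_PROXY` first, and they share one polling pattern. A condensed sketch of that pattern, reusing the variable names and the Ready-count pipeline the scripts already define (the 30-second sleep is an assumption; each script picks its own interval):

    #!/usr/bin/env bash
    # Poll until the expected number of Power workers report Ready; give up after 121 tries.
    MACHINE_PREFIX="${1}"
    POWER_COUNT="${2}"
    IDX=0
    while [ "$IDX" -lt "121" ]
    do
      READY_COUNT=$(oc get nodes -l kubernetes.io/arch=ppc64le \
        | grep "${MACHINE_PREFIX}" | grep -v NotReady | grep -c Ready)
      if [ "${READY_COUNT}" -ge "${POWER_COUNT}" ]
      then
        echo "All ${POWER_COUNT} Power workers are Ready"
        break
      fi
      echo "Try number: ${IDX} - ${READY_COUNT}/${POWER_COUNT} Ready"
      sleep 30
      IDX=$(($IDX + 1))
    done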
diff --git a/ansible/post/tasks/main.yml b/ansible/post/tasks/main.yml
index e390278..657f695 100644
--- a/ansible/post/tasks/main.yml
+++ b/ansible/post/tasks/main.yml
@@ -15,16 +15,8 @@
       - kubernetes
       - openshift
 
-- name: Populate /etc/environment
-  lineinfile:
-    path: "/etc/environment"
-    state: present
-    regexp: "^HTTPS_PROXY="
-    line: "HTTPS_PROXY=http://{{ nfs_server }}:3128"
-  become: yes
-
 - name: Fetch the list of projects
-  shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc get projects"
+  shell: "oc get projects"
   register: projects_list
   until: projects_list.stdout.find("openshift-kube-apiserver") != -1
   retries: 20
@@ -33,13 +25,11 @@
 - name: Create powervm-rmc project
   when: projects_list.stdout.find(project_name) == -1
   shell: |
-    export HTTPS_PROXY=http://{{ nfs_server }}:3128
     oc apply -f ../files/rmc-ns.yml
 
 - name: Create powervm-rmc serviceaccount
   kubernetes.core.k8s:
     state: present
-    proxy: "http://{{ nfs_server }}:3128"
     definition:
       api_version: v1
       kind: ServiceAccount
@@ -48,16 +38,15 @@
       namespace: "{{ project_name }}"
 
 - name: Add privileged scc to powervm-rmc serviceaccount
-  shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc adm policy add-scc-to-user -z {{ rmc_name }} privileged -n {{ project_name }}"
+  shell: "oc adm policy add-scc-to-user -z {{ rmc_name }} privileged -n {{ project_name }}"
 
 - name: Deploy powervm-rmc DaemonSet
   kubernetes.core.k8s:
     state: present
-    proxy: "http://{{ nfs_server }}:3128"
     definition: "{{ lookup('template', '../templates/rsct-daemonset.yml.j2') }}"
 
 - name: Get Nodes with ppc64le architecture
-  shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc get nodes -l kubernetes.io/arch=ppc64le | awk '(NR>1) { print $1 }'"
+  shell: "oc get nodes -l kubernetes.io/arch=ppc64le | awk '(NR>1) { print $1 }'"
   register: node_names
 
 - name: Print Node names with ppc64le architecture
@@ -67,7 +56,6 @@
 - name: Add labels defined in node_labels to ppc64le Nodes
   kubernetes.core.k8s:
     state: present
-    proxy: "http://{{ nfs_server }}:3128"
     kind: Node
     name: "{{ item }}"
     definition:
@@ -78,4 +66,4 @@
 - name: Cleanup events in openshift-etcd for cicd
   when: cicd == "true"
   shell: |
-    export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc delete events --all=true -n openshift-etcd
+    oc delete events --all=true -n openshift-etcd
diff --git a/ansible/support/tasks/main.yml b/ansible/support/tasks/main.yml
index 56c7984..cfa20aa 100644
--- a/ansible/support/tasks/main.yml
+++ b/ansible/support/tasks/main.yml
@@ -129,11 +129,3 @@
     name: "httpd"
     state: restarted
     daemon_reload: yes
-
-- name: Wait for port to become open on the vpc support machine
-  ansible.builtin.wait_for:
-    host: "{{ vpc_support_server_ip }}"
-    port: 3128
-    delay: 30
-    timeout: 600
-
diff --git a/ansible/support/tasks/nfs_provisioner.yml b/ansible/support/tasks/nfs_provisioner.yml
index 864c9be..366adb2 100644
--- a/ansible/support/tasks/nfs_provisioner.yml
+++ b/ansible/support/tasks/nfs_provisioner.yml
@@ -9,32 +9,28 @@
 - name: Create nfs-provisioner Project
   kubernetes.core.k8s:
     state: present
-    proxy: "http://{{ nfs_server }}:3128"
     definition: "{{ lookup('template', '../templates/nfs-ns.yml') }}"
 
 - name: Create RBAC for nfs-provisioner
   kubernetes.core.k8s:
     state: present
-    proxy: "http://{{ nfs_server }}:3128"
     definition: "{{ lookup('template', '../templates/nfs-rbac.yml') }}"
 
 - name: Setup Authorization
-  shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc adm policy add-scc-to-user hostmount-anyuid system:serviceaccount:nfs-provisioner:nfs-client-provisioner"
+  shell: "oc adm policy add-scc-to-user hostmount-anyuid system:serviceaccount:nfs-provisioner:nfs-client-provisioner"
 
 - name: Create Deployment for nfs-provisioner
   kubernetes.core.k8s:
     state: present
-    proxy: "http://{{ nfs_server }}:3128"
     definition: "{{ lookup('template', '../templates/nfs-deployment.yml.j2') }}"
 
 - name: Create Storage Class for nfs
   kubernetes.core.k8s:
     state: present
-    proxy: "http://{{ nfs_server }}:3128"
     definition: "{{ lookup('template', '../templates/nfs-sc.yml') }}"
 
 - name: Ensure nfs-provisioner Pod is up and running fine
-  shell: "export HTTPS_PROXY=http://{{ nfs_server }}:3128 && oc get pods -n nfs-provisioner"
+  shell: "oc get pods -n nfs-provisioner"
   register: pod_output
   until: pod_output.stdout.find("Running") != -1
   retries: 10
@@ -42,6 +38,4 @@
 
 - name: Remove the block storage as the default.
   shell: |
-    export HTTPS_PROXY=http://{{ nfs_server }}:3128 \
-    && oc patch storageclass ibmc-vpc-block-10iops-tier -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
-
+    oc patch storageclass ibmc-vpc-block-10iops-tier -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
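Once the final task patches `ibmc-vpc-block-10iops-tier` to non-default, the NFS storage class should be the only default left. A quick proxy-less check (the jsonpath simply prints each class beside its default-class annotation):

    # List storage classes with their is-default-class annotation.
    oc get storageclass
    oc get storageclass -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.annotations.storageclass\.kubernetes\.io/is-default-class}{"\n"}{end}'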
diff --git a/main.tf b/main.tf
index f65b696..2b3087f 100644
--- a/main.tf
+++ b/main.tf
@@ -168,7 +168,6 @@ module "worker" {
   name_prefix                 = local.name_prefix
   powervs_service_instance_id = var.powervs_service_instance_id
   powervs_dhcp_network_id     = module.pvs_prepare.powervs_dhcp_network_id
-  powervs_dhcp_network_name   = module.pvs_prepare.powervs_dhcp_network_name
   powervs_bastion_name        = module.pvs_prepare.powervs_bastion_name
   processor_type              = var.processor_type
   rhcos_image_id              = module.pvs_prepare.rhcos_image_id
diff --git a/modules/1_vpc_support/2_security_groups/vpc_sgs_supp.tf b/modules/1_vpc_support/2_security_groups/vpc_sgs_supp.tf
index c2563eb..bda2c82 100644
--- a/modules/1_vpc_support/2_security_groups/vpc_sgs_supp.tf
+++ b/modules/1_vpc_support/2_security_groups/vpc_sgs_supp.tf
@@ -36,19 +36,6 @@ resource "ibm_is_security_group_rule" "supp_vm_sg_ssh_all" {
   }
 }
 
-# Dev Note: The rules apply to powervs instances to connect to the api-int
-# allow all incoming network traffic on port 3128
-resource "ibm_is_security_group_rule" "squid_vm_sg_ssh_all" {
-  group     = ibm_is_security_group.supp_vm_sg.id
-  direction = "inbound"
-  remote    = var.powervs_machine_cidr
-
-  tcp {
-    port_min = 3128
-    port_max = 3128
-  }
-}
-
 # allow all incoming network traffic on port 53
 resource "ibm_is_security_group_rule" "supp_vm_sg_supp_all" {
   group = ibm_is_security_group.supp_vm_sg.id
diff --git a/modules/1_vpc_support/4_vsi/templates/cloud-init.yaml.tpl b/modules/1_vpc_support/4_vsi/templates/cloud-init.yaml.tpl
index 92c6a26..ba10546 100644
--- a/modules/1_vpc_support/4_vsi/templates/cloud-init.yaml.tpl
+++ b/modules/1_vpc_support/4_vsi/templates/cloud-init.yaml.tpl
@@ -6,7 +6,6 @@ packages:
   - httpd
   - mod_ssl
   - nfs-utils
-  - squid
 write_files:
 - path: /tmp/named-conf-edit.sed
   permissions: '0640'
@@ -21,18 +20,9 @@ write_files:
   permissions: '0640'
   content: |
     /export *(rw)
-- path: /etc/squid/squid.conf
-  permissions: '0640'
-  content: |
-    acl localnet src 10.0.0.0/8
-    acl localnet src 172.16.0.0/12
-    acl localnet src 192.168.0.0/16
-    http_access deny !localnet
-    http_port 3128
-    coredump_dir /var/spool/squid
 runcmd:
   - export MYIP=`hostname -I`; sed -i.bak "s/MYIP/$MYIP/" /tmp/named-conf-edit.sed
   - sed -i.orig -f /tmp/named-conf-edit.sed /etc/named.conf
-  - systemctl enable named.service nfs-server squid
-  - systemctl start named.service nfs-server squid
+  - systemctl enable named.service nfs-server
+  - systemctl start named.service nfs-server
   - mkdir -p /export && chmod -R 777 /export
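With the squid package, its config file, and the port 3128 security-group rule all gone, nothing should accept connections on 3128 anymore. A quick verification, assuming shell access to the support VSI and to a PowerVS node (`SUPPORT_VSI_IP` is a placeholder):

    # On the support VSI: squid absent and no listener on 3128.
    systemctl status squid --no-pager || echo "squid service not present"
    ss -tlnp | grep -w 3128 || echo "nothing listening on 3128"

    # From a PowerVS node: the old proxy port should now be unreachable.
    curl --connect-timeout 5 http://SUPPORT_VSI_IP:3128/ || echo "3128 closed, as expected"

Note that `curl` exits non-zero on a refused or filtered connection, so the `|| echo` fires in the expected case.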
diff --git a/modules/2_pvs_prepare/bastion/variables.tf b/modules/2_pvs_prepare/bastion/variables.tf
index f7e6e92..729c11e 100644
--- a/modules/2_pvs_prepare/bastion/variables.tf
+++ b/modules/2_pvs_prepare/bastion/variables.tf
@@ -29,6 +29,5 @@ variable "bastion_public_network_id" {}
 variable "bastion_public_network_name" {}
 variable "bastion_public_network_cidr" {}
 variable "powervs_network_id" {}
-variable "powervs_network_name" {}
 variable "powervs_network_cidr" {}
 variable "vpc_support_server_ip" {}
\ No newline at end of file
diff --git a/modules/2_pvs_prepare/existing_network/outputs.tf b/modules/2_pvs_prepare/existing_network/outputs.tf
index a6ea3eb..cd09cc6 100644
--- a/modules/2_pvs_prepare/existing_network/outputs.tf
+++ b/modules/2_pvs_prepare/existing_network/outputs.tf
@@ -19,10 +19,6 @@ output "powervs_dhcp_network_id" {
   value = local.server.network_id
 }
 
-output "powervs_dhcp_network_name" {
-  value = local.server.network_name
-}
-
 output "powervs_dhcp_service" {
   value = local.server
 }
diff --git a/modules/2_pvs_prepare/network/outputs.tf b/modules/2_pvs_prepare/network/outputs.tf
index 3277080..961d591 100644
--- a/modules/2_pvs_prepare/network/outputs.tf
+++ b/modules/2_pvs_prepare/network/outputs.tf
@@ -20,10 +20,6 @@ output "powervs_dhcp_network_id" {
   value = ibm_pi_dhcp.new_dhcp_service.network_id
 }
 
-output "powervs_dhcp_network_name" {
-  value = ibm_pi_dhcp.new_dhcp_service.network_name
-}
-
 output "powervs_dhcp_service" {
   value = ibm_pi_dhcp.new_dhcp_service
 }
diff --git a/modules/2_pvs_prepare/outputs.tf b/modules/2_pvs_prepare/outputs.tf
index 50d010e..caeec1d 100644
--- a/modules/2_pvs_prepare/outputs.tf
+++ b/modules/2_pvs_prepare/outputs.tf
@@ -23,11 +23,6 @@ output "powervs_dhcp_network_id" {
   value = var.override_network_name != "" ? module.existing_network[0].powervs_dhcp_network_id : module.network[0].powervs_dhcp_network_id
 }
 
-output "powervs_dhcp_network_name" {
-  depends_on = [module.network]
-  value      = var.override_network_name != "" ? var.override_network_name : module.network[0].powervs_dhcp_network_name
-}
-
 output "rhcos_image_id" {
   depends_on = [module.images]
   value      = module.images.rhcos_image_id
diff --git a/modules/2_pvs_prepare/pvs_prepare.tf b/modules/2_pvs_prepare/pvs_prepare.tf
index 3d65ef4..f411a3c 100644
--- a/modules/2_pvs_prepare/pvs_prepare.tf
+++ b/modules/2_pvs_prepare/pvs_prepare.tf
@@ -78,7 +78,6 @@ module "bastion" {
   bastion_public_network_name = var.override_network_name != "" ? module.existing_network[0].bastion_public_network_name : module.network[0].bastion_public_network_name
   bastion_public_network_cidr = var.override_network_name != "" ? module.existing_network[0].bastion_public_network_cidr : module.network[0].bastion_public_network_cidr
   powervs_network_id          = var.override_network_name != "" ? module.existing_network[0].powervs_dhcp_network_id : module.network[0].powervs_dhcp_network_id
-  powervs_network_name        = var.override_network_name != "" ? module.existing_network[0].powervs_dhcp_network_name : module.network[0].powervs_dhcp_network_name
   powervs_network_cidr        = var.powervs_machine_cidr
   private_key_file            = var.private_key_file
   public_key                  = module.keys.pvs_pubkey_name
diff --git a/modules/4_pvs_support/pvs_support.tf b/modules/4_pvs_support/pvs_support.tf
index 8a77bb1..cd62332 100644
--- a/modules/4_pvs_support/pvs_support.tf
+++ b/modules/4_pvs_support/pvs_support.tf
@@ -134,7 +134,6 @@ resource "null_resource" "config_login" {
 
   provisioner "remote-exec" {
     inline = [<