From b21d3c89a848149116d12bd106f770e1f06cd927 Mon Sep 17 00:00:00 2001
From: Veerabhadrarao Damisetti
Date: Fri, 15 Dec 2023 15:43:23 +0530
Subject: [PATCH] Enhancement: Support for zVM compute nodes to Hosted Control
 Plane - vswitch FCP/DASD (#227)

Enhancement: Support for zVM compute nodes to Hosted Control Plane - vswitch FCP/DASD

- Code changes to support zVM compute nodes for Hosted Control Plane
- Supported network type: vswitch
- Supported disk types: FCP/DASD
- Updated the documentation accordingly

---------

Signed-off-by: root
Co-authored-by: root
---
 docs/run-the-playbooks-for-hypershift.md      |  7 +-
 docs/set-variables-group-vars.md              | 11 ++-
 .../default/group_vars/all.yaml.template      | 68 ++++++++++++++++++-
 ..._agents_and_wait_for_install_complete.yaml | 13 ++++
 playbooks/create_hosted_cluster.yaml          | 12 +++-
 .../tasks/main.yaml                           |  1 +
 .../boot_zvm_nodes_hypershift/tasks/main.yaml | 47 +++++++++++++
 .../templates/boot_nodes.py                   | 40 +++++++++++
 .../tasks/main.yaml                           | 20 ++++--
 .../templates/inventory_template.j2           |  4 +-
 .../templates/ssh-key.exp.j2                  |  6 ++
 .../tasks/main.yaml                           | 18 +++++
 .../tasks/main.yaml                           | 12 ++--
 .../tasks/main.yaml                           | 23 +++++++
 roles/install_tessia_baselib/tasks/main.yaml  | 26 +++++++
 .../tasks/main.yaml                           |  7 +-
 .../tasks/main.yaml                           | 29 ++++----
 .../templates/parm-file.parm.j2               |  1 +
 18 files changed, 305 insertions(+), 40 deletions(-)
 create mode 100644 roles/boot_zvm_nodes_hypershift/tasks/main.yaml
 create mode 100644 roles/boot_zvm_nodes_hypershift/templates/boot_nodes.py
 create mode 100644 roles/install_tessia_baselib/tasks/main.yaml
 create mode 100644 roles/setup_for_agents_hypershift/templates/parm-file.parm.j2

diff --git a/docs/run-the-playbooks-for-hypershift.md b/docs/run-the-playbooks-for-hypershift.md
index 3a108bf1..f8e46397 100644
--- a/docs/run-the-playbooks-for-hypershift.md
+++ b/docs/run-the-playbooks-for-hypershift.md
@@ -1,13 +1,14 @@
 # Run the Playbooks
 ## Prerequisites
 * Running OCP Cluster ( Management Cluster )
-* KVM host with root user access or user with sudo privileges
+* KVM host with root user access, or a user with sudo privileges, if compute nodes are KVM.
+* zVM host ( bastion ) and zVM nodes if compute nodes are zVM.
 
 ### Network Prerequisites
 * DNS entry to resolve api.${cluster}.${domain} , api-int.${cluster}.${domain} , *apps.${cluster}.${domain} to a load balancer deployed to redirect incoming traffic to the ingresses pod ( Bastion ).
 * If using dynamic IP for agents, make sure you have entries in DHCP Server for macaddresses you are using in installation to map to IPv4 addresses and along with this DHCP server should make your IPs to use nameserver which you have configured.
 
 ## Note:
-* As of now we are supporting only macvtap for hypershift Agent based installation.
+* Currently, only macvtap is supported for the Hypershift Agent-based installation with KVM compute nodes.
 
 ## Step-1: Setup Ansible Vault for Management Cluster Credentials
 ### Overview
@@ -36,7 +37,7 @@ ansible-vault edit playbooks/secrets.yaml
 ## Step-2: Initial Setup for Hypershift
 * Navigate to the [root folder of the cloned Git repository](https://github.com/IBM/Ansible-OpenShift-Provisioning) in your terminal (`ls` should show [ansible.cfg](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/ansible.cfg)).
-* Update all the variables in Section-16 ( Hypershift ) and Section-3 ( File Server : ip , protocol and iso_mount_dir ) in [all.yaml](https://github.com/veera-damisetti/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/all.yaml.template) before running the playbooks.
+* Update the variables for your compute node type (zKVM/zVM) in Section-16 ( Hypershift ) and Section-3 ( File Server : ip , protocol and iso_mount_dir ) in [all.yaml](https://github.com/veera-damisetti/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/all.yaml.template) before running the playbooks.
 * First playbook to be run is setup_for_hypershift.yaml which will create inventory file for hypershift and will add ssh key to the kvm host.
 * Run this shell command:

diff --git a/docs/set-variables-group-vars.md b/docs/set-variables-group-vars.md
index 9464e6db..b8400ace 100644
--- a/docs/set-variables-group-vars.md
+++ b/docs/set-variables-group-vars.md
@@ -198,6 +198,7 @@
 ## 16 - Hypershift ( Optional )
 **Variable Name** | **Description** | **Example**
 :--- | :--- | :---
+**hypershift.compute_node_type** | Select the compute node type for HCP, either zKVM or zVM | zvm
 **hypershift.kvm_host** | IPv4 address of KVM host for hypershift<br /> (kvm host where you want to run all oc commands and create VMs)| 192.168.10.1
 **hypershift.kvm_host_user** | User for KVM host | root
 **hypershift.bastion_hypershift** | IPv4 address for bastion of Hosted Cluster | 192.168.10.1
@@ -232,15 +233,23 @@
 **hypershift.asc.iso_url** | Give URL for ISO image | https://...<br />...s390x-live.s390x.iso
 **hypershift.asc.root_fs_url** | Give URL for rootfs image | https://...<br />... live-rootfs.s390x.img
 **hypershift.asc.mce_namespace** | Namespace where your Multicluster Engine Operator is installed.<br /> Recommended Namespace for MCE is 'multicluster-engine'.<br /> Change this only if MCE is installed in other namespace. | multicluster-engine
+**hypershift.agents_parms.agents_count** | Number of agents for the hosted cluster<br /> The same number of compute nodes will be attached to the Hosted Control Plane | 2
 **hypershift.agents_parms.static_ip_parms.static_ip** | true or false - use static IPs for agents using NMState | true
 **hypershift.agents_parms.static_ip_parms.ip** | List of IP addresses for agents | 192.168.10.1
 **hypershift.agents_parms.static_ip_parms.interface** | Interface for agents for configuring NMStateConfig | eth0
-**hypershift.agents_parms.agents_count** | Number of agents for the hosted cluster<br /> The same number of compute nodes will be attached to Hosted Cotrol Plane | 2
 **hypershift.agents_parms.agent_mac_addr** | List of macaddresses for the agents.<br /> Configure in DHCP if you are using dynamic IPs for Agents. | - 52:54:00:ba:d3:f7
 **hypershift.agents_parms.disk_size** | Disk size for agents | 100G
 **hypershift.agents_parms.ram** | RAM for agents | 16384
 **hypershift.agents_parms.vcpus** | vCPUs for agents | 4
 **hypershift.agents_parms.nameserver** | Nameserver to be used for agents | 192.168.10.1
+**hypershift.agents_parms.zvm_parameters.network_mode** | Network mode for zVM nodes<br /> Supported modes: vswitch | vswitch
+**hypershift.agents_parms.zvm_parameters.disk_type** | Disk type for zVM nodes<br /> Supported disk types: fcp, dasd | dasd
+**hypershift.agents_parms.zvm_parameters.vcpus** | vCPUs for each zVM node | 4
+**hypershift.agents_parms.zvm_parameters.memory** | RAM for each zVM node | 16384
+**hypershift.agents_parms.zvm_parameters.nameserver** | Nameserver for compute nodes | 192.168.10.1
+**hypershift.agents_parms.zvm_parameters.subnetmask** | Subnet mask for compute nodes | 255.255.255.0
+**hypershift.agents_parms.zvm_parameters.gateway** | Gateway for compute nodes | 192.168.10.1
+**hypershift.agents_parms.zvm_parameters.nodes** | Set of parameters for zVM nodes<br /> Give the details of each zVM node here |
 
 ## 17 - (Optional) Disconnected cluster setup
 **Variable Name** | **Description** | **Example**

diff --git a/inventories/default/group_vars/all.yaml.template b/inventories/default/group_vars/all.yaml.template
index 5ee49737..9b320f07 100644
--- a/inventories/default/group_vars/all.yaml.template
+++ b/inventories/default/group_vars/all.yaml.template
@@ -175,6 +175,7 @@ env:
     kvm: [ libguestfs, libvirt-client, libvirt-daemon-config-network, libvirt-daemon-kvm, cockpit-machines, libvirt-devel, virt-top, qemu-kvm, python3-lxml, cockpit, lvm2 ]
     bastion: [ haproxy, httpd, bind, bind-utils, expect, firewalld, mod_ssl, python3-policycoreutils, rsync ]
     hypershift: [ make, jq, git, virt-install ]
+    zvm: [ git, python3-pip, python3-devel, openssl-devel, rust, cargo, libffi-devel, wget, tar, jq, gcc, make, x3270, python39 ]
 
 # Section 12 - OpenShift Settings
 install_config:
@@ -239,13 +240,15 @@
 rhcos_live_rootfs: "rhcos-4.12.3-s390x-live-rootfs.s390x.img"
 
 # Section 16 - Hypershift ( Optional )
 hypershift:
+  compute_node_type: # zKVM or zVM
+
   kvm_host:
   kvm_host_user:
   bastion_hypershift:
   bastion_hypershift_user:
-  create_bastion: true
-  networking_device: enc1100
+  create_bastion: true
+  networking_device: enc1100
 
 # Following set of parameters required only if create_bastion is true
   gateway:
   bastion_parms:
@@ -257,6 +260,9 @@
     gateway:
     subnet_mask:
+
+  # Parameters for oc login
+  mgmt_cluster_nameserver:
 
   oc_url:
@@ -291,13 +297,16 @@ hypershift:
   mce_namespace: multicluster-engine # This is the Recommended Namespace for Multicluster Engine operator
 
   agents_parms:
+    agents_count:
+
+    # KVM specific parameters - KVM on s390x
+
     static_ip_parms:
       static_ip: true
       ip: # Required only if static_ip is true
       #-
       #-
       interface: eth0
-    agents_count:
     # If you want to use specific mac addresses, provide them here
     agent_mac_addr:
     #-
@@ -305,6 +314,59 @@
     ram: 16384
     vcpus: 4
     nameserver:
+
+
+
+    # zVM specific parameters - s390x
+
+    zvm_parameters:
+      network_mode: vswitch # Supported modes: vswitch
+      disk_type: # Supported disk types: fcp, dasd
+      vcpus: 4
+      memory: 16384
+      nameserver:
+      subnetmask:
+      gateway:
+
+      nodes:
+        - name:
+          host:
+          user:
+          password:
+          osa:
+            ifname: encbdf0
+            id: 0.0.bdf0,0.0.bdf1,0.0.bdf2
+            ip:
+
+          # Required if disk_type is dasd
+          dasd:
+            disk_id:
+
+          # Required if disk_type is fcp
+          lun:
+            - id:
+              paths:
+                - wwpn:
+                  fcp:
+
+        - name:
+          host:
+          user:
+          password:
+          osa:
+            ifname: encbdf0
+            id: 0.0.bdf0,0.0.bdf1,0.0.bdf2
+            ip:
+
+          dasd:
+            disk_id:
+
+          lun:
+            - id:
+              paths:
+                - wwpn:
+                  fcp:
 
 # Section 17 - (Optional) Setup disconnected clusters
 # Warning: currently, the oc-mirror plugin is officially downloadable to amd64 only.
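To make the new zVM block concrete, here is a filled-in sketch for a single dasd-backed node; every guest name, host name, credential, address, and device ID below is an illustrative placeholder, not a value taken from this patch:

    zvm_parameters:
      network_mode: vswitch          # only vswitch is supported by this change
      disk_type: dasd                # or fcp
      vcpus: 4
      memory: 16384
      nameserver: 192.168.10.1
      subnetmask: 255.255.255.0
      gateway: 192.168.10.1
      nodes:
        - name: HCPNODE1             # placeholder z/VM guest name
          host: zvmhost.example.com  # placeholder z/VM host
          user: HCPNODE1
          password: changeme         # placeholder credential
          osa:
            ifname: encbdf0
            id: 0.0.bdf0,0.0.bdf1,0.0.bdf2
            ip: 192.168.10.5
          dasd:
            disk_id: 4404            # used only when disk_type is dasd
          # For disk_type: fcp, a lun list replaces the dasd block, e.g.:
          # lun:
          #   - id: "4001400000000000"        # placeholder LUN
          #     paths:
          #       - wwpn: "500507630a1b50a4"  # placeholder WWPN
          #         fcp: 0.0.1f00             # placeholder FCP device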
diff --git a/playbooks/create_agents_and_wait_for_install_complete.yaml b/playbooks/create_agents_and_wait_for_install_complete.yaml
index c3a858ec..f9406bad 100644
--- a/playbooks/create_agents_and_wait_for_install_complete.yaml
+++ b/playbooks/create_agents_and_wait_for_install_complete.yaml
@@ -4,6 +4,19 @@
     roles:
     - boot_agents_hypershift
 
+- name: Boot zVM nodes
+  hosts: bastion_hypershift
+  tasks:
+    - name: Install tessia baselib
+      import_role:
+        name: install_tessia_baselib
+      when: hypershift.compute_node_type | lower == 'zvm'
+
+    - name: Start zVM nodes
+      include_tasks: ../roles/boot_zvm_nodes_hypershift/tasks/main.yaml
+      loop: "{{ range(hypershift.agents_parms.agents_count | int) | list }}"
+      when: hypershift.compute_node_type | lower == 'zvm'
+
 - name: Scale Nodepool & Configure Haproxy on bastion for hosted workers
   hosts: bastion_hypershift
   roles:

diff --git a/playbooks/create_hosted_cluster.yaml b/playbooks/create_hosted_cluster.yaml
index a16b75ba..36a05527 100644
--- a/playbooks/create_hosted_cluster.yaml
+++ b/playbooks/create_hosted_cluster.yaml
@@ -8,9 +8,12 @@
     - name: Setting host
       set_fact:
         host: 'kvm_host_hypershift'
+      when: hypershift.compute_node_type | lower != 'zvm'
+
     - name: Install Prereqs on host
       import_role:
         name: install_prerequisites_host_hypershift
+      when: hypershift.compute_node_type | lower != 'zvm'
 
 - name: Create macvtap network
   hosts: kvm_host_hypershift
@@ -20,9 +23,12 @@
       set_fact:
         networking:
           device1: "{{ hypershift.networking_device }}"
+      when: hypershift.compute_node_type | lower != 'zvm'
+
     - name: Creating macvtap network
       import_role:
         name: macvtap
+      when: hypershift.compute_node_type | lower != 'zvm'
 
 - name: Create bastion for hypershift
   hosts: kvm_host_hypershift
@@ -33,7 +39,9 @@
     - name: Creating Bastion
       include_role:
         name: create_bastion_hypershift
-      when: hypershift.create_bastion == true
+      when:
+        - hypershift.create_bastion == true
+        - hypershift.compute_node_type | lower != 'zvm'
 
 - name: Configuring Bastion
   hosts: bastion_hypershift
@@ -67,7 +75,7 @@
 
 - name: Download Required images for booting Agents
-  hosts: kvm_host_hypershift
+  hosts: "{{ 'kvm_host_hypershift' if 'kvm_host_hypershift' in groups['all'] else 'bastion_hypershift' }}"
   become: true
   roles:
   - setup_for_agents_hypershift

diff --git a/roles/add_hc_workers_to_haproxy_hypershift/tasks/main.yaml b/roles/add_hc_workers_to_haproxy_hypershift/tasks/main.yaml
index 152c26b9..2754d647 100644
--- a/roles/add_hc_workers_to_haproxy_hypershift/tasks/main.yaml
+++ b/roles/add_hc_workers_to_haproxy_hypershift/tasks/main.yaml
@@ -12,6 +12,7 @@
       mode tcp
       bind {{ hypershift.bastion_hypershift }}:443
       bind {{ hypershift.bastion_hypershift }}:80
+    marker: "# console"
 
 - name: Add Hosted Cluster Worker IPs to Haproxy
   lineinfile:

diff --git a/roles/boot_zvm_nodes_hypershift/tasks/main.yaml b/roles/boot_zvm_nodes_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..b8bd60e3
--- /dev/null
+++ b/roles/boot_zvm_nodes_hypershift/tasks/main.yaml
@@ -0,0 +1,47 @@
+---
+- name: Creating agents
+  block:
+    - name: Getting script for booting
+      template:
+        src: ../templates/boot_nodes.py
+        dest: /root/ansible_workdir/boot_nodes.py
+
+    - name: Debug
+      debug:
+        msg: "Booting agent-{{ item }}"
+
+    - name: Booting zVM node
+      shell: |
+        python3 /root/ansible_workdir/boot_nodes.py \
+        --zvmname "{{ hypershift.agents_parms.zvm_parameters.nodes[item].name }}" \
+        --zvmhost "{{ hypershift.agents_parms.zvm_parameters.nodes[item].host }}" \
+        --zvmuser "{{ hypershift.agents_parms.zvm_parameters.nodes[item].user }}" \
+        --zvmpass "{{ hypershift.agents_parms.zvm_parameters.nodes[item].password }}" \
+        --cpu "{{ hypershift.agents_parms.zvm_parameters.vcpus }}" \
+        --memory "{{ hypershift.agents_parms.zvm_parameters.memory }}" \
+        --kernel 'file:///var/lib/libvirt/images/pxeboot/kernel.img' \
+        --initrd 'file:///var/lib/libvirt/images/pxeboot/initrd.img' \
+        --cmdline "$(cat /root/ansible_workdir/agent-{{ item }}.parm)"
+
+    - name: Attaching dasd disk
+      shell: vmcp attach {{ hypershift.agents_parms.zvm_parameters.nodes[item].dasd.disk_id }} to {{ hypershift.agents_parms.zvm_parameters.nodes[item].name }}
+      when: hypershift.agents_parms.zvm_parameters.disk_type | lower == 'dasd'
+
+    - name: Attaching fcp disks
+      shell: vmcp attach {{ hypershift.agents_parms.zvm_parameters.nodes[item].lun[0].paths[0].fcp.split('.')[-1] }} to {{ hypershift.agents_parms.zvm_parameters.nodes[item].name }}
+      when: hypershift.agents_parms.zvm_parameters.disk_type | lower == 'fcp'
+
+    - name: Wait for the agent to come up
+      shell: oc get agents -n "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}" --no-headers -o custom-columns=NAME:.metadata.name,APPROVED:.spec.approved | awk '$2 == "false"' | wc -l
+      register: agent_count
+      until: agent_count.stdout | int == 1
+      retries: 40
+      delay: 10
+
+    - name: Get the name of the agent
+      shell: oc get agents -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} --no-headers -o custom-columns=NAME:.metadata.name,APPROVED:.spec.approved | awk '$2 == "false"'
+      register: agent_name
+
+    - name: Approve agents
+      shell: oc -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} patch agent {{ agent_name.stdout.split(' ')[0] }} -p '{"spec":{"approved":true,"hostname":"compute-{{item}}.{{ hypershift.hcp.hosted_cluster_name }}.{{ hypershift.hcp.basedomain }}","installerArgs":"[\"--append-karg\",\"rd.neednet=1\", \"--append-karg\", \"ip={{ hypershift.agents_parms.zvm_parameters.nodes[item].osa.ip }}::{{ hypershift.agents_parms.zvm_parameters.gateway }}:{{ hypershift.agents_parms.zvm_parameters.subnetmask }}:compute-{{ item }}.{{ hypershift.hcp.hosted_cluster_name }}.{{ hypershift.hcp.basedomain }}:{{ hypershift.agents_parms.zvm_parameters.nodes[item].osa.ifname }}:none\", \"--append-karg\", \"nameserver={{ hypershift.agents_parms.zvm_parameters.nameserver }}\", \"--append-karg\",\"rd.znet=qeth,{{ hypershift.agents_parms.zvm_parameters.nodes[item].osa.id }},layer2=1\",\"--append-karg\", {% if hypershift.agents_parms.zvm_parameters.disk_type | lower != 'fcp' %}\"rd.dasd=0.0.{{ hypershift.agents_parms.zvm_parameters.nodes[item].dasd.disk_id }}\"{% else %}\"rd.zfcp={{ hypershift.agents_parms.zvm_parameters.nodes[item].lun[0].paths[0].fcp}},{{ hypershift.agents_parms.zvm_parameters.nodes[item].lun[0].paths[0].wwpn }},{{ hypershift.agents_parms.zvm_parameters.nodes[item].lun[0].id }}\"{% endif %}]"}}' --type merge

diff --git a/roles/boot_zvm_nodes_hypershift/templates/boot_nodes.py b/roles/boot_zvm_nodes_hypershift/templates/boot_nodes.py
new file mode 100644
index 00000000..71ef1a32
--- /dev/null
+++ b/roles/boot_zvm_nodes_hypershift/templates/boot_nodes.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+from tessia.baselib.hypervisors.zvm.zvm import HypervisorZvm
+import argparse
+
+parser = argparse.ArgumentParser(description="Network-boot a z/VM guest as a Hypershift agent.")
+
+parser.add_argument("--zvmname", type=str, help="z/VM Hypervisor name", required=True)
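+# tessia-baselib drives the z/VM guest over a 3270 console session (which is
+# why x3270 is listed in env.pkgs.zvm); all of these arguments are mandatory.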
+parser.add_argument("--zvmhost", type=str, help="z/VM Hostname or IP", required=True) +parser.add_argument("--zvmuser", type=str, help="z/VM user", required=True) +parser.add_argument("--zvmpass", type=str, help="z/VM user password", required=True) +parser.add_argument("--cpu", type=int, help="number of Guest CPUs", required=True) +parser.add_argument("--memory", type=int, help="Guest memory in MB", required=True) +parser.add_argument("--kernel", type=str, help="kernel URI", required=True, default='') +parser.add_argument("--cmdline", type=str, help="kernel cmdline", required=True, default='') +parser.add_argument("--initrd", type=str, help="Initrd URI", required=True, default='') + +args = parser.parse_args() + +parameters = { + 'transfer-buffer-size': 8000 + } + +guest_parameters = { +"boot_method": "network", +"storage_volumes" : [], +"ifaces" : [], +"netboot": { + "cmdline": args.cmdline, + "kernel_uri": args.kernel, + "initrd_uri": args.initrd, + } +} + +zvm = HypervisorZvm(args.zvmname,args.zvmhost, args.zvmuser, args.zvmpass, parameters) +zvm.login() +print("Logged in ") +zvm.start(args.zvmuser, args.cpu, args.memory, guest_parameters) +print("VM Started") +zvm.logoff() +print("Logged out") diff --git a/roles/create_hcp_InfraEnv_hypershift/tasks/main.yaml b/roles/create_hcp_InfraEnv_hypershift/tasks/main.yaml index 7d0bf71f..da96b391 100644 --- a/roles/create_hcp_InfraEnv_hypershift/tasks/main.yaml +++ b/roles/create_hcp_InfraEnv_hypershift/tasks/main.yaml @@ -98,29 +98,39 @@ - name: Creating list of mac addresses set_fact: agent_mac_addr: [] - when: hypershift.agents_parms.static_ip_parms.static_ip == true + when: + - hypershift.agents_parms.static_ip_parms.static_ip == true + - hypershift.compute_node_type | lower != 'zvm' - name: Getting mac addresss for agents set_fact: agent_mac_addr: "{{ hypershift.agents_parms.agent_mac_addr }}" - when: ( hypershift.agents_parms.static_ip_parms.static_ip == true ) and ( hypershift.agents_parms.agent_mac_addr != None ) + when: + - ( hypershift.agents_parms.static_ip_parms.static_ip == true ) and ( hypershift.agents_parms.agent_mac_addr != None ) + - hypershift.compute_node_type | lower != 'zvm' - name: Generate mac addresses for agents set_fact: agent_mac_addr: "{{ agent_mac_addr + ['52:54:00' | community.general.random_mac] }}" - when: ( hypershift.agents_parms.static_ip_parms.static_ip == true ) and ( hypershift.agents_parms.agent_mac_addr == None ) + when: + - ( hypershift.agents_parms.static_ip_parms.static_ip == true ) and ( hypershift.agents_parms.agent_mac_addr == None ) + - hypershift.compute_node_type | lower != 'zvm' loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}" - name: Create NMState Configs template: src: nmStateConfig.yaml.j2 dest: /root/ansible_workdir/nmStateConfig-agent-{{ item }}.yaml - when: hypershift.agents_parms.static_ip_parms.static_ip == true + when: + - hypershift.agents_parms.static_ip_parms.static_ip == true + - hypershift.compute_node_type | lower != 'zvm' loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}" - name: Deploy NMState Configs command: oc apply -f /root/ansible_workdir/nmStateConfig-agent-{{ item }}.yaml - when: hypershift.agents_parms.static_ip_parms.static_ip == true + when: + - hypershift.agents_parms.static_ip_parms.static_ip == true + - hypershift.compute_node_type | lower != 'zvm' loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}" - name: Wait for ISO to generate in InfraEnv diff --git 
a/roles/create_inventory_setup_hypershift/templates/inventory_template.j2 b/roles/create_inventory_setup_hypershift/templates/inventory_template.j2 index 615034c4..48689443 100644 --- a/roles/create_inventory_setup_hypershift/templates/inventory_template.j2 +++ b/roles/create_inventory_setup_hypershift/templates/inventory_template.j2 @@ -1,7 +1,7 @@ +{% if hypershift.compute_node_type | lower != 'zvm' %} [kvm_host_hypershift] kvm_host_hypershift ansible_host={{ hypershift.kvm_host }} ansible_user={{ hypershift.kvm_host_user }} ansible_become_password={{ kvm_host_password }} - - +{% endif %} [bastion_hypershift] bastion_hypershift ansible_host={{ hypershift.bastion_hypershift }} ansible_user={{ hypershift.bastion_hypershift_user }} diff --git a/roles/create_inventory_setup_hypershift/templates/ssh-key.exp.j2 b/roles/create_inventory_setup_hypershift/templates/ssh-key.exp.j2 index 2d375510..46e8026a 100644 --- a/roles/create_inventory_setup_hypershift/templates/ssh-key.exp.j2 +++ b/roles/create_inventory_setup_hypershift/templates/ssh-key.exp.j2 @@ -1,6 +1,12 @@ #!/usr/bin/expect +{% if hypershift.compute_node_type | lower != 'zvm' %} set password "{{ kvm_host_password }}" spawn ssh-copy-id -i {{ lookup('env', 'HOME') }}/.ssh/{{ env.ansible_key_name }} {{ hypershift.kvm_host_user }}@{{ hypershift.kvm_host }} expect "{{ hypershift.kvm_host_user }}@{{ hypershift.kvm_host }}'s password:" +{% else %} +set password "{{ bastion_root_pw }}" +spawn ssh-copy-id -i {{ lookup('env', 'HOME') }}/.ssh/{{ env.ansible_key_name }} {{ hypershift.bastion_hypershift_user }}@{{ hypershift.bastion_hypershift }} +expect "{{ hypershift.bastion_hypershift_user }}@{{ hypershift.bastion_hypershift }}'s password:" +{% endif %} send "$password\r" expect eof diff --git a/roles/delete_resources_bastion_hypershift/tasks/main.yaml b/roles/delete_resources_bastion_hypershift/tasks/main.yaml index dab8d15f..ff6f62b9 100644 --- a/roles/delete_resources_bastion_hypershift/tasks/main.yaml +++ b/roles/delete_resources_bastion_hypershift/tasks/main.yaml @@ -134,3 +134,21 @@ name: "{{ hypershift.asc.mce_namespace }}" state: absent when: hypershift.mce.delete == true + +- name: Delete initrd.img + file: + path: /var/lib/libvirt/images/pxeboot/initrd.img + state: absent + when: hypershift.compute_node_type | lower == 'zvm' + +- name: Delete kernel.img + file: + path: /var/lib/libvirt/images/pxeboot/kernel.img + state: absent + when: hypershift.compute_node_type | lower == 'zvm' + +- name: Delete rootfs.img + file: + path: /var/www/html/rootfs.img + state: absent + when: hypershift.compute_node_type | lower == 'zvm' diff --git a/roles/download_rootfs_hypershift/tasks/main.yaml b/roles/download_rootfs_hypershift/tasks/main.yaml index 858bd664..17e5d552 100644 --- a/roles/download_rootfs_hypershift/tasks/main.yaml +++ b/roles/download_rootfs_hypershift/tasks/main.yaml @@ -38,17 +38,13 @@ - internal - public -- name: Download ipxe script - shell: curl -k -L $(oc -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} get InfraEnv {{ hypershift.hcp.hosted_cluster_name }} -ojsonpath="{.status.bootArtifacts.ipxeScript}") - register: ipxe_script - -- name: Get URL for rootfs - set_fact: - rootfs_url: "{{ ipxe_script.stdout_lines[2].split(' ')[3].split('url=')[1] }}" +- name: Get URL for rootfs.img + shell: oc -n "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}" get InfraEnv "{{ hypershift.hcp.hosted_cluster_name }}" -ojsonpath="{.status.bootArtifacts.rootfs}" + register: rootfs - 
name: Download rootfs.img get_url: - url: "{{ rootfs_url }}" + url: "{{ rootfs.stdout }}" dest: /var/www/html/rootfs.img validate_certs: false diff --git a/roles/install_prereqs_bastion_hypershift/tasks/main.yaml b/roles/install_prereqs_bastion_hypershift/tasks/main.yaml index a2e67a6a..72063056 100644 --- a/roles/install_prereqs_bastion_hypershift/tasks/main.yaml +++ b/roles/install_prereqs_bastion_hypershift/tasks/main.yaml @@ -48,6 +48,29 @@ path: /etc/haproxy/haproxy.cfg line: " server worker-{{item}} {{ mgmt_workers.stdout_lines[item]}}" loop: "{{ range(mgmt_workers_count.stdout|int) | list }}" + +- name: Add machine-config-server details to Haproxy + blockinfile: + path: /etc/haproxy/haproxy.cfg + block: | + frontend {{ hypershift.hcp.hosted_cluster_name }}-machine-config-server + mode tcp + option tcplog + bind api.{{ hypershift.hcp.hosted_cluster_name }}.{{ hypershift.hcp.basedomain }}:22623 + default_backend {{ hypershift.hcp.hosted_cluster_name }}-machine-config-server + + backend {{ hypershift.hcp.hosted_cluster_name }}-machine-config-server + mode tcp + balance source + marker: "# machine-config-server" + when: hypershift.compute_node_type | lower == 'zvm' + +- name: Add Management Cluster Worker IPs to Haproxy + lineinfile: + path: /etc/haproxy/haproxy.cfg + line: " server worker{{item}} {{ mgmt_workers.stdout_lines[item]}}" + loop: "{{ range(mgmt_workers_count.stdout|int) | list }}" + when: hypershift.compute_node_type | lower == 'zvm' - name: allow http traffic firewalld: diff --git a/roles/install_tessia_baselib/tasks/main.yaml b/roles/install_tessia_baselib/tasks/main.yaml new file mode 100644 index 00000000..adc85004 --- /dev/null +++ b/roles/install_tessia_baselib/tasks/main.yaml @@ -0,0 +1,26 @@ +--- +- name: Install Packages on zvm host + yum: + name: + - "{{ item }}" + state: present + loop: "{{ env.pkgs.zvm }}" + +- name: Install setuptools_rust using pip + pip: + name: setuptools_rust + executable: pip3 + +- name: Install cryptography using pip + pip: + name: cryptography + executable: pip3 + +- name: Clone tessia-baselib repository + git: + repo: https://gitlab.com/tessia-project/tessia-baselib.git + dest: /root/ansible_workdir/tessia-baselib + +- name: Change directory to tessia-baselib and install dependencies using pip3 + shell: cd /root/ansible_workdir/tessia-baselib ; pip3 install -U . 
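Since the pip install is the step most likely to fail quietly, an optional sanity-check task could follow it; this is a sketch, not part of the patch, and it only confirms that the module used by boot_nodes.py is importable:

- name: Verify tessia-baselib is importable (optional sanity check)
  command: python3 -c "from tessia.baselib.hypervisors.zvm.zvm import HypervisorZvm"
  changed_when: false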
+
diff --git a/roles/scale_nodepool_and_wait_for_workers_hypershift/tasks/main.yaml b/roles/scale_nodepool_and_wait_for_workers_hypershift/tasks/main.yaml
index 86aa698f..be5dce23 100644
--- a/roles/scale_nodepool_and_wait_for_workers_hypershift/tasks/main.yaml
+++ b/roles/scale_nodepool_and_wait_for_workers_hypershift/tasks/main.yaml
@@ -8,23 +8,28 @@
   until: agents.resources | length == {{ hypershift.agents_parms.agents_count }}
   retries: 30
   delay: 10
+  when: hypershift.compute_node_type | lower != 'zvm'
 
 - name: Get agent names
   command: oc get agents -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} --no-headers
   register: agents_info
+  when: hypershift.compute_node_type | lower != 'zvm'
 
 - name: Create List for agents
   set_fact:
     agents: []
+  when: hypershift.compute_node_type | lower != 'zvm'
 
 - name: Get a List of agents
   set_fact:
     agents: "{{ agents + [agents_info.stdout.split('\n')[item].split(' ')[0]] }}"
   loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
-
+  when: hypershift.compute_node_type | lower != 'zvm'
+
 - name: Patch Agents
   shell: oc -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} patch agent {{ agents[item] }} -p '{"spec":{"installation_disk_id":"/dev/vda","approved":true,"hostname":"worker-{{item}}.{{ hypershift.hcp.hosted_cluster_name }}.{{ hypershift.hcp.basedomain }}"}}' --type merge
   loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+  when: hypershift.compute_node_type | lower != 'zvm'
 
 - name: Scale Nodepool
   command: oc -n {{ hypershift.hcp.clusters_namespace }} scale nodepool {{ hypershift.hcp.hosted_cluster_name }} --replicas {{ hypershift.agents_parms.agents_count }}

diff --git a/roles/setup_for_agents_hypershift/tasks/main.yaml b/roles/setup_for_agents_hypershift/tasks/main.yaml
index a2c65200..bcde6dea 100644
--- a/roles/setup_for_agents_hypershift/tasks/main.yaml
+++ b/roles/setup_for_agents_hypershift/tasks/main.yaml
@@ -1,34 +1,33 @@
 ---
-
-- name: Download ipxe script
-  shell: curl -k -L $(oc -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} get InfraEnv {{ hypershift.hcp.hosted_cluster_name }} -ojsonpath="{.status.bootArtifacts.ipxeScript}")
-  register: ipxe_script
-
 - name: Create Installation directory
   file:
     path: /var/lib/libvirt/images/pxeboot
     state: directory
     mode: '0755'
 
-- name: Get URL for initrd
-  set_fact:
-    initrd_url: "{{ ipxe_script.stdout_lines[1].split(' ')[3] }}"
+- name: Get URL for initrd.img
+  shell: oc -n "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}" get InfraEnv "{{ hypershift.hcp.hosted_cluster_name }}" -ojsonpath="{.status.bootArtifacts.initrd}"
+  register: initrd
 
-- name: Download initrd.img 
+- name: Download initrd.img
   get_url:
-    url: "{{ initrd_url }}"
+    url: "{{ initrd.stdout }}"
     dest: /var/lib/libvirt/images/pxeboot/initrd.img
     validate_certs: false
 
 - name: Get URL for kernel.img
-  set_fact:
-    kernel_url: "{{ ipxe_script.stdout_lines[2].split(' ')[1] }}"
+  shell: oc -n "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}" get InfraEnv "{{ hypershift.hcp.hosted_cluster_name }}" -ojsonpath="{.status.bootArtifacts.kernel}"
+  register: kernel
 
 - name: Download kernel.img
   get_url:
-    url: "{{ kernel_url }}"
+    url: "{{ kernel.stdout }}"
     dest: /var/lib/libvirt/images/pxeboot/kernel.img
     validate_certs: false
-
-
+
+- name: Generate parm files
+  template:
+    src: parm-file.parm.j2
+    dest: /root/ansible_workdir/agent-{{ item }}.parm
+  when: hypershift.compute_node_type | lower == 'zvm'
+  loop: "{{ range(hypershift.agents_parms.agents_count | int) | list }}"

diff --git a/roles/setup_for_agents_hypershift/templates/parm-file.parm.j2 b/roles/setup_for_agents_hypershift/templates/parm-file.parm.j2
new file mode 100644
index 00000000..e6cde3ab
--- /dev/null
+++ b/roles/setup_for_agents_hypershift/templates/parm-file.parm.j2
@@ -0,0 +1 @@
+rd.neednet=1 console=ttysclp0 coreos.live.rootfs_url=http://{{ hypershift.bastion_hypershift }}:8080/rootfs.img ip={{ hypershift.agents_parms.zvm_parameters.nodes[item].osa.ip }}::{{ hypershift.agents_parms.zvm_parameters.gateway }}:{{ hypershift.agents_parms.zvm_parameters.subnetmask }}::{{ hypershift.agents_parms.zvm_parameters.nodes[item].osa.ifname }}:none nameserver={{ hypershift.agents_parms.zvm_parameters.nameserver }} zfcp.allow_lun_scan=0 rd.znet=qeth,{{ hypershift.agents_parms.zvm_parameters.nodes[item].osa.id }},layer2=1 {% if hypershift.agents_parms.zvm_parameters.disk_type | lower != 'fcp' %}rd.dasd=0.0.{{ hypershift.agents_parms.zvm_parameters.nodes[item].dasd.disk_id }}{% else %}rd.zfcp={{ hypershift.agents_parms.zvm_parameters.nodes[item].lun[0].paths[0].fcp }},{{ hypershift.agents_parms.zvm_parameters.nodes[item].lun[0].paths[0].wwpn }},{{ hypershift.agents_parms.zvm_parameters.nodes[item].lun[0].id }} {% endif %} random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"
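For reference, rendering this template with the illustrative dasd values sketched earlier (node IP 192.168.10.5, and hypershift.bastion_hypershift assumed to be 192.168.10.3) would produce an agent-0.parm of roughly the following, as a single line (wrapped here for readability):

rd.neednet=1 console=ttysclp0 coreos.live.rootfs_url=http://192.168.10.3:8080/rootfs.img
ip=192.168.10.5::192.168.10.1:255.255.255.0::encbdf0:none nameserver=192.168.10.1
zfcp.allow_lun_scan=0 rd.znet=qeth,0.0.bdf0,0.0.bdf1,0.0.bdf2,layer2=1 rd.dasd=0.0.4404
random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal
console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"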