From 2ac869eb33dad797d4b62e00ca9ef750396cc1f8 Mon Sep 17 00:00:00 2001
From: Itxaka
Date: Fri, 31 Jan 2020 14:23:50 +0100
Subject: [PATCH 1/7] Enable tfstate compatibility with tf0.12

The output format of tf0.12 has changed with respect to tf0.11, which
means that we no longer access the inventory at the same path. In
preparation for tf0.12 compatibility, introduce a check for the tfstate
version and return the proper path.
---
 ci/infra/testrunner/platforms/terraform.py | 10 ++++++++--
 ci/infra/testrunner/platforms/vmware.py    |  5 ++++-
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/ci/infra/testrunner/platforms/terraform.py b/ci/infra/testrunner/platforms/terraform.py
index 7baed51ad6..e44d6b441a 100644
--- a/ci/infra/testrunner/platforms/terraform.py
+++ b/ci/infra/testrunner/platforms/terraform.py
@@ -76,7 +76,10 @@ def _load_tfstate(self):

     def get_lb_ipaddr(self):
         self._load_tfstate()
-        return self.state["modules"][0]["outputs"]["ip_load_balancer"]["value"]["{}-lb".format(self.stack_name())]
+        if self.state["version"] == 3:
+            return self.state["modules"][0]["outputs"]["ip_load_balancer"]["value"]["{}-lb".format(self.stack_name())]
+        elif self.state["version"] == 4:
+            return self.state["outputs"]["ip_load_balancer"]["value"]["{}-lb".format(self.stack_name())]

     def get_num_nodes(self, role):
         return len(self.get_nodes_ipaddrs(role))
@@ -92,7 +95,10 @@ def get_nodes_ipaddrs(self, role):
             raise ValueError("Invalid role: {}".format(role))

         role_key = "ip_" + role + "s"
-        return list(self.state["modules"][0]["outputs"][role_key]["value"].values())
+        if self.state["version"] == 3:
+            return list(self.state["modules"][0]["outputs"][role_key]["value"].values())
+        elif self.state["version"] == 4:
+            return list(self.state["outputs"][role_key]["value"].values())

     @step
     def _fetch_terraform_output(self):
diff --git a/ci/infra/testrunner/platforms/vmware.py b/ci/infra/testrunner/platforms/vmware.py
index d5b78ba1ee..bcf54f8784 100644
--- a/ci/infra/testrunner/platforms/vmware.py
+++ b/ci/infra/testrunner/platforms/vmware.py
@@ -41,4 +41,7 @@ def _get_platform_logs(self):

     def get_lb_ipaddr(self):
         self._load_tfstate()
-        return self.state["modules"][0]["outputs"]["ip_load_balancer"]["value"]["{}-lb-0".format(self.stack_name())]
\ No newline at end of file
+        if self.state["version"] == 3:
+            return self.state["modules"][0]["outputs"]["ip_load_balancer"]["value"]["{}-lb-0".format(self.stack_name())]
+        elif self.state["version"] == 4:
+            return self.state["outputs"]["ip_load_balancer"]["value"]["{}-lb-0".format(self.stack_name())]
\ No newline at end of file
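
For reference, the v3/v4 dispatch introduced above can be exercised on
its own. A minimal sketch, assuming nothing beyond the two state layouts
the patch shows (the sample state dicts, the IP address, and the
get_output_value helper are illustrative, not part of the testrunner):

    # Illustrative sketch: where outputs live in tfstate v3 (tf0.11)
    # versus v4 (tf0.12). The sample dicts below are made up.
    def get_output_value(state, key):
        if state["version"] == 3:
            # tf0.11 keeps outputs inside the root module entry
            return state["modules"][0]["outputs"][key]["value"]
        elif state["version"] == 4:
            # tf0.12 moves outputs to the top level of the state file
            return state["outputs"][key]["value"]
        raise ValueError("unsupported tfstate version: %s" % state["version"])

    v3 = {"version": 3,
          "modules": [{"outputs": {"ip_load_balancer": {"value": {"testing-lb": "10.84.73.10"}}}}]}
    v4 = {"version": 4,
          "outputs": {"ip_load_balancer": {"value": {"testing-lb": "10.84.73.10"}}}}

    assert get_output_value(v3, "ip_load_balancer") == get_output_value(v4, "ip_load_balancer")
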
From 6c9400c13b59edb39bcccb040e762303be8e32fd Mon Sep 17 00:00:00 2001
From: Itxaka
Date: Mon, 3 Feb 2020 09:46:56 +0100
Subject: [PATCH 2/7] Change 0/1 into bool in tfvars for tf0.12

On tf0.12, using 0/1 as a bool value is no longer allowed; instead we
need to use true/false.

This patch changes our CI and example terraform.tfvars values to sync
with the new style.
---
 ci/infra/openstack/terraform.tfvars.example         | 6 +++---
 ci/infra/openstack/terraform.tfvars.json.ci.example | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/ci/infra/openstack/terraform.tfvars.example b/ci/infra/openstack/terraform.tfvars.example
index 9ec9edf10f..f6bdbca3ac 100644
--- a/ci/infra/openstack/terraform.tfvars.example
+++ b/ci/infra/openstack/terraform.tfvars.example
@@ -54,7 +54,7 @@ master_size = ""
 worker_size = ""

 # Attach persistent volumes to workers
-workers_vol_enabled = 0
+workers_vol_enabled = false

 # Size of the worker volumes in GB
 workers_vol_size = 5
@@ -63,8 +63,8 @@ workers_vol_size = 5
 # dnsdomain = "my.domain.com"
 dnsdomain = ""

-# Set DNS Entry (0 is false, 1 is true)
-dnsentry = 0
+# Set DNS Entry
+dnsentry = false

 # define the repositories to use
 # EXAMPLE:
diff --git a/ci/infra/openstack/terraform.tfvars.json.ci.example b/ci/infra/openstack/terraform.tfvars.json.ci.example
index 6c974d40c5..a13aebb8dd 100644
--- a/ci/infra/openstack/terraform.tfvars.json.ci.example
+++ b/ci/infra/openstack/terraform.tfvars.json.ci.example
@@ -7,12 +7,12 @@
   "stack_name": "testing",
   "subnet_cidr": "172.28.0.0/24",
   "dnsdomain": "testing.qa.caasp.suse.net",
-  "dnsentry": 0,
+  "dnsentry": false,
   "masters": 1,
   "master_size": "m1.medium",
   "workers": 2,
   "worker_size": "m1.medium",
-  "workers_vol_enabled": 0,
+  "workers_vol_enabled": false,
   "workers_vol_size": 5,
   "repositories": {
     "caasp_devel": "http://download.suse.de/ibs/Devel:/CaaSP:/4.0/SLE_15_SP1/",
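
A quick way to spot any remaining 0/1 booleans in the JSON tfvars before
tf0.12 rejects them. This checker is a hypothetical helper, not something
shipped in the repo, and the key list is illustrative:

    import json

    # Keys we expect to be real booleans under tf0.12 (illustrative list).
    BOOL_KEYS = ("dnsentry", "workers_vol_enabled")

    def find_numeric_bools(path):
        with open(path) as f:
            tfvars = json.load(f)
        # bool is a subclass of int in Python, so exclude real booleans
        # and flag only plain integers like 0/1.
        return [k for k in BOOL_KEYS
                if isinstance(tfvars.get(k), int)
                and not isinstance(tfvars.get(k), bool)]

    print(find_numeric_bools("ci/infra/openstack/terraform.tfvars.json.ci.example"))
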
template = "server $${fqdn} $${ip}:6443\n" vars = { fqdn = "${var.stack_name}-master-${count.index}.${var.dns_domain}" - ip = "${cidrhost(var.network_cidr, 512 + count.index)}" + ip = cidrhost(var.network_cidr, 512 + count.index) } } data "template_file" "haproxy_gangway_backends_master" { - count = "${var.masters}" + count = var.masters template = "server $${fqdn} $${ip}:32001\n" vars = { fqdn = "${var.stack_name}-master-${count.index}.${var.dns_domain}" - ip = "${cidrhost(var.network_cidr, 512 + count.index)}" + ip = cidrhost(var.network_cidr, 512 + count.index) } } data "template_file" "haproxy_dex_backends_master" { - count = "${var.masters}" + count = var.masters template = "server $${fqdn} $${ip}:32000\n" vars = { fqdn = "${var.stack_name}-master-${count.index}.${var.dns_domain}" - ip = "${cidrhost(var.network_cidr, 512 + count.index)}" + ip = cidrhost(var.network_cidr, 512 + count.index) } } data "template_file" "lb_haproxy_cfg" { - template = "${file("cloud-init/haproxy.cfg.tpl")}" + template = file("cloud-init/haproxy.cfg.tpl") - vars { - apiserver_backends = "${join(" ", data.template_file.haproxy_apiserver_backends_master.*.rendered)}" - gangway_backends = "${join(" ", data.template_file.haproxy_gangway_backends_master.*.rendered)}" - dex_backends = "${join(" ", data.template_file.haproxy_dex_backends_master.*.rendered)}" + vars = { + apiserver_backends = join( + " ", + data.template_file.haproxy_apiserver_backends_master.*.rendered, + ) + gangway_backends = join( + " ", + data.template_file.haproxy_gangway_backends_master.*.rendered, + ) + dex_backends = join( + " ", + data.template_file.haproxy_dex_backends_master.*.rendered, + ) } } data "template_file" "lb_cloud_init_userdata" { - template = "${file("cloud-init/lb.tpl")}" + template = file("cloud-init/lb.tpl") - vars { - authorized_keys = "${join("\n", formatlist(" - %s", var.authorized_keys))}" - repositories = "${join("\n", data.template_file.lb_repositories.*.rendered)}" - username = "${var.username}" - password = "${var.password}" - ntp_servers = "${join("\n", formatlist (" - %s", var.ntp_servers))}" + vars = { + authorized_keys = join("\n", formatlist(" - %s", var.authorized_keys)) + repositories = join("\n", data.template_file.lb_repositories.*.rendered) + username = var.username + password = var.password + ntp_servers = join("\n", formatlist(" - %s", var.ntp_servers)) } } resource "libvirt_volume" "lb" { name = "${var.stack_name}-lb-volume" - pool = "${var.pool}" - size = "${var.disk_size}" - base_volume_id = "${libvirt_volume.img.id}" + pool = var.pool + size = var.disk_size + base_volume_id = libvirt_volume.img.id } resource "libvirt_cloudinit_disk" "lb" { name = "${var.stack_name}-lb-cloudinit-disk" - pool = "${var.pool}" + pool = var.pool - user_data = "${data.template_file.lb_cloud_init_userdata.rendered}" + user_data = data.template_file.lb_cloud_init_userdata.rendered } resource "libvirt_domain" "lb" { name = "${var.stack_name}-lb-domain" - memory = "${var.lb_memory}" - vcpu = "${var.lb_vcpu}" - cloudinit = "${libvirt_cloudinit_disk.lb.id}" + memory = var.lb_memory + vcpu = var.lb_vcpu + cloudinit = libvirt_cloudinit_disk.lb.id - cpu { + cpu = { mode = "host-passthrough" } disk { - volume_id = "${libvirt_volume.lb.id}" + volume_id = libvirt_volume.lb.id } network_interface { - network_id = "${libvirt_network.network.id}" + network_id = libvirt_network.network.id hostname = "${var.stack_name}-lb" - addresses = ["${cidrhost(var.network_cidr, 256)}"] + addresses = [cidrhost(var.network_cidr, 256)] wait_for_lease = 
1 } @@ -102,13 +111,16 @@ resource "libvirt_domain" "lb" { } resource "null_resource" "lb_wait_cloudinit" { - depends_on = ["libvirt_domain.lb"] - count = "${var.lbs}" + depends_on = [libvirt_domain.lb] + count = var.lbs connection { - host = "${element(libvirt_domain.lb.*.network_interface.0.addresses.0, count.index)}" - user = "${var.username}" - password = "${var.password}" + host = element( + libvirt_domain.lb.*.network_interface.0.addresses.0, + count.index, + ) + user = var.username + password = var.password type = "ssh" } @@ -120,22 +132,25 @@ resource "null_resource" "lb_wait_cloudinit" { } resource "null_resource" "lb_push_haproxy_cfg" { - depends_on = ["null_resource.lb_wait_cloudinit"] - count = "${var.lbs}" + depends_on = [null_resource.lb_wait_cloudinit] + count = var.lbs triggers = { - master_count = "${var.masters}" + master_count = var.masters } connection { - host = "${element(libvirt_domain.lb.*.network_interface.0.addresses.0, count.index)}" - user = "${var.username}" + host = element( + libvirt_domain.lb.*.network_interface.0.addresses.0, + count.index, + ) + user = var.username type = "ssh" agent = true } provisioner "file" { - content = "${data.template_file.lb_haproxy_cfg.rendered}" + content = data.template_file.lb_haproxy_cfg.rendered destination = "/tmp/haproxy.cfg" } @@ -146,3 +161,4 @@ resource "null_resource" "lb_push_haproxy_cfg" { ] } } + diff --git a/ci/infra/libvirt/main.tf b/ci/infra/libvirt/main.tf index 3745785d39..48787ca2c1 100644 --- a/ci/infra/libvirt/main.tf +++ b/ci/infra/libvirt/main.tf @@ -1,9 +1,10 @@ provider "libvirt" { - uri = "${var.libvirt_uri}" + uri = var.libvirt_uri } resource "libvirt_volume" "img" { name = "${var.stack_name}-${basename(var.image_uri)}" - source = "${var.image_uri}" - pool = "${var.pool}" + source = var.image_uri + pool = var.pool } + diff --git a/ci/infra/libvirt/master-instance.tf b/ci/infra/libvirt/master-instance.tf index 93942c531a..44e8391898 100644 --- a/ci/infra/libvirt/master-instance.tf +++ b/ci/infra/libvirt/master-instance.tf @@ -1,91 +1,91 @@ data "template_file" "master_repositories" { - template = "${file("cloud-init/repository.tpl")}" - count = "${length(var.repositories)}" + template = file("cloud-init/repository.tpl") + count = length(var.repositories) - vars { - repository_url = "${element(values(var.repositories), count.index)}" - repository_name = "${element(keys(var.repositories), count.index)}" + vars = { + repository_url = element(values(var.repositories), count.index) + repository_name = element(keys(var.repositories), count.index) } } data "template_file" "master_register_scc" { - template = "${file("cloud-init/register-scc.tpl")}" - count = "${var.caasp_registry_code == "" ? 0 : 1}" + template = file("cloud-init/register-scc.tpl") + count = var.caasp_registry_code == "" ? 0 : 1 - vars { - caasp_registry_code = "${var.caasp_registry_code}" + vars = { + caasp_registry_code = var.caasp_registry_code } } data "template_file" "master_register_rmt" { - template = "${file("cloud-init/register-rmt.tpl")}" - count = "${var.rmt_server_name == "" ? 0 : 1}" + template = file("cloud-init/register-rmt.tpl") + count = var.rmt_server_name == "" ? 0 : 1 - vars { - rmt_server_name = "${var.rmt_server_name}" + vars = { + rmt_server_name = var.rmt_server_name } } data "template_file" "master_commands" { - template = "${file("cloud-init/commands.tpl")}" - count = "${join("", var.packages) == "" ? 0 : 1}" + template = file("cloud-init/commands.tpl") + count = join("", var.packages) == "" ? 
0 : 1 - vars { - packages = "${join(", ", var.packages)}" + vars = { + packages = join(", ", var.packages) } } data "template_file" "master-cloud-init" { - template = "${file("cloud-init/common.tpl")}" - - vars { - authorized_keys = "${join("\n", formatlist(" - %s", var.authorized_keys))}" - repositories = "${join("\n", data.template_file.master_repositories.*.rendered)}" - register_scc = "${join("\n", data.template_file.master_register_scc.*.rendered)}" - register_rmt = "${join("\n", data.template_file.master_register_rmt.*.rendered)}" - commands = "${join("\n", data.template_file.master_commands.*.rendered)}" - username = "${var.username}" - password = "${var.password}" - ntp_servers = "${join("\n", formatlist (" - %s", var.ntp_servers))}" + template = file("cloud-init/common.tpl") + + vars = { + authorized_keys = join("\n", formatlist(" - %s", var.authorized_keys)) + repositories = join("\n", data.template_file.master_repositories.*.rendered) + register_scc = join("\n", data.template_file.master_register_scc.*.rendered) + register_rmt = join("\n", data.template_file.master_register_rmt.*.rendered) + commands = join("\n", data.template_file.master_commands.*.rendered) + username = var.username + password = var.password + ntp_servers = join("\n", formatlist(" - %s", var.ntp_servers)) } } resource "libvirt_volume" "master" { name = "${var.stack_name}-master-volume-${count.index}" - pool = "${var.pool}" - size = "${var.disk_size}" - base_volume_id = "${libvirt_volume.img.id}" - count = "${var.masters}" + pool = var.pool + size = var.disk_size + base_volume_id = libvirt_volume.img.id + count = var.masters } resource "libvirt_cloudinit_disk" "master" { # needed when 0 master nodes are defined - count = "${var.masters}" + count = var.masters name = "${var.stack_name}-master-cloudinit-disk-${count.index}" - pool = "${var.pool}" - user_data = "${data.template_file.master-cloud-init.rendered}" + pool = var.pool + user_data = data.template_file.master-cloud-init.rendered } resource "libvirt_domain" "master" { - count = "${var.masters}" + count = var.masters name = "${var.stack_name}-master-domain-${count.index}" - memory = "${var.master_memory}" - vcpu = "${var.master_vcpu}" - cloudinit = "${element(libvirt_cloudinit_disk.master.*.id, count.index)}" - depends_on = ["libvirt_domain.lb"] + memory = var.master_memory + vcpu = var.master_vcpu + cloudinit = element(libvirt_cloudinit_disk.master.*.id, count.index) + depends_on = [libvirt_domain.lb] - cpu { + cpu = { mode = "host-passthrough" } disk { - volume_id = "${element(libvirt_volume.master.*.id, count.index)}" + volume_id = element(libvirt_volume.master.*.id, count.index) } network_interface { - network_id = "${libvirt_network.network.id}" + network_id = libvirt_network.network.id hostname = "${var.stack_name}-master-${count.index}" - addresses = ["${cidrhost(var.network_cidr, 512 + count.index)}"] + addresses = [cidrhost(var.network_cidr, 512 + count.index)] wait_for_lease = 1 } @@ -96,13 +96,16 @@ resource "libvirt_domain" "master" { } resource "null_resource" "master_wait_cloudinit" { - depends_on = ["libvirt_domain.master"] - count = "${var.masters}" + depends_on = [libvirt_domain.master] + count = var.masters connection { - host = "${element(libvirt_domain.master.*.network_interface.0.addresses.0, count.index)}" - user = "${var.username}" - password = "${var.password}" + host = element( + libvirt_domain.master.*.network_interface.0.addresses.0, + count.index, + ) + user = var.username + password = var.password type = "ssh" } @@ -114,13 
@@ -114,13 +117,16 @@ resource "null_resource" "master_wait_cloudinit" {
 }

 resource "null_resource" "master_reboot" {
-  depends_on = ["null_resource.master_wait_cloudinit"]
-  count      = "${var.masters}"
+  depends_on = [null_resource.master_wait_cloudinit]
+  count      = var.masters

   provisioner "local-exec" {
     environment = {
-      user = "${var.username}"
-      host = "${element(libvirt_domain.master.*.network_interface.0.addresses.0, count.index)}"
+      user = var.username
+      host = element(
+        libvirt_domain.master.*.network_interface.0.addresses.0,
+        count.index,
+      )
     }

     command = <

Date: Tue, 14 Jan 2020 10:18:06 +0100
Subject: [PATCH 4/7] Manual update of libvirt tf files for 0.12

These are the manual operations to perform for a terraform 0.12 upgrade
in libvirt, after a terraform 0.12upgrade, in order to pass terraform
validate.
---
 ci/infra/libvirt/lb-instances.tf    | 2 +-
 ci/infra/libvirt/master-instance.tf | 2 +-
 ci/infra/libvirt/worker-instance.tf | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/ci/infra/libvirt/lb-instances.tf b/ci/infra/libvirt/lb-instances.tf
index f4609546a9..9bce9a2c16 100644
--- a/ci/infra/libvirt/lb-instances.tf
+++ b/ci/infra/libvirt/lb-instances.tf
@@ -101,7 +101,7 @@ resource "libvirt_domain" "lb" {
     network_id     = libvirt_network.network.id
     hostname       = "${var.stack_name}-lb"
     addresses      = [cidrhost(var.network_cidr, 256)]
-    wait_for_lease = 1
+    wait_for_lease = true
   }

   graphics {
diff --git a/ci/infra/libvirt/master-instance.tf b/ci/infra/libvirt/master-instance.tf
index 44e8391898..78e8a3f220 100644
--- a/ci/infra/libvirt/master-instance.tf
+++ b/ci/infra/libvirt/master-instance.tf
@@ -86,7 +86,7 @@ resource "libvirt_domain" "master" {
     network_id     = libvirt_network.network.id
     hostname       = "${var.stack_name}-master-${count.index}"
     addresses      = [cidrhost(var.network_cidr, 512 + count.index)]
-    wait_for_lease = 1
+    wait_for_lease = true
   }

   graphics {
diff --git a/ci/infra/libvirt/worker-instance.tf b/ci/infra/libvirt/worker-instance.tf
index c0b13156a2..21283b4fe5 100644
--- a/ci/infra/libvirt/worker-instance.tf
+++ b/ci/infra/libvirt/worker-instance.tf
@@ -86,7 +86,7 @@ resource "libvirt_domain" "worker" {
     network_id     = libvirt_network.network.id
     hostname       = "${var.stack_name}-worker-${count.index}"
     addresses      = [cidrhost(var.network_cidr, 768 + count.index)]
-    wait_for_lease = 1
+    wait_for_lease = true
   }

   graphics {
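
The workflow these two patches describe — run the upgrade tool per
folder, then fix whatever terraform validate still rejects — can be
scripted. A sketch, assuming terraform 0.12 is on PATH and the folder
list matches the repo layout (the -yes flag skips the 0.12upgrade
confirmation prompt):

    import subprocess

    FOLDERS = ("ci/infra/libvirt", "ci/infra/openstack", "ci/infra/vmware")

    for folder in FOLDERS:
        # init is required so provider schemas are available to the upgrader
        subprocess.run(["terraform", "init", "-backend=false"], cwd=folder, check=True)
        # rewrite the configuration to 0.12 syntax in place
        subprocess.run(["terraform", "0.12upgrade", "-yes"], cwd=folder, check=True)
        # surface anything that still needs a manual fix (e.g. wait_for_lease)
        subprocess.run(["terraform", "validate"], cwd=folder, check=True)
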
From 3eb205476e08b92787170cddceb0ce2da63e60ed Mon Sep 17 00:00:00 2001
From: Jean-Philippe Evrard
Date: Tue, 14 Jan 2020 10:18:21 +0100
Subject: [PATCH 5/7] Manual update of OpenStack tf files for 0.12

These are the manual operations to perform for a terraform 0.12 upgrade
in OpenStack, after a terraform 0.12upgrade:

- Booleans should not be expressed as 0 or 1, and should be written
  true or false [1]
- All the variables are made explicit, so password has to be defined
  in tfvars
- All the dependencies have to be explicit, so we are adding a
  dependency with the floating IP association [2].

This will make sure the `terraform validate` command succeeds, and that
a CaaSP cluster can be fully initialized with the current tf files.

[1]: See https://www.terraform.io/docs/configuration/expressions.html ,
     section #literal-expressions
[2]: The Floating IP associations should wait for the hosts to be up,
     and the connection to the hosts should wait until the floating IP
     is associated. Without this patch, terraform apply does not know
     the dependency links, and could lead to an error where there is no
     element in the collection master_wait_cloudinit or
     worker_wait_cloudinit, should the apply go faster than the
     floating IP association.
---
 ci/infra/openstack/master-instance.tf | 6 +++++-
 ci/infra/openstack/variables.tf       | 9 +++++++--
 ci/infra/openstack/worker-instance.tf | 6 +++++-
 3 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/ci/infra/openstack/master-instance.tf b/ci/infra/openstack/master-instance.tf
index df3a6e6f5d..a2c51a30ed 100644
--- a/ci/infra/openstack/master-instance.tf
+++ b/ci/infra/openstack/master-instance.tf
@@ -80,6 +80,7 @@ resource "openstack_networking_floatingip_v2" "master_ext" {
 }

 resource "openstack_compute_floatingip_associate_v2" "master_ext_ip" {
+  depends_on  = [openstack_compute_instance_v2.master]
   count       = var.masters
   floating_ip = element(
     openstack_networking_floatingip_v2.master_ext.*.address,
@@ -89,7 +90,10 @@ resource "openstack_compute_floatingip_associate_v2" "master_ext_ip" {
 }

 resource "null_resource" "master_wait_cloudinit" {
-  depends_on = [openstack_compute_instance_v2.master]
+  depends_on = [
+    openstack_compute_instance_v2.master,
+    openstack_compute_floatingip_associate_v2.master_ext_ip
+  ]
   count = var.masters

   connection {
diff --git a/ci/infra/openstack/variables.tf b/ci/infra/openstack/variables.tf
index 41d9521b3a..f4e457c859 100644
--- a/ci/infra/openstack/variables.tf
+++ b/ci/infra/openstack/variables.tf
@@ -55,7 +55,7 @@ variable "workers" {
 }

 variable "workers_vol_enabled" {
-  default     = 0
+  default     = false
   description = "Attach persistent volumes to workers"
 }
@@ -70,7 +70,7 @@ variable "dnsdomain" {
 }

 variable "dnsentry" {
-  default     = 0
+  default     = false
   description = "DNS Entry"
 }
@@ -112,6 +112,11 @@ variable "username" {
   description = "Default user for the cluster nodes created by cloud-init default configuration for all SUSE SLES systems"
 }

+variable "password" {
+  default     = "sles"
+  description = "Default password for the cluster nodes created by cloud-init default configuration for all SUSE SLES systems"
+}
+
 variable "caasp_registry_code" {
   default     = ""
   description = "SUSE CaaSP Product Registration Code"
diff --git a/ci/infra/openstack/worker-instance.tf b/ci/infra/openstack/worker-instance.tf
index d789bc7bbd..c167867da9 100644
--- a/ci/infra/openstack/worker-instance.tf
+++ b/ci/infra/openstack/worker-instance.tf
@@ -94,6 +94,7 @@ resource "openstack_networking_floatingip_v2" "worker_ext" {
 }

 resource "openstack_compute_floatingip_associate_v2" "worker_ext_ip" {
+  depends_on  = [openstack_compute_instance_v2.worker]
   count       = var.workers
   floating_ip = element(
     openstack_networking_floatingip_v2.worker_ext.*.address,
@@ -103,7 +104,10 @@ resource "openstack_compute_floatingip_associate_v2" "worker_ext_ip" {
 }

 resource "null_resource" "worker_wait_cloudinit" {
-  depends_on = [openstack_compute_instance_v2.worker]
+  depends_on = [
+    openstack_compute_instance_v2.worker,
+    openstack_compute_floatingip_associate_v2.worker_ext_ip,
+  ]
   count = var.workers

   connection {
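
The failure mode described in [2] can be shown in miniature: indexing
into a collection that another resource has not populated yet fails,
and an explicit dependency is what forces the producing step to run
first. A rough Python analogy of terraform's element() behaviour, with
illustrative names and values, not a real reproduction of the apply:

    def element(collection, index):
        # like terraform's element(): wraps around, but cannot index
        # into an empty collection
        if not collection:
            raise IndexError("no element in an empty collection")
        return collection[index % len(collection)]

    associated_ips = []                    # association still in flight
    # element(associated_ips, 0)           # would raise, like the apply error
    associated_ips.append("10.86.0.42")    # depends_on orders this step first
    print(element(associated_ips, 0))      # now safe
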
From 8793e2e94f995309652cfd8a54cb988066f2b9fc Mon Sep 17 00:00:00 2001
From: Itxaka
Date: Mon, 3 Feb 2020 10:32:10 +0100
Subject: [PATCH 6/7] Move jobs to new tf12 workers

We need to move the jobs to the new integration workers, as we have
updated our terraform files to be compatible with 0.12; otherwise the
jobs will fail to run under tf0.11.

Signed-off-by: Pablo Chacin
---
 ci/jenkins/pipelines/prs/skuba-code-lint.Jenkinsfile          | 2 +-
 ci/jenkins/pipelines/prs/skuba-jjb-validation.Jenkinsfile     | 2 +-
 ci/jenkins/pipelines/prs/skuba-test-vmware.Jenkinsfile        | 2 +-
 ci/jenkins/pipelines/prs/skuba-test.Jenkinsfile               | 2 +-
 ci/jenkins/pipelines/prs/skuba-update-acceptance.Jenkinsfile  | 2 +-
 ci/jenkins/pipelines/prs/skuba-update-unit.Jenkinsfile        | 2 +-
 ci/jenkins/pipelines/prs/skuba-validate-pr-author.Jenkinsfile | 2 +-
 ci/jenkins/pipelines/skuba-conformance.Jenkinsfile            | 2 +-
 ci/jenkins/pipelines/skuba-e2e-nightly.Jenkinsfile            | 2 +-
 ci/jenkins/pipelines/skuba-handle-pr.Jenkinsfile              | 2 +-
 ci/jenkins/pipelines/skuba-jjb.Jenkinsfile                    | 2 +-
 ci/jenkins/pipelines/skuba-update-nightly.Jenkinsfile         | 2 +-
 12 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/ci/jenkins/pipelines/prs/skuba-code-lint.Jenkinsfile b/ci/jenkins/pipelines/prs/skuba-code-lint.Jenkinsfile
index bdd655726d..541cfccf2c 100644
--- a/ci/jenkins/pipelines/prs/skuba-code-lint.Jenkinsfile
+++ b/ci/jenkins/pipelines/prs/skuba-code-lint.Jenkinsfile
@@ -3,7 +3,7 @@
  */

 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }

     environment {
         OPENRC = credentials('ecp-openrc')
diff --git a/ci/jenkins/pipelines/prs/skuba-jjb-validation.Jenkinsfile b/ci/jenkins/pipelines/prs/skuba-jjb-validation.Jenkinsfile
index 2a48603f10..021b5292f2 100644
--- a/ci/jenkins/pipelines/prs/skuba-jjb-validation.Jenkinsfile
+++ b/ci/jenkins/pipelines/prs/skuba-jjb-validation.Jenkinsfile
@@ -1,7 +1,7 @@
 // this pipeline update all jenkins pipelines via jenkins job builder plugin
 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }
     environment {
         GITHUB_TOKEN = credentials('github-token')
         JENKINS_JOB_CONFIG = credentials('jenkins-job-config')
diff --git a/ci/jenkins/pipelines/prs/skuba-test-vmware.Jenkinsfile b/ci/jenkins/pipelines/prs/skuba-test-vmware.Jenkinsfile
index 87ccad89d8..c9e86e7fd6 100644
--- a/ci/jenkins/pipelines/prs/skuba-test-vmware.Jenkinsfile
+++ b/ci/jenkins/pipelines/prs/skuba-test-vmware.Jenkinsfile
@@ -6,7 +6,7 @@

 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }

     environment {
         VMWARE_ENV_FILE = credentials('vmware-env')
diff --git a/ci/jenkins/pipelines/prs/skuba-test.Jenkinsfile b/ci/jenkins/pipelines/prs/skuba-test.Jenkinsfile
index 72e6eb1c3b..53f9920eaf 100644
--- a/ci/jenkins/pipelines/prs/skuba-test.Jenkinsfile
+++ b/ci/jenkins/pipelines/prs/skuba-test.Jenkinsfile
@@ -5,7 +5,7 @@
  */

 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }

     environment {
         SKUBA_BINPATH = '/home/jenkins/go/bin/skuba'
diff --git a/ci/jenkins/pipelines/prs/skuba-update-acceptance.Jenkinsfile b/ci/jenkins/pipelines/prs/skuba-update-acceptance.Jenkinsfile
index a834f8e80d..7df0cbcd6a 100644
--- a/ci/jenkins/pipelines/prs/skuba-update-acceptance.Jenkinsfile
+++ b/ci/jenkins/pipelines/prs/skuba-update-acceptance.Jenkinsfile
@@ -1,7 +1,7 @@
 // this pipeline runs os acceptance tests for skuba-update
 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }
     environment {
         GITHUB_TOKEN = credentials('github-token')
         JENKINS_JOB_CONFIG = credentials('jenkins-job-config')
diff --git a/ci/jenkins/pipelines/prs/skuba-update-unit.Jenkinsfile b/ci/jenkins/pipelines/prs/skuba-update-unit.Jenkinsfile
index acdac14966..fff407b48f 100644
--- a/ci/jenkins/pipelines/prs/skuba-update-unit.Jenkinsfile
+++ b/ci/jenkins/pipelines/prs/skuba-update-unit.Jenkinsfile
@@ -1,7 +1,7 @@
 // this pipeline runs unit tests for skuba-update
 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }
     environment {
         GITHUB_TOKEN = credentials('github-token')
         JENKINS_JOB_CONFIG = credentials('jenkins-job-config')
diff --git a/ci/jenkins/pipelines/prs/skuba-validate-pr-author.Jenkinsfile b/ci/jenkins/pipelines/prs/skuba-validate-pr-author.Jenkinsfile
index bf74a64010..9b03a820bd 100644
--- a/ci/jenkins/pipelines/prs/skuba-validate-pr-author.Jenkinsfile
+++ b/ci/jenkins/pipelines/prs/skuba-validate-pr-author.Jenkinsfile
@@ -3,7 +3,7 @@
  */

 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }

     environment {
         GITHUB_TOKEN = credentials('github-token')
diff --git a/ci/jenkins/pipelines/skuba-conformance.Jenkinsfile b/ci/jenkins/pipelines/skuba-conformance.Jenkinsfile
index 957f34db3b..941813af5d 100644
--- a/ci/jenkins/pipelines/skuba-conformance.Jenkinsfile
+++ b/ci/jenkins/pipelines/skuba-conformance.Jenkinsfile
@@ -1,5 +1,5 @@
 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }

     environment {
         SKUBA_BINPATH = "/home/jenkins/go/bin/skuba"
diff --git a/ci/jenkins/pipelines/skuba-e2e-nightly.Jenkinsfile b/ci/jenkins/pipelines/skuba-e2e-nightly.Jenkinsfile
index 1cbe253029..5c3d328bf2 100644
--- a/ci/jenkins/pipelines/skuba-e2e-nightly.Jenkinsfile
+++ b/ci/jenkins/pipelines/skuba-e2e-nightly.Jenkinsfile
@@ -3,7 +3,7 @@
  */

 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }

     parameters {
         string(name: 'E2E_MAKE_TARGET_NAME', defaultValue: 'all', description: 'The make target to run (only e2e related)')
diff --git a/ci/jenkins/pipelines/skuba-handle-pr.Jenkinsfile b/ci/jenkins/pipelines/skuba-handle-pr.Jenkinsfile
index 329f005699..1cac4aa629 100644
--- a/ci/jenkins/pipelines/skuba-handle-pr.Jenkinsfile
+++ b/ci/jenkins/pipelines/skuba-handle-pr.Jenkinsfile
@@ -3,7 +3,7 @@
  */

 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }

     environment {
         GITHUB_TOKEN = credentials('github-token')
diff --git a/ci/jenkins/pipelines/skuba-jjb.Jenkinsfile b/ci/jenkins/pipelines/skuba-jjb.Jenkinsfile
index bbaa4a9d12..71a6f0d56c 100644
--- a/ci/jenkins/pipelines/skuba-jjb.Jenkinsfile
+++ b/ci/jenkins/pipelines/skuba-jjb.Jenkinsfile
@@ -1,6 +1,6 @@
 // this pipeline update all jenkins pipelines via jenkins job builder plugin
 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }
     environment {
         JENKINS_JOB_CONFIG = credentials('jenkins-job-config')
         REQUESTS_CA_BUNDLE = "/var/lib/ca-certificates/ca-bundle.pem"
diff --git a/ci/jenkins/pipelines/skuba-update-nightly.Jenkinsfile b/ci/jenkins/pipelines/skuba-update-nightly.Jenkinsfile
index 25fbd5eae0..77e0ecbebc 100644
--- a/ci/jenkins/pipelines/skuba-update-nightly.Jenkinsfile
+++ b/ci/jenkins/pipelines/skuba-update-nightly.Jenkinsfile
@@ -3,7 +3,7 @@
  */

 pipeline {
-    agent { node { label 'caasp-team-private' } }
+    agent { node { label 'caasp-team-private-integration' } }

     environment {
         OPENRC = credentials('ecp-openrc')
From a091229e532d3a8e7db452a7b79e72cf8f3fd174 Mon Sep 17 00:00:00 2001
From: Itxaka
Date: Mon, 3 Feb 2020 20:52:53 +0100
Subject: [PATCH 7/7] tf: wait for node before provisioning

It seems there could be a race condition when launching the cloud-init
resource, in which tf won't get the IP of the host because we haven't
got it yet.

This patch makes the provisioning resource on vmware depend on the
machine being ready, the same as is done on the other providers.
---
 ci/infra/vmware/master-instance.tf | 1 +
 ci/infra/vmware/worker-instance.tf | 1 +
 2 files changed, 2 insertions(+)

diff --git a/ci/infra/vmware/master-instance.tf b/ci/infra/vmware/master-instance.tf
index b225ea9577..eb277f4351 100644
--- a/ci/infra/vmware/master-instance.tf
+++ b/ci/infra/vmware/master-instance.tf
@@ -90,6 +90,7 @@ resource "vsphere_virtual_machine" "master" {
 }

 resource "null_resource" "master_wait_cloudinit" {
+  depends_on = [vsphere_virtual_machine.master]
   count = var.masters

   connection {
diff --git a/ci/infra/vmware/worker-instance.tf b/ci/infra/vmware/worker-instance.tf
index 006b1a0457..77ecb3d40b 100644
--- a/ci/infra/vmware/worker-instance.tf
+++ b/ci/infra/vmware/worker-instance.tf
@@ -90,6 +90,7 @@ resource "vsphere_virtual_machine" "worker" {
 }

 resource "null_resource" "worker_wait_cloudinit" {
+  depends_on = [vsphere_virtual_machine.worker]
   count = var.workers

   connection {
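
Outside of terraform's dependency graph, "wait for node before
provisioning" amounts to not connecting until the host is actually
reachable. A minimal sketch of that idea, with illustrative helper and
host names (not part of the testrunner):

    import socket
    import time

    def wait_for_ssh(host, port=22, timeout=300):
        """Poll until the node accepts TCP connections on the SSH port."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                with socket.create_connection((host, port), timeout=5):
                    return True
            except OSError:
                time.sleep(5)
        raise TimeoutError("node %s never became reachable" % host)

    # wait_for_ssh("10.84.73.10")  # then run the cloud-init wait / provisioners
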