From ec392e8a83980fd551e03361aca581ce297e3fb5 Mon Sep 17 00:00:00 2001 From: a-dubs Date: Thu, 10 Oct 2024 09:43:53 -0400 Subject: [PATCH 1/4] feat(ephemeral): refactor ephemeralIP and add ipv6 connectivity check --- cloudinit/net/ephemeral.py | 169 +++++++++++++++++++----- cloudinit/sources/DataSourceHetzner.py | 8 +- cloudinit/sources/DataSourceNWCS.py | 8 +- cloudinit/sources/helpers/vultr.py | 2 +- tests/unittests/net/test_dhcp.py | 4 +- tests/unittests/net/test_ephemeral.py | 123 ++++++++++++++++- tests/unittests/sources/test_hetzner.py | 8 +- tests/unittests/sources/test_nwcs.py | 6 +- 8 files changed, 279 insertions(+), 49 deletions(-) diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py index 39dd8ba3c8e..a996a825c99 100644 --- a/cloudinit/net/ephemeral.py +++ b/cloudinit/net/ephemeral.py @@ -5,7 +5,7 @@ import contextlib import logging from functools import partial -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Tuple import cloudinit.net as net import cloudinit.netinfo as netinfo @@ -20,7 +20,7 @@ class EphemeralIPv4Network: No operations are performed if the provided interface already has the specified configuration. - This can be verified with the connectivity_url_data. + This can be verified with the connectivity_urls_data. If unconnected, bring up the interface with valid ip, prefix and broadcast. If router is provided setup a default route for that interface. Upon context exit, clean up the interface leaving no configuration behind. @@ -281,26 +281,26 @@ def __init__( self, distro, iface=None, - connectivity_url_data: Optional[Dict[str, Any]] = None, + connectivity_urls_data: Optional[List[Dict[str, Any]]] = None, dhcp_log_func=None, ): self.iface = iface self._ephipv4: Optional[EphemeralIPv4Network] = None self.lease: Optional[Dict[str, Any]] = None self.dhcp_log_func = dhcp_log_func - self.connectivity_url_data = connectivity_url_data + self.connectivity_urls_data = connectivity_urls_data or [] self.distro = distro self.interface_addrs_before_dhcp = netinfo.netdev_info() def __enter__(self): """Setup sandboxed dhcp context, unless connectivity_url can already be reached.""" - if self.connectivity_url_data: - if net.has_url_connectivity(self.connectivity_url_data): + for url_data in self.connectivity_urls_data: + if net.has_url_connectivity(url_data): LOG.debug( "Skip ephemeral DHCP setup, instance has connectivity" " to %s", - self.connectivity_url_data, + url_data, ) return return self.obtain_lease() @@ -404,13 +404,37 @@ def __init__( interface, ipv6: bool = False, ipv4: bool = True, + connectivity_urls_data: Optional[List[Dict[str, Any]]] = None, + ipv6_connectivity_check_callback: Optional[Callable] = None, ): + """ + Args: + distro: The distro object + interface: The interface to bring up + ipv6: Whether to bring up an ipv6 network + ipv4: Whether to bring up an ipv4 network + connectivity_urls_data: List of url data to use for connectivity + check before bringing up DHCPv4 ephemeral network. + ipv6_connectivity_check_callback: A callback to check for ipv6 + connectivity. If provided, it is assumed that ipv6 networking + is preferred and ipv4 networking will be skipped if ipv6 + connectivity to IMDS is successful. This function should return + the url that was reached to verify ipv6 connectivity (if + successful - otherwise it should return None). 
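+                Note that ipv6 must also be True when this callback is
+                supplied; EphemeralIPNetwork.__enter__ raises a ValueError
+                otherwise.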
+ """ self.interface = interface self.ipv4 = ipv4 self.ipv6 = ipv6 self.stack = contextlib.ExitStack() self.state_msg: str = "" self.distro = distro + self.connectivity_urls_data = connectivity_urls_data + self.ipv6_connectivity_check_callback = ( + ipv6_connectivity_check_callback + ) + + # will be updated by the context manager + self.ipv6_reached_at_url = None def __enter__(self): if not (self.ipv4 or self.ipv6): @@ -419,33 +443,37 @@ def __enter__(self): return self exceptions = [] ephemeral_obtained = False - if self.ipv4: - try: - self.stack.enter_context( - EphemeralDHCPv4( - self.distro, - self.interface, - ) - ) - ephemeral_obtained = True - except (ProcessExecutionError, NoDHCPLeaseError) as e: - LOG.info("Failed to bring up %s for ipv4.", self) - exceptions.append(e) - if self.ipv6: - try: - self.stack.enter_context( - EphemeralIPv6Network( - self.distro, - self.interface, - ) + if self.ipv6_connectivity_check_callback is not None: + if not self.ipv6: + raise ValueError( + "ipv6_connectivity_check_callback provided but ipv6 is " + "not enabled" ) - ephemeral_obtained = True - if exceptions or not self.ipv4: - self.state_msg = "using link-local ipv6" - except ProcessExecutionError as e: - LOG.info("Failed to bring up %s for ipv6.", self) - exceptions.append(e) + ephemeral_obtained, exceptions = self._do_ipv6( + ephemeral_obtained, exceptions + ) + self.ipv6_reached_at_url = self.ipv6_connectivity_check_callback() + # if ipv6_connectivity_check_callback is provided, then we want to + # skip ipv4 ephemeral network setup if ipv6 ephemeral network setup + # and imds connectivity check succeeded + if self.ipv4 and not self.ipv6_reached_at_url: + LOG.debug( + "Bringing up ipv4 ephemeral network since ipv6 failed" + ) + ephemeral_obtained, exceptions = self._do_ipv4( + ephemeral_obtained, exceptions + ) + else: + if self.ipv4: + ephemeral_obtained, exceptions = self._do_ipv4( + ephemeral_obtained, exceptions + ) + if self.ipv6: + ephemeral_obtained, exceptions = self._do_ipv6( + ephemeral_obtained, exceptions + ) + if not ephemeral_obtained: # Ephemeral network setup failed in linkup for both ipv4 and # ipv6. Raise only the first exception found. @@ -456,5 +484,82 @@ def __enter__(self): raise exceptions[0] return self + def _do_ipv4( + self, ephemeral_obtained, exceptions + ) -> Tuple[str, List[Exception]]: + """ + Attempt to bring up an ephemeral network for ipv4 on the interface. + + Args: + ephemeral_obtained: Whether an ephemeral network has already been + obtained + exceptions: List of exceptions encountered so far + + Returns: + A tuple containing the updated ephemeral_obtained and + exceptions values + """ + try: + self.stack.enter_context( + EphemeralDHCPv4( + distro=self.distro, + iface=self.interface, + connectivity_urls_data=self.connectivity_urls_data, + ) + ) + ephemeral_obtained = True + LOG.debug( + "Successfully brought up %s for ephemeral ipv4 networking.", + self.interface, + ) + except (ProcessExecutionError, NoDHCPLeaseError) as e: + LOG.debug( + "Failed to bring up %s for ephemeral ipv4 networking.", + self.interface, + ) + # we don't set ephemeral_obtained to False here because we want to + # retain a potential true value from any previous successful + # ephemeral network setup + exceptions.append(e) + return ephemeral_obtained, exceptions + + def _do_ipv6( + self, ephemeral_obtained, exceptions + ) -> Tuple[str, List[Exception]]: + """ + Attempt to bring up an ephemeral network for ipv6 on the interface. 
+ + Args: + ephemeral_obtained: Whether an ephemeral network has already been + obtained + exceptions: List of exceptions encountered so far + + Returns: + tupleA tuple containing the updated ephemeral_obtained and + exceptions values + """ + try: + self.stack.enter_context( + EphemeralIPv6Network( + self.distro, + self.interface, + ) + ) + ephemeral_obtained = True + LOG.debug( + "Successfully brought up %s for ephemeral ipv6 networking.", + self.interface, + ) + except ProcessExecutionError as e: + LOG.debug( + "Failed to bring up %s for ephemeral ipv6 networking.", + self.interface, + ) + # we don't set ephemeral_obtained to False here because we want to + # retain a potential true value from any previous successful + # ephemeral network setup + exceptions.append(e) + return ephemeral_obtained, exceptions + def __exit__(self, *_args): self.stack.close() diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 6529e2ff1c6..7b919f66fa4 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -60,9 +60,11 @@ def _get_data(self): with EphemeralDHCPv4( self.distro, iface=net.find_fallback_nic(), - connectivity_url_data={ - "url": BASE_URL_V1 + "/metadata/instance-id", - }, + connectivity_urls_data=[ + { + "url": BASE_URL_V1 + "/metadata/instance-id", + } + ], ): md = hc_helper.read_metadata( self.metadata_address, diff --git a/cloudinit/sources/DataSourceNWCS.py b/cloudinit/sources/DataSourceNWCS.py index 7c89713cb33..bd692d3c31f 100644 --- a/cloudinit/sources/DataSourceNWCS.py +++ b/cloudinit/sources/DataSourceNWCS.py @@ -76,9 +76,11 @@ def get_metadata(self): with EphemeralDHCPv4( self.distro, iface=net.find_fallback_nic(), - connectivity_url_data={ - "url": BASE_URL_V1 + "/metadata/instance-id", - }, + connectivity_urls_data=[ + { + "url": BASE_URL_V1 + "/metadata/instance-id", + } + ], ): return read_metadata( self.metadata_address, diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py index 0c3aa6079d3..3e20ddf5e05 100644 --- a/cloudinit/sources/helpers/vultr.py +++ b/cloudinit/sources/helpers/vultr.py @@ -29,7 +29,7 @@ def get_metadata( with EphemeralDHCPv4( distro, iface=iface, - connectivity_url_data={"url": url}, + connectivity_urls_data=[{"url": url}], ): # Fetch the metadata v1 = read_metadata(url, timeout, retries, sec_between, agent) diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py index 049ee6c9fe2..3a467d90d86 100644 --- a/tests/unittests/net/test_dhcp.py +++ b/tests/unittests/net/test_dhcp.py @@ -824,7 +824,7 @@ def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp): self.responses.add(responses.GET, url) with EphemeralDHCPv4( MockDistro(), - connectivity_url_data={"url": url}, + connectivity_urls_data=[{"url": url}], ) as lease: self.assertIsNone(lease) # Ensure that no teardown happens: @@ -847,7 +847,7 @@ def test_ephemeral_dhcp_setup_network_if_url_connectivity( self.responses.add(responses.GET, url, body=b"", status=404) with EphemeralDHCPv4( MockDistro(), - connectivity_url_data={"url": url}, + connectivity_urls_data=[{"url": url}], ) as lease: self.assertEqual(m_dhcp.return_value, lease) # Ensure that dhcp discovery occurs diff --git a/tests/unittests/net/test_ephemeral.py b/tests/unittests/net/test_ephemeral.py index 77ac95c912f..6c98a6e1ee9 100644 --- a/tests/unittests/net/test_ephemeral.py +++ b/tests/unittests/net/test_ephemeral.py @@ -15,6 +15,9 @@ class TestEphemeralIPNetwork: 
@pytest.mark.parametrize("ipv6", [False, True]) @pytest.mark.parametrize("ipv4", [False, True]) + @pytest.mark.parametrize( + "connectivity_urls_data", [None, [{"url": "foo"}]] + ) @mock.patch(M_PATH + "contextlib.ExitStack") @mock.patch(M_PATH + "EphemeralIPv6Network") @mock.patch(M_PATH + "EphemeralDHCPv4") @@ -23,12 +26,19 @@ def test_stack_order( m_ephemeral_dhcp_v4, m_ephemeral_ip_v6_network, m_exit_stack, + connectivity_urls_data, ipv4, ipv6, ): interface = object() distro = MockDistro() - with EphemeralIPNetwork(distro, interface, ipv4=ipv4, ipv6=ipv6): + with EphemeralIPNetwork( + distro, + interface, + ipv4=ipv4, + ipv6=ipv6, + connectivity_urls_data=connectivity_urls_data, + ): pass expected_call_args_list = [] if ipv4: @@ -36,7 +46,11 @@ def test_stack_order( mock.call(m_ephemeral_dhcp_v4.return_value) ) assert [ - mock.call(distro, interface) + mock.call( + distro=distro, + iface=interface, + connectivity_urls_data=connectivity_urls_data, + ) ] == m_ephemeral_dhcp_v4.call_args_list else: assert [] == m_ephemeral_dhcp_v4.call_args_list @@ -54,6 +68,111 @@ def test_stack_order( == m_exit_stack.return_value.enter_context.call_args_list ) + @pytest.mark.parametrize( + [ + "ipv4_enabled", + "ipv6_connectivity", + ], + [ + pytest.param( + True, + True, + id="ipv4_enabled_with_ipv6_connectivity", + ), + pytest.param( + False, + True, + id="ipv4_disabled_with_ipv6_connectivity", + ), + pytest.param( + False, + False, + id="ipv4_disabled_without_ipv6_connectivity", + ), + pytest.param( + True, + False, + id="ipv4_enabled_without_ipv6_connectivity", + ), + ], + ) + @mock.patch(M_PATH + "contextlib.ExitStack") + def test_ipv6_stuff( + self, + m_exit_stack, + ipv4_enabled, + ipv6_connectivity, + mocker, + ): + """ + Assumes that ipv6_check_callback is always provided and the _do_ipv6 + helper always succeeds and thus ephemeral_obtained is always True. + """ + m_ipv6_check_callback = mock.MagicMock() + m_do_ipv6 = mocker.patch(M_PATH + "EphemeralIPNetwork._do_ipv6") + m_do_ipv4 = mocker.patch(M_PATH + "EphemeralIPNetwork._do_ipv4") + # always have ipv6 interface be brought up successfully + m_do_ipv6.return_value = (True, []) + m_do_ipv4.return_value = (True, []) + + # ipv6 check returns url on success and None on failure + m_ipv6_check_callback.return_value = ( + "fake_url" if ipv6_connectivity else None + ) + + # check if ipv4 is attempted to be brought up + # should only be attempted if ipv4 is enabled + # and ipv6 connectivity is not available + expected_ipv4_bringup = ipv4_enabled and not ipv6_connectivity + + interface = object() + distro = MockDistro() + ephemeral_net = EphemeralIPNetwork( + distro, + interface, + ipv4=ipv4_enabled, + ipv6=True, + ipv6_connectivity_check_callback=m_ipv6_check_callback, + ) + with ephemeral_net: + pass + + if expected_ipv4_bringup: + m_do_ipv4.assert_called_once() + else: + m_do_ipv4.assert_not_called() + + m_do_ipv6.assert_called_once() + m_ipv6_check_callback.assert_called_once() + # assert m_exit_stack.return_value.enter_context.call_count == 2 + + @mock.patch(M_PATH + "contextlib.ExitStack") + def test_ipv6_arg_mismatch_raises_exception( + self, + m_exit_stack, + mocker, + ): + """ + Validate that ValueError exception is raised when ipv6 is not enabled + but ipv6_connectivity_check_callback is provided. 
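+        The ValueError is raised by EphemeralIPNetwork.__enter__ before
+        any ephemeral network is brought up.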
+ """ + m_ipv6_check_callback = mock.MagicMock() + + interface = object() + distro = MockDistro() + ephemeral_net = EphemeralIPNetwork( + distro, + interface, + ipv4=True, + # set ipv6 to disabled + ipv6=False, + # but provide ipv6_connectivity_check_callback + ipv6_connectivity_check_callback=m_ipv6_check_callback, + ) + with pytest.raises(ValueError): + with ephemeral_net: + pass + @pytest.mark.parametrize( "m_v4, m_v6, m_context, m_side_effects", [ diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py index 52ac511c54d..5867a4fac91 100644 --- a/tests/unittests/sources/test_hetzner.py +++ b/tests/unittests/sources/test_hetzner.py @@ -107,9 +107,11 @@ def test_read_data( m_net.assert_called_once_with( ds.distro, iface="eth0", - connectivity_url_data={ - "url": "http://169.254.169.254/hetzner/v1/metadata/instance-id" - }, + connectivity_urls_data=[ + { + "url": "http://169.254.169.254/hetzner/v1/metadata/instance-id" + } + ], ) self.assertTrue(m_readmd.called) diff --git a/tests/unittests/sources/test_nwcs.py b/tests/unittests/sources/test_nwcs.py index f96b585cda2..9ecd13a52aa 100644 --- a/tests/unittests/sources/test_nwcs.py +++ b/tests/unittests/sources/test_nwcs.py @@ -76,9 +76,9 @@ def test_read_data( m_net.assert_called_once_with( ds.distro, iface="eth0", - connectivity_url_data={ - "url": "http://169.254.169.254/api/v1/metadata/instance-id" - }, + connectivity_urls_data=[ + {"url": "http://169.254.169.254/api/v1/metadata/instance-id"} + ], ) self.assertTrue(m_readmd.called) From 2f2e065cf548c8b447cfa1dc435d163d34d7c5ac Mon Sep 17 00:00:00 2001 From: a-dubs Date: Thu, 10 Oct 2024 09:50:39 -0400 Subject: [PATCH 2/4] feat(oracle): add true single stack ipv6 support --- cloudinit/sources/DataSourceOracle.py | 292 +++++-- .../datasources/test_oracle_ipv6.py | 186 +++++ tests/unittests/sources/test_oracle.py | 710 ++++++++++++++++-- 3 files changed, 1049 insertions(+), 139 deletions(-) create mode 100644 tests/integration_tests/datasources/test_oracle_ipv6.py diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index cca9488ab88..3b7c345d353 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -19,7 +19,7 @@ import logging import time from collections import namedtuple -from typing import Dict, Optional, Tuple +from typing import Dict, List, Optional, Tuple from cloudinit import atomic_helper, dmi, net, sources, util from cloudinit.distros.networking import NetworkConfig @@ -29,7 +29,7 @@ get_interfaces_by_mac, is_netfail_master, ) -from cloudinit.url_helper import wait_for_url +from cloudinit.url_helper import UrlError, readurl, wait_for_url LOG = logging.getLogger(__name__) @@ -38,8 +38,19 @@ "configure_secondary_nics": False, } CHASSIS_ASSET_TAG = "OracleCloud.com" -METADATA_ROOT = "http://169.254.169.254/opc/v{version}/" -METADATA_PATTERN = METADATA_ROOT + "{path}/" +IPV4_METADATA_ROOT = "http://169.254.169.254/opc/v{version}/" +IPV6_METADATA_ROOT = "http://[fd00:c1::a9fe:a9fe]/opc/v{version}/" +IPV4_METADATA_PATTERN = IPV4_METADATA_ROOT + "{path}/" +IPV6_METADATA_PATTERN = IPV6_METADATA_ROOT + "{path}/" +METADATA_URLS = [ + IPV4_METADATA_ROOT, + IPV6_METADATA_ROOT, +] +METADATA_ROOTS = [ + IPV4_METADATA_ROOT, + IPV6_METADATA_ROOT, +] + # https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview, # indicates that an MTU of 9000 is used within OCI MTU = 9000 @@ -123,6 +134,7 @@ class DataSourceOracle(sources.DataSource): 
sources.NetworkConfigSource.INITRAMFS, ) + # for init-local stage, we want to bring up an ephemeral network perform_dhcp_setup = True # Careful...these can be overridden in __init__ @@ -170,16 +182,40 @@ def ds_detect() -> bool: def _get_data(self): self.system_uuid = _read_system_uuid() + nic_name = net.find_fallback_nic() - if self.perform_dhcp_setup: - network_context = ephemeral.EphemeralDHCPv4( - self.distro, - iface=net.find_fallback_nic(), - connectivity_url_data={ - "url": METADATA_PATTERN.format(version=2, path="instance"), - "headers": V2_HEADERS, - }, - ) + # Test against both v1 and v2 metadata URLs + connectivity_urls_data = [ + { + "url": IPV4_METADATA_PATTERN.format( + version=1, path="instance" + ), + }, + { + "url": IPV4_METADATA_PATTERN.format( + version=2, path="instance" + ), + "headers": V2_HEADERS, + }, + ] + + # if we have connectivity to imds, then skip ephemeral network setup + if self.perform_dhcp_setup: # and not available_urls: + # TODO: ask james: this obviously fails on ipv6 single stack only + # is there a way to detect when we need this? + # would this only work/be needed if isci is being used? + # if so, could we just check for iscsi root and then do this? + try: + network_context = ephemeral.EphemeralIPNetwork( + distro=self.distro, + interface=nic_name, + ipv6=True, + ipv4=True, + connectivity_urls_data=connectivity_urls_data, + ipv6_connectivity_check_callback=check_ipv6_connectivity, + ) + except Exception: + network_context = util.nullcontext() else: network_context = util.nullcontext() fetch_primary_nic = not self._is_iscsi_root() @@ -189,18 +225,30 @@ def _get_data(self): ) with network_context: - fetched_metadata = read_opc_metadata( + if network_context.ipv6_reached_at_url: + md_patterns = [IPV6_METADATA_PATTERN] + else: + md_patterns = [IPV4_METADATA_PATTERN] + fetched_metadata, url_that_worked = read_opc_metadata( fetch_vnics_data=fetch_primary_nic or fetch_secondary_nics, max_wait=self.url_max_wait, timeout=self.url_timeout, + metadata_patterns=md_patterns, ) + # set the metadata root address that worked to allow for detecting + # whether ipv4 or ipv6 was used for getting metadata + self.metadata_address = _get_versioned_metadata_base_url( + url=url_that_worked + ) + if _is_ipv4_metadata_url(self.metadata_address): + LOG.debug("Read metadata from IPv4 IMDS server") + else: + LOG.debug("Read metadata from IPv6 IMDS server") + if not fetched_metadata: return False data = self._crawled_metadata = fetched_metadata.instance_data - self.metadata_address = METADATA_ROOT.format( - version=fetched_metadata.version - ) self._vnics_data = fetched_metadata.vnics_data self.metadata = { @@ -291,7 +339,7 @@ def _add_network_config_from_opc_imds(self, set_primary: bool = False): :param set_primary: If True set primary interface. :raises: - Exceptions are not handled within this function. Likely + Exceptions are not handled within this function. Likely exceptions are KeyError/IndexError (if the IMDS returns valid JSON with unexpected contents). 
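+
+        VNICs that expose only an ipv6SubnetCidrBlock (and no privateIp)
+        are treated as IPv6-only and configured with IPv6 addressing.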
""" @@ -317,9 +365,14 @@ def _add_network_config_from_opc_imds(self, set_primary: bool = False): vnics_data = self._vnics_data if set_primary else self._vnics_data[1:] + # If the metadata address is an IPv6 address + for index, vnic_dict in enumerate(vnics_data): is_primary = set_primary and index == 0 mac_address = vnic_dict["macAddr"].lower() + is_ipv6_only = vnic_dict.get( + "ipv6SubnetCidrBlock", False + ) and not vnic_dict.get("privateIp", False) if mac_address not in interfaces_by_mac: LOG.warning( "Interface with MAC %s not found; skipping", @@ -327,24 +380,47 @@ def _add_network_config_from_opc_imds(self, set_primary: bool = False): ) continue name = interfaces_by_mac[mac_address] - network = ipaddress.ip_network(vnic_dict["subnetCidrBlock"]) + if is_ipv6_only: + network = ipaddress.ip_network( + vnic_dict["ipv6Addresses"][0], + ) + else: + network = ipaddress.ip_network(vnic_dict["subnetCidrBlock"]) if self._network_config["version"] == 1: if is_primary: - subnet = {"type": "dhcp"} + if is_ipv6_only: + subnets = [{"type": "dhcp6"}] + else: + subnets = [{"type": "dhcp"}] else: - subnet = { - "type": "static", - "address": ( - f"{vnic_dict['privateIp']}/{network.prefixlen}" - ), - } + subnets = [] + if vnic_dict.get("privateIp"): + subnets.append( + { + "type": "static", + "address": ( + f"{vnic_dict['privateIp']}/" + f"{network.prefixlen}" + ), + } + ) + if vnic_dict.get("ipv6Addresses"): + subnets.append( + { + "type": "static", + "address": ( + f"{vnic_dict['ipv6Addresses'][0]}/" + f"{network.prefixlen}" + ), + } + ) interface_config = { "name": name, "type": "physical", "mac_address": mac_address, "mtu": MTU, - "subnets": [subnet], + "subnets": subnets, } self._network_config["config"].append(interface_config) elif self._network_config["version"] == 2: @@ -353,13 +429,22 @@ def _add_network_config_from_opc_imds(self, set_primary: bool = False): interface_config = { "mtu": MTU, "match": {"macaddress": mac_address}, - "dhcp6": False, - "dhcp4": is_primary, } + self._network_config["ethernets"][name] = interface_config + + interface_config["dhcp6"] = is_primary and is_ipv6_only + interface_config["dhcp4"] = is_primary and not is_ipv6_only if not is_primary: - interface_config["addresses"] = [ - f"{vnic_dict['privateIp']}/{network.prefixlen}" - ] + interface_config["addresses"] = [] + if vnic_dict.get("privateIp"): + interface_config["addresses"].append( + f"{vnic_dict['privateIp']}/{network.prefixlen}" + ) + if vnic_dict.get("ipv6Addresses"): + interface_config["addresses"].append( + f"{vnic_dict['ipv6Addresses'][0]}/" + f"{network.prefixlen}" + ) self._network_config["ethernets"][name] = interface_config @@ -367,6 +452,12 @@ class DataSourceOracleNet(DataSourceOracle): perform_dhcp_setup = False +def _is_ipv4_metadata_url(metadata_address: str): + if not metadata_address: + return False + return metadata_address.startswith(IPV4_METADATA_ROOT.split("opc")[0]) + + def _read_system_uuid() -> Optional[str]: sys_uuid = dmi.read_dmi_data("system-uuid") return None if sys_uuid is None else sys_uuid.lower() @@ -378,51 +469,83 @@ def _is_platform_viable() -> bool: def _url_version(url: str) -> int: - return 2 if url.startswith("http://169.254.169.254/opc/v2") else 1 + return 2 if "/opc/v2/" in url else 1 def _headers_cb(url: str) -> Optional[Dict[str, str]]: return V2_HEADERS if _url_version(url) == 2 else None +def _get_versioned_metadata_base_url(url: str) -> str: + """ + Remove everything following the version number in the metadata address. 
+ """ + if not url: + return url + if "v2" in url: + return url.split("v2")[0] + "v2/" + elif "v1" in url: + return url.split("v1")[0] + "v1/" + else: + raise ValueError("Invalid metadata address: " + url) + + def read_opc_metadata( *, fetch_vnics_data: bool = False, max_wait=DataSourceOracle.url_max_wait, timeout=DataSourceOracle.url_timeout, -) -> Optional[OpcMetadata]: + metadata_patterns: List[str] = [IPV4_METADATA_PATTERN], +) -> Tuple[Optional[OpcMetadata], Optional[str]]: """Fetch metadata from the /opc/ routes. :return: - A namedtuple containing: - The metadata version as an integer - The JSON-decoded value of the instance data endpoint on the IMDS - The JSON-decoded value of the vnics data endpoint if - `fetch_vnics_data` is True, else None - or None if fetching metadata failed - + A tuple containing: + - A namedtuple containing: + The metadata version as an integer + The JSON-decoded value of the instance data from the IMDS + The JSON-decoded value of the vnics data from the IMDS if + `fetch_vnics_data` is True, else None. Alternatively, + None if fetching metadata failed + - The metadata pattern url that was used to fetch the metadata. + This allows for later determining if v1 or v2 endppoint was + used and whether the IMDS was reached via IPv4 or IPv6. """ # Per Oracle, there are short windows (measured in milliseconds) throughout # an instance's lifetime where the IMDS is being updated and may 404 as a # result. urls = [ - METADATA_PATTERN.format(version=2, path="instance"), - METADATA_PATTERN.format(version=1, path="instance"), + metadata_pattern.format(version=version, path="instance") + for version in [2, 1] + for metadata_pattern in metadata_patterns ] + + url_that_worked = None + + LOG.debug("Attempting to fetch IMDS metadata from: %s", urls) start_time = time.monotonic() - instance_url, instance_response = wait_for_url( - urls, + url_that_worked, instance_response = wait_for_url( + urls=urls, max_wait=max_wait, timeout=timeout, headers_cb=_headers_cb, - sleep_time=0, + sleep_time=0.1, ) - if not instance_url: - LOG.warning("Failed to fetch IMDS metadata!") - return None + if not url_that_worked: + LOG.warning( + "Failed to fetch IMDS metadata from any of: %s", + ", ".join(urls), + ) + return (None, None) + else: + LOG.debug( + "Successfully fetched instance metadata from IMDS at: %s", + url_that_worked, + ) instance_data = json.loads(instance_response.decode("utf-8")) - metadata_version = _url_version(instance_url) + # save whichever version we got the instance data from for vnics data later + metadata_version = _url_version(url_that_worked) vnics_data = None if fetch_vnics_data: @@ -430,17 +553,70 @@ def read_opc_metadata( # but if we were able to retrieve instance metadata, that seems # like a worthwhile tradeoff rather than having incomplete metadata. 
vnics_url, vnics_response = wait_for_url( - [METADATA_PATTERN.format(version=metadata_version, path="vnics")], + urls=[url_that_worked.replace("instance", "vnics")], max_wait=max_wait - (time.monotonic() - start_time), timeout=timeout, headers_cb=_headers_cb, - sleep_time=0, + sleep_time=0.1, ) if vnics_url: vnics_data = json.loads(vnics_response.decode("utf-8")) + LOG.debug( + "Successfully fetched vnics metadata from IMDS at: %s", + vnics_url, + ) else: LOG.warning("Failed to fetch IMDS network configuration!") - return OpcMetadata(metadata_version, instance_data, vnics_data) + return ( + OpcMetadata(metadata_version, instance_data, vnics_data), + url_that_worked, + ) + + +def check_ipv6_connectivity() -> Optional[str]: + """ + Check if IMDS is reachable over IPv6 and that the status code is < 400. + + :return: The URL that was used to reach the IMDS if successful, else None + """ + ipv6_imds_endpoint_urls_data: List[Dict] = [ # try v2 first, then v1 + { + "url": IPV6_METADATA_PATTERN.format(version=2, path="instance"), + "headers": V2_HEADERS, + }, + { + "url": IPV6_METADATA_PATTERN.format(version=1, path="instance"), + }, + ] + for url_data in ipv6_imds_endpoint_urls_data: + LOG.debug( + "Checking ipv6 connectivity against url: %s", + url_data["url"], + ) + try: + url_response = readurl( + check_status=False, # don't raise exceptions on non-200 status + url=url_data["url"], + headers=url_data.get("headers"), + timeout=0.5, # keep really short for quick failure path + ) + except UrlError: + LOG.debug( + "Failed to reach IMDS over IPv6 at: %s", + url_data["url"], + ) + continue + # check if the response is ok + if url_response.code < 400: + LOG.debug( + "IPv6 IMDS was successfully reached at: %s", + url_data["url"], + ) + return url_data["url"] + else: + LOG.debug("Failed to get OK from IMDS at: %s", url_data["url"]) + LOG.debug("IMDS could not be reached over IPv6.") + return None # Used to match classes to dependencies @@ -462,19 +638,21 @@ def get_datasource_list(depends): if __name__ == "__main__": - import argparse description = """ Query Oracle Cloud metadata and emit a JSON object with two keys: `read_opc_metadata` and `_is_platform_viable`. The values of each are the return values of the corresponding functions defined in DataSourceOracle.py.""" - parser = argparse.ArgumentParser(description=description) - parser.parse_args() + print( atomic_helper.json_dumps( { - "read_opc_metadata": read_opc_metadata(), + "read_opc_metadata": read_opc_metadata( + metadata_patterns=( + [IPV6_METADATA_PATTERN, IPV4_METADATA_PATTERN] + ) + ), "_is_platform_viable": _is_platform_viable(), } ) diff --git a/tests/integration_tests/datasources/test_oracle_ipv6.py b/tests/integration_tests/datasources/test_oracle_ipv6.py new file mode 100644 index 00000000000..063db0c6796 --- /dev/null +++ b/tests/integration_tests/datasources/test_oracle_ipv6.py @@ -0,0 +1,186 @@ +import time + +import pytest + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.integration_settings import PLATFORM + +IPV4_METADATA_ADDRESS = "169.254.169.254" +IPV6_METADATA_ADDRESS = "fd00:c1::a9fe:a9fe" + + +def _find_imds_read_log(log: str, address: str) -> bool: + """ + Check if the log contains a successful IMDS read from the given address. + + Args: + log: Contents of the cloud-init log file to search. + address: The IMDS IP address to look for in the log. 
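+
+    Returns:
+        True if such a log entry is found, False otherwise.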
+ """ + return ( + "Successfully fetched instance metadata from IMDS at: " + f"http://[{address}]/" in log + ) + + +def _test_reading_ipv6_metadata_succeeds(client: IntegrationInstance): + """ + Test that reading metadata succeeds for an IPv6 instance. + + This function performs the following checks: + 1. Reads the cloud-init log file and ensures that a log entry indicates + the metadata was successfully read from the IPv6 IMDS address. + 2. Verifies that the cloud-init status indicates a successful read of + """ + log = client.read_from_file("/var/log/cloud-init.log") + matching_line = None + for line in log.splitlines(): + if _find_imds_read_log(line, IPV6_METADATA_ADDRESS): + matching_line = line + break + assert matching_line is not None + assert client.execute("cloud-init status --long").ok + + +def _test_reading_ipv6_metadata_fails(client: IntegrationInstance): + """ + Test that reading metadata fails for an IPv6 instance and that the fallback + datasource is used instead. + This function performs the following checks: + 1. Reads the cloud-init log file and ensures that no log entry indicates + an attempt to read metadata from the IPv6 IMDS address. + 2. Verifies that the cloud-init status indicates the use of a fallback + datasource due to the failure of IPv6 connectivity checks. + 3. Ensures that the status message confirms the failure to fetch IMDS + metadata from the IPv6 address and instead used the fallback datasource. + 4. The cloud-init status exit code should indicate a failure. + Args: + client (IntegrationInstance): The integration test client instance. + address (str): The IPv6 address to check in the cloud-init log. + """ + log = client.read_from_file("/var/log/cloud-init.log") + matching_line = None + for line in log.splitlines(): + if _find_imds_read_log(line, IPV6_METADATA_ADDRESS): + matching_line = line + break + + # ensure that the line was NOT found + assert matching_line is None + + # now check cloud-init status to ensure that ipv6 instance failed to + # retrieve metadata from IMDS + status = client.execute("cloud-init status --long") + assert "Used fallback datasource" in status + # since IPV6 connectivity checks will fail in Oracle Datasource, + # we want to verify that it did NOT try querying the IPV6 address + # and instead used the fallback datasource + assert ( + "Failed to fetch IMDS metadata from any of: " + f"http://{IPV4_METADATA_ADDRESS}" in status + ) + assert not status.ok + + +def _install_netfilter_perstistent(client: IntegrationInstance): + assert client.execute("sudo apt-get install -y iptables-persistent").ok + + +def _clean_and_wait_for_cloudinit(client: IntegrationInstance): + """ + Clean up the instance and wait for cloud-init to finish + + This function performs the following steps: + 1. Cleans up the instance and wipes all cloud-init logs and data, and then + reboots the instance. + 2. Waits for the instance to reboot and for cloud-init to finish running. 
+ """ + r = client.execute("cloud-init clean -r --logs") + print(r) + # assert r.ok + # sleep for 10s to allow cloud-init clean to run + # oracle takes > 10s to boot so this is not slowing us down + time.sleep(10) + # then wait on cloud-init to finish + client.instance._wait_for_execute() + + +@pytest.mark.unstable +@pytest.mark.skipif(PLATFORM != "oci", reason="test is oci specific") +def test_single_stack(client: IntegrationInstance): + """ + Test the behavior of cloud-init when interacting with the Oracle Cloud + Infrastructure (OCI) Instance Metadata Service (IMDS) over IPv6, while + selectively blocking IPv4 and IPv6 traffic. + + This test performs the following steps: + 1. Ensures the IPv6 IMDS is reachable via curl. + 2. Blocks IPv4 traffic and verifies that the IPv6 IMDS is still reachable. + 3. Blocks IPv6 traffic and verifies that the IPv6 IMDS is no longer + reachable. + 4. Cleans up iptables rules to restore normal network behavior. + + The test is marked as unstable because it requires a specially configured + instance that is running a private custom-made IPv6-only image that is not + generally available. Thus, this test is only meant to be run manually by + developers who have access to the necessary resources. + """ + _install_netfilter_perstistent(client) + + # Ensure IPv6 is not disabled from a previous test + client.execute(f"ip6tables -D OUTPUT -d {IPV6_METADATA_ADDRESS} -j REJECT") + client.execute(f"ip6tables -D INPUT -s {IPV6_METADATA_ADDRESS} -j DROP") + # Ensure IPv4 is not disabled from a previous test + client.execute(f"iptables -D OUTPUT -d {IPV4_METADATA_ADDRESS} -j REJECT") + client.execute(f"iptables -D INPUT -s {IPV4_METADATA_ADDRESS} -j DROP") + + # assert ipv6 imds is reachable via curl + assert client.execute( + "curl -f -g -6 -L http://[fd00:c1::a9fe:a9fe]/opc/v1/instance/" + ).ok + + # Drop IPv4 responses + assert client.execute( + f"iptables -I INPUT -s {IPV4_METADATA_ADDRESS} -j DROP" + ).ok + # Block IPv4 requests + assert client.execute( + f"iptables -I OUTPUT -d {IPV4_METADATA_ADDRESS} -j REJECT" + ).ok + # Save the rules so they persist across reboot + assert client.execute("sudo netfilter-persistent save").ok + + # assert ipv6 imds is reachable via curl when ipv4 is blocked + assert client.execute( + "curl -f -g -6 -L http://[fd00:c1::a9fe:a9fe]/opc/v1/instance/" + ).ok + + _clean_and_wait_for_cloudinit(client) + # cloud-init should be able to use ipv6 IMDS when ipv4 is blocked + _test_reading_ipv6_metadata_succeeds(client) + + # Drop IPv6 responses + assert client.execute( + f"ip6tables -I INPUT -s {IPV6_METADATA_ADDRESS} -j DROP" + ).ok + # Block IPv6 requests + assert client.execute( + f"ip6tables -I OUTPUT -d {IPV6_METADATA_ADDRESS} -j REJECT" + ).ok + # Save the rules so they persist across reboot + assert client.execute("sudo netfilter-persistent save").ok + # assert that curling the ipv6 address fails when ipv6 is blocked + assert not client.execute( + "curl -f -g -6 -L http://[fd00:c1::a9fe:a9fe]/opc/v1/instance/" + ).ok + + _clean_and_wait_for_cloudinit(client) + # cloud-init should NOT be able to use ipv6 IMDS when ipv6 is blocked + _test_reading_ipv6_metadata_fails(client) + + # Re-enable IPv6 + client.execute(f"ip6tables -D OUTPUT -d {IPV6_METADATA_ADDRESS} -j REJECT") + client.execute(f"ip6tables -D INPUT -s {IPV6_METADATA_ADDRESS} -j DROP") + # Re-enable IPv4 + client.execute(f"iptables -D OUTPUT -d {IPV4_METADATA_ADDRESS} -j REJECT") + client.execute(f"iptables -D INPUT -s {IPV4_METADATA_ADDRESS} -j DROP") diff --git 
a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py index 2372ca5ecfc..64faa2a4e5e 100644 --- a/tests/unittests/sources/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ -5,15 +5,17 @@ import json import logging from itertools import count +from typing import Optional from unittest import mock import pytest import responses +from requests import Response from cloudinit.sources import DataSourceOracle as oracle from cloudinit.sources import NetworkConfigSource from cloudinit.sources.DataSourceOracle import OpcMetadata -from cloudinit.url_helper import UrlError +from cloudinit.url_helper import UrlError, UrlResponse from tests.unittests import helpers as test_helpers DS_PATH = "cloudinit.sources.DataSourceOracle" @@ -57,7 +59,117 @@ "virtualRouterIp" : "10.0.0.1", "subnetCidrBlock" : "10.0.0.0/24" } ]""" +############################################################################### +OPC_VM_DUAL_STACK_SECONDARY_VNIC_RESPONSE = """\ +[ + { + "ipv6Addresses": [ + "2603:c020:400d:5dbb:e94a:a85d:26e3:e0d4" + ], + "ipv6SubnetCidrBlock": "2603:c020:400d:5dbb::/64", + "ipv6VirtualRouterIp": "fe80::200:17ff:fe40:8972", + "macAddr": "02:00:17:0D:6B:BE", + "privateIp": "10.0.0.183", + "subnetCidrBlock": "10.0.0.0/24", + "virtualRouterIp": "10.0.0.1", + "vlanTag": 929, + "vnicId": "ocid1.vnic.oc1.iad.abuwcljtr2b6363afca55nzerlvwmfhxp_truncated" + }, + { + "ipv6Addresses": [ + "2603:c020:400d:5d7e:aacc:8e5f:3b1b:3a4a" + ], + "ipv6SubnetCidrBlock": "2603:c020:400d:5d7e::/64", + "ipv6VirtualRouterIp": "fe80::200:17ff:fe40:8972", + "macAddr": "02:00:17:18:F6:FF", + "subnetCidrBlock": "\u003cnull\u003e", + "vlanTag": 2659, + "vnicId": "ocid1.vnic.oc1.iad.abuwcljtpfktyl2e3xm2ez4spj7wiliyc_truncated" + } +]""" +OPC_VM_IPV6_ONLY_SECONDARY_VNIC_RESPONSE = """\ +[ + { + "ipv6Addresses": [ + "2603:c020:400d:5dbb:e94a:a85d:26e3:e0d4" + ], + "ipv6SubnetCidrBlock": "2603:c020:400d:5dbb::/64", + "ipv6VirtualRouterIp": "fe80::200:17ff:fe40:8972", + "macAddr": "02:00:17:0D:6B:BE", + "vlanTag": 929, + "vnicId": "ocid1.vnic.oc1.iad.abuwcljtr2b6363afca55nzerlvwmfhxp_truncated" + }, + { + "ipv6Addresses": [ + "2603:c020:400d:5d7e:aacc:8e5f:3b1b:3a4a" + ], + "ipv6SubnetCidrBlock": "2603:c020:400d:5d7e::/64", + "ipv6VirtualRouterIp": "fe80::200:17ff:fe40:8972", + "macAddr": "02:00:17:18:F6:FF", + "vlanTag": 2659, + "vnicId": "ocid1.vnic.oc1.iad.abuwcljtpfktyl2e3xm2ez4spj7wiliyc_truncated" + } +]""" +############################################################################### + +OPC_DUAL_STACK_VM_VNIC_RESPONSE = """\ +[ + { + "ipv6Addresses": [ + "2603:c020:400d:5dbb:e94a:a85d:26e3:e0d4" + ], + "ipv6SubnetCidrBlock": "2603:c020:400d:5dbb::/64", + "ipv6VirtualRouterIp": "fe80::200:17ff:fe40:8972", + "macAddr": "02:00:17:0D:6B:BE", + "privateIp": "10.0.0.183", + "subnetCidrBlock": "10.0.0.0/24", + "virtualRouterIp": "10.0.0.1", + "vlanTag": 929, + "vnicId": "ocid1.vnic.oc1.iad.abuwcljtr2b6363afca55nzerlvwmfhxp_truncated" + } +]""" +OPC_DUAL_STACK_WITHI_IPV6_ONLY_SECONDARY_VNIC_RESPONSE = """\ +[ + { + "ipv6Addresses": [ + "2603:c020:400d:5dbb:e94a:a85d:26e3:e0d4" + ], + "ipv6SubnetCidrBlock": "2603:c020:400d:5dbb::/64", + "ipv6VirtualRouterIp": "fe80::200:17ff:fe40:8972", + "macAddr": "02:00:17:0D:6B:BE", + "privateIp": "10.0.0.183", + "subnetCidrBlock": "10.0.0.0/24", + "virtualRouterIp": "10.0.0.1", + "vlanTag": 929, + "vnicId": "ocid1.vnic.oc1.iad.abuwcljtr2b6363afca55nzerlvwmfhx4_truncated" + }, + { + "ipv6Addresses": [ + "2603:c020:400d:5d7e:aacc:8e5f:3b1b:3a4a" + ], + 
"ipv6SubnetCidrBlock": "2603:c020:400d:5d7e::/64", + "ipv6VirtualRouterIp": "fe80::200:17ff:fe40:8972", + "macAddr": "02:00:17:18:F6:FF", + "subnetCidrBlock": "\u003cnull\u003e", + "vlanTag": 2659, + "vnicId": "ocid1.vnic.oc1.iad.abuwcljtpfktyl2e3xm2ez4spj7wiliy_truncated" + } +]""" + +OPC_IPV6_VM_VNIC_RESPONSE = """\ +[ + { + "ipv6Addresses": [ + "2603:c020:400d:5d7e:c2af:e153:ebe2:2478" + ], + "ipv6SubnetCidrBlock": "2603:c020:400d:5d7e::/64", + "ipv6VirtualRouterIp": "fe80::200:17ff:fe40:8972", + "macAddr": "02:00:17:15:92:4E", + "vlanTag": 1970, + "vnicId": "ocid1.vnic.oc1.iad.abuwcljtpfktyl2e3xm2ez4spj7wiliy_truncated" + } +]""" # Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then # truncated for line length) @@ -86,10 +198,45 @@ } }""" +OPC_V2_IPV6_METADATA = """\ +{ + "availabilityDomain": "qIZq:US-ASHBURN-AD-1", + "canonicalRegionName": "us-ashburn-1", + "compartmentId": "ocid1.compartment.oc1..aaaaaaaayyvhlkxdjkhz_truncated", + "displayName": "jammy-ipv6-only-variant-base-instance", + "faultDomain": "FAULT-DOMAIN-3", + "hostname": "jammy-ipv6-only-variant-base-instance", + "id": "ocid1.instance.oc1.iad.anuwcljtniwq6sycqxrp36a46y7o2ox_truncated", + "image": "ocid1.image.oc1.iad.aaaaaaaa4jvzkneq635p7xr2nekgbff_truncated", + "metadata": { + "ssh_authorized_keys": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA truncated" + }, + "ociAdName": "iad-ad-3", + "region": "iad", + "regionInfo": { + "realmDomainComponent": "oraclecloud.com", + "realmKey": "oc1", + "regionIdentifier": "us-ashburn-1", + "regionKey": "IAD" + }, + "shape": "VM.Standard.E5.Flex", + "shapeConfig": { + "maxVnicAttachments": 2, + "memoryInGBs": 12.0, + "networkingBandwidthInGbps": 1.0, + "ocpus": 1.0 + }, + "state": "Running", + "tenantId": "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmct_truncated", + "timeCreated": 1726753916086 +}""" + # Just a small meaningless change to differentiate the two metadatas OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance") MAC_ADDR = "00:00:17:02:2b:b1" +IPV6_MAC_ADDR1 = "02:00:17:0d:6b:be" +IPV6_MAC_ADDR2 = "02:00:17:18:f6:ff" DHCP = { "name": "eth0", @@ -108,6 +255,19 @@ } KLIBC_NET_CFG = {"version": 1, "config": [DHCP]} +ipv6_v1_instance_url = oracle.IPV6_METADATA_PATTERN.format( + path="instance", + version=1, +) +ipv4_v1_instance_url = oracle.IPV4_METADATA_PATTERN.format( + path="instance", + version=1, +) +ipv4_v2_instance_url = oracle.IPV4_METADATA_PATTERN.format( + path="instance", + version=2, +) + @pytest.fixture def metadata_version(): @@ -139,13 +299,29 @@ def oracle_ds(request, fixture_utils, paths, metadata_version, mocker): is_iscsi = fixture_utils.closest_marker_first_arg_or( request, "is_iscsi", True ) + use_ipv6 = fixture_utils.closest_marker_first_arg_or( + request, "use_ipv6", False + ) + metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None) + ipv6_v1_instance_url = oracle.IPV6_METADATA_PATTERN.format( + path="instance/", + version=metadata_version, + ) + ipv4_v1_instance_url = oracle.IPV4_METADATA_PATTERN.format( + path="instance/", + version=metadata_version, + ) + md_url_pattern = ipv6_v1_instance_url if use_ipv6 else ipv4_v1_instance_url - mocker.patch(DS_PATH + ".net.find_fallback_nic") + mocker.patch(DS_PATH + ".net.find_fallback_nic", return_value="fake_eth0") mocker.patch(DS_PATH + ".ephemeral.EphemeralDHCPv4") + mocker.patch(DS_PATH + ".ephemeral.EphemeralIPNetwork") mocker.patch(DS_PATH + "._read_system_uuid", return_value="someuuid") mocker.patch(DS_PATH + ".DataSourceOracle.ds_detect", 
return_value=True) - mocker.patch(DS_PATH + ".read_opc_metadata", return_value=metadata) + mocker.patch( + DS_PATH + ".read_opc_metadata", return_value=(metadata, md_url_pattern) + ) mocker.patch(DS_PATH + ".KlibcOracleNetworkConfigSource") ds = oracle.DataSourceOracle( sys_cfg=sys_cfg, @@ -162,6 +338,19 @@ def oracle_ds(request, fixture_utils, paths, metadata_version, mocker): class TestDataSourceOracle: + + def test_check_instance_id(self, oracle_ds): + oracle_ds.system_uuid = "someuuid" + with mock.patch( + DS_PATH + ".sources.instance_id_matches_system_uuid" + ) as m: + oracle_ds.check_instance_id("sys_cfg") + m.assert_called_once_with("someuuid") + + def test_get_public_ssh_keys(self, oracle_ds): + oracle_ds.metadata = {"public_keys": "key"} + assert ["key"] == oracle_ds.get_public_ssh_keys() + def test_platform_info(self, oracle_ds): assert "oracle" == oracle_ds.cloud_name assert "oracle" == oracle_ds.platform_type @@ -360,6 +549,104 @@ def test_secondary_nic_v2(self, set_primary, oracle_ds): assert 1 == len(secondary_cfg["addresses"]) assert "10.0.0.231/24" == secondary_cfg["addresses"][0] + @pytest.mark.parametrize( + "set_primary", + [ + pytest.param(True, id="set_primary"), + pytest.param(False, id="dont_set_primary"), + ], + ) + def test_imds_nic_setup_v1_ipv6_only(self, set_primary, oracle_ds): + oracle_ds._vnics_data = json.loads( + OPC_VM_IPV6_ONLY_SECONDARY_VNIC_RESPONSE + ) + oracle_ds._network_config = { + "version": 1, + "config": [{"primary": "nic"}], + } + with mock.patch( + f"{DS_PATH}.get_interfaces_by_mac", + return_value={ + "02:00:17:0d:6b:be": "ens3", + "02:00:17:18:f6:ff": "ens4", + }, + ): + oracle_ds._add_network_config_from_opc_imds( + set_primary=set_primary + ) + + secondary_nic_index = 1 + nic_cfg = oracle_ds.network_config["config"] + if set_primary: + primary_cfg = nic_cfg[1] + secondary_nic_index += 1 + + assert "ens3" == primary_cfg["name"] + assert "physical" == primary_cfg["type"] + assert "02:00:17:0d:6b:be" == primary_cfg["mac_address"] + assert 9000 == primary_cfg["mtu"] + assert 1 == len(primary_cfg["subnets"]) + assert "address" not in primary_cfg["subnets"][0] + assert "dhcp6" == primary_cfg["subnets"][0]["type"] + secondary_cfg = nic_cfg[secondary_nic_index] + assert "ens4" == secondary_cfg["name"] + assert "physical" == secondary_cfg["type"] + assert "02:00:17:18:f6:ff" == secondary_cfg["mac_address"] + assert 9000 == secondary_cfg["mtu"] + assert 1 == len(secondary_cfg["subnets"]) + assert ( + "2603:c020:400d:5d7e:aacc:8e5f:3b1b:3a4a/128" + == secondary_cfg["subnets"][0]["address"] + ) + assert "static" == secondary_cfg["subnets"][0]["type"] + + @pytest.mark.parametrize( + "set_primary", + [True, False], + ) + def test_secondary_nic_v2_ipv6_only(self, set_primary, oracle_ds): + oracle_ds._vnics_data = json.loads( + OPC_VM_IPV6_ONLY_SECONDARY_VNIC_RESPONSE + ) + oracle_ds._network_config = { + "version": 2, + "ethernets": {"primary": {"nic": {}}}, + } + with mock.patch( + f"{DS_PATH}.get_interfaces_by_mac", + return_value={ + "02:00:17:0d:6b:be": "ens3", + "02:00:17:18:f6:ff": "ens4", + }, + ): + oracle_ds._add_network_config_from_opc_imds( + set_primary=set_primary + ) + + nic_cfg = oracle_ds.network_config["ethernets"] + if set_primary: + assert "ens3" in nic_cfg + primary_cfg = nic_cfg["ens3"] + + assert primary_cfg["dhcp4"] is False + assert primary_cfg["dhcp6"] is True + assert "02:00:17:0d:6b:be" == primary_cfg["match"]["macaddress"] + assert 9000 == primary_cfg["mtu"] + assert "addresses" not in primary_cfg + + assert "ens4" in nic_cfg 
+ secondary_cfg = nic_cfg["ens4"] + assert secondary_cfg["dhcp4"] is False + assert secondary_cfg["dhcp6"] is False + assert "02:00:17:18:f6:ff" == secondary_cfg["match"]["macaddress"] + assert 9000 == secondary_cfg["mtu"] + + assert 1 == len(secondary_cfg["addresses"]) + assert ( + "2603:c020:400d:5d7e:aacc:8e5f:3b1b:3a4a/128" + == secondary_cfg["addresses"][0] + ) + @pytest.mark.parametrize("error_add_network", [None, Exception]) @pytest.mark.parametrize( "configure_secondary_nics", @@ -724,7 +1011,7 @@ def test_metadata_returned( mocked_responses, ): setup_urls(mocked_responses) - metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics) + metadata, url = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics) assert version == metadata.version assert instance_data == metadata.instance_data @@ -780,20 +1067,89 @@ def test_attempt_vnics_after_max_wait_expire(self, m_wait_for_url, m_time): # No need to actually wait between retries in the tests @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) - def test_fetch_vnics_error(self, caplog): + @pytest.mark.parametrize( + [ + "instance_md_succeeds", + "vnics_md_succeeds", + ], + [ + pytest.param( + True, + True, + id="fetching both instance and vnics metadata succeeds", + ), + pytest.param( + False, + None, + id="fetching instance metadata fails, vnics not fetched", + ), + pytest.param( + True, + False, + id="fetching instance metadata succeeds, vnics metadata fails", + ), + ], + ) + def test_fetchin_instance_and_vnic_successes_and_failures( + self, + caplog, + instance_md_succeeds: bool, + vnics_md_succeeds: Optional[bool], + ): + instance_md_requsted = False + vnics_md_requsted = False + def m_wait(*args, **kwargs): - for url in args[0]: + nonlocal instance_md_requsted, vnics_md_requsted + if "urls" in kwargs: + url_arg = kwargs["urls"] + else: + url_arg = args[0] + print(url_arg) + for url in url_arg: if "vnics" in url: + vnics_md_requsted = True + if vnics_md_succeeds: + return url, b"{}" + return False, None + if "instance" in url: + instance_md_requsted = True + if instance_md_succeeds: + return url, b"{}" return False, None return ("http://localhost", b"{}") with mock.patch(DS_PATH + ".wait_for_url", side_effect=m_wait): - opc_metadata = oracle.read_opc_metadata(fetch_vnics_data=True) - assert None is opc_metadata.vnics_data - assert ( - logging.WARNING, - "Failed to fetch IMDS network configuration!", - ) == caplog.record_tuples[-1][1:], caplog.record_tuples + opc_metadata, url = oracle.read_opc_metadata(fetch_vnics_data=True) + if instance_md_succeeds: + assert url + assert opc_metadata + else: + assert not url + assert not opc_metadata + assert instance_md_requsted + assert vnics_md_requsted == instance_md_succeeds + + if instance_md_succeeds: + assert ( + "Successfully fetched instance metadata from IMDS at:" + in caplog.text + ) + if vnics_md_succeeds: + assert ( + "Successfully fetched vnics metadata from IMDS at:" + in caplog.text + ) + else: + assert ( + "Failed to fetch IMDS network configuration!" 
+ in caplog.text + ) + else: + log_level = caplog.record_tuples[-1][1] + assert logging.WARNING == log_level + log_message = caplog.record_tuples[-1][2] + assert "Failed to fetch IMDS metadata from any of:" in log_message @pytest.mark.parametrize( @@ -901,30 +1257,58 @@ def test_public_keys_handled_correctly( metadata = OpcMetadata(None, instance_data, None) with mock.patch( DS_PATH + ".read_opc_metadata", - mock.Mock(return_value=metadata), + mock.Mock(return_value=(metadata, ipv4_v1_instance_url)), ): assert oracle_ds._check_and_get_data() assert expected_value == oracle_ds.get_public_ssh_keys() - def test_missing_user_data_handled_gracefully(self, oracle_ds): + # @pytest.mark.parametrize( + # "use_ipv6", + # [ + # pytest.param(marks=pytest.mark.use_ipv6(True), id="ipv6"), + # pytest.param(marks=pytest.mark.use_ipv6(False), id="ipv4"), + # ], + # ) + @pytest.mark.parametrize( + "use_ipv6", + [ + pytest.param(True, id="ipv6"), + pytest.param(False, id="ipv4"), + ], + ) + def test_missing_user_data_handled_gracefully(self, oracle_ds, use_ipv6): instance_data = json.loads(OPC_V1_METADATA) del instance_data["metadata"]["user_data"] metadata = OpcMetadata(None, instance_data, None) + md_url_pattern = ( + ipv6_v1_instance_url if use_ipv6 else ipv4_v1_instance_url + ) + with mock.patch( DS_PATH + ".read_opc_metadata", - mock.Mock(return_value=metadata), + mock.Mock(return_value=(metadata, md_url_pattern)), ): assert oracle_ds._check_and_get_data() assert oracle_ds.userdata_raw is None - def test_missing_metadata_handled_gracefully(self, oracle_ds): + @pytest.mark.parametrize( + "use_ipv6", + [ + pytest.param(True, id="ipv6"), + pytest.param(False, id="ipv4"), + ], + ) + def test_missing_metadata_handled_gracefully(self, oracle_ds, use_ipv6): instance_data = json.loads(OPC_V1_METADATA) del instance_data["metadata"] metadata = OpcMetadata(None, instance_data, None) + md_url_pattern = ( + ipv6_v1_instance_url if use_ipv6 else ipv4_v1_instance_url + ) with mock.patch( DS_PATH + ".read_opc_metadata", - mock.Mock(return_value=metadata), + mock.Mock(return_value=(metadata, md_url_pattern)), ): assert oracle_ds._check_and_get_data() @@ -934,54 +1318,14 @@ def test_missing_metadata_handled_gracefully(self, oracle_ds): @pytest.mark.is_iscsi(False) class TestNonIscsiRoot_GetDataBehaviour: - @mock.patch(DS_PATH + ".ephemeral.EphemeralDHCPv4") - @mock.patch(DS_PATH + ".net.find_fallback_nic") - def test_run_net_files( - self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds - ): - in_context_manager = False - - def enter_context_manager(): - nonlocal in_context_manager - in_context_manager = True - - def exit_context_manager(*args): - nonlocal in_context_manager - in_context_manager = False - - m_EphemeralDHCPv4.return_value.__enter__.side_effect = ( - enter_context_manager - ) - m_EphemeralDHCPv4.return_value.__exit__.side_effect = ( - exit_context_manager - ) - - def assert_in_context_manager(**kwargs): - assert in_context_manager - return mock.MagicMock() - - with mock.patch( - DS_PATH + ".read_opc_metadata", - mock.Mock(side_effect=assert_in_context_manager), - ): - assert oracle_ds._check_and_get_data() - - assert [ - mock.call( - oracle_ds.distro, - iface=m_find_fallback_nic.return_value, - connectivity_url_data={ - "headers": {"Authorization": "Bearer Oracle"}, - "url": "http://169.254.169.254/opc/v2/instance/", - }, - ) - ] == m_EphemeralDHCPv4.call_args_list - - @mock.patch(DS_PATH + ".ephemeral.EphemeralDHCPv4") - @mock.patch(DS_PATH + ".net.find_fallback_nic") + @mock.patch(DS_PATH + 
".ephemeral.EphemeralIPNetwork") + @mock.patch(DS_PATH + ".net.find_fallback_nic", return_value="fake_eth0") + @mock.patch(DS_PATH + ".check_ipv6_connectivity") def test_read_opc_metadata_called_with_ephemeral_dhcp( - self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds + self, m_ipv6_check, m_find_fallback_nic, m_ephemeral_network, oracle_ds ): + url_that_worked = ipv4_v1_instance_url + in_context_manager = False def enter_context_manager(): @@ -992,33 +1336,43 @@ def exit_context_manager(*args): nonlocal in_context_manager in_context_manager = False - m_EphemeralDHCPv4.return_value.__enter__.side_effect = ( + m_ephemeral_network.return_value.__enter__.side_effect = ( enter_context_manager ) - m_EphemeralDHCPv4.return_value.__exit__.side_effect = ( + m_ephemeral_network.return_value.__exit__.side_effect = ( exit_context_manager ) def assert_in_context_manager(**kwargs): assert in_context_manager - return mock.MagicMock() + return (mock.MagicMock(), url_that_worked) with mock.patch( DS_PATH + ".read_opc_metadata", - mock.Mock(side_effect=assert_in_context_manager), + mock.Mock( + side_effect=assert_in_context_manager, + ), ): assert oracle_ds._check_and_get_data() assert [ mock.call( - oracle_ds.distro, - iface=m_find_fallback_nic.return_value, - connectivity_url_data={ - "headers": {"Authorization": "Bearer Oracle"}, - "url": "http://169.254.169.254/opc/v2/instance/", - }, + distro=oracle_ds.distro, + interface=m_find_fallback_nic.return_value, + ipv6=True, + ipv4=True, + connectivity_urls_data=[ + { + "url": ipv4_v1_instance_url, + }, + { + "url": ipv4_v2_instance_url, + "headers": oracle.V2_HEADERS, + }, + ], + ipv6_connectivity_check_callback=m_ipv6_check, ) - ] == m_EphemeralDHCPv4.call_args_list + ] == m_ephemeral_network.call_args_list @mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}) @@ -1169,31 +1523,223 @@ def test_missing_mac_skipped( f"Interface with MAC {MAC_ADDR} not found; skipping", ) == caplog.record_tuples[-1][1:] - @pytest.mark.parametrize("set_primary", [True, False]) + # @pytest.mark.parametrize("set_primary", [True, False]) + # @pytest.mark.parametrize("use_ipv6", [True, False]) + @pytest.mark.parametrize( + [ + "set_primary", + "use_ipv6", + "secondary_mac_present", + ], + [ + # pytest.param(True, True, id="ipv6 vnics setting primary"), + # pytest.param(False, True, id="ipv6 vnics not setting primary"), + # pytest.param(True, False, id="ipv4 vnics setting primary"), + # pytest.param(False, False, id="ipv4 vnics not setting primary"), + pytest.param( + True, + True, + True, + id="ipv6 vnics setting primary with secondary mac present", + ), + pytest.param( + False, + True, + True, + id="ipv6 vnics not setting primary with secondary mac present", + ), + pytest.param( + False, + True, + False, + id="ipv6 vnics not setting primary w/o secondary mac present", + ), + ], + ) def test_nics( self, m_get_interfaces_by_mac, set_primary, + use_ipv6, + secondary_mac_present, oracle_ds, caplog, mocker, ): """Correct number of configs added""" - vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) + if use_ipv6: + vnics_data = json.loads(OPC_VM_DUAL_STACK_SECONDARY_VNIC_RESPONSE) + else: + vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) if set_primary: assert not oracle_ds._has_network_config() else: # Simulate primary config was taken from iscsi oracle_ds._network_config = copy.deepcopy(KLIBC_NET_CFG) + if use_ipv6: + interfaces = {IPV6_MAC_ADDR1: "eth_0"} + if secondary_mac_present: + interfaces[IPV6_MAC_ADDR2] = "eth_1" + mocker.patch( + DS_PATH + 
".get_interfaces_by_mac", + return_value=interfaces, + ) + else: + mocker.patch( + DS_PATH + ".get_interfaces_by_mac", + return_value={ + "02:00:17:05:d1:db": "eth_0", + MAC_ADDR: "name_1", + }, + ) + mocker.patch.object(oracle_ds, "_vnics_data", vnics_data) + mocker.patch.object( + oracle_ds, + "metadata_address", + ipv6_v1_instance_url if use_ipv6 else ipv4_v1_instance_url, + ) - mocker.patch( - DS_PATH + ".get_interfaces_by_mac", - return_value={"02:00:17:05:d1:db": "eth_0", MAC_ADDR: "name_1"}, + # assert that oracle_ds.metadata_address is set correctly + assert oracle_ds.metadata_address == ( + ipv6_v1_instance_url if use_ipv6 else ipv4_v1_instance_url ) - mocker.patch.object(oracle_ds, "_vnics_data", vnics_data) oracle_ds._add_network_config_from_opc_imds(set_primary) - assert 2 == len( + num_configs_expected = 1 + int(secondary_mac_present) + assert num_configs_expected == len( oracle_ds._network_config["config"] ), "Config not added" - assert "" == caplog.text + + # assert that secondary vnic config is skipped if its mac is not found + assert int(not secondary_mac_present) == caplog.text.count( + " not found; skipping" + ) + + +class TestHelpers: + @pytest.mark.parametrize( + [ + "v2_response_code", + "v1_response_code", + ], + [ + pytest.param(200, 0, id="v2_success"), + pytest.param(404, 200, id="v2_fail_v1_success"), + pytest.param(404, 404, id="both_fail"), + ], + ) + def test_check_ipv6_connectivity( + self, + v2_response_code, + v1_response_code, + mocker, + caplog, + ): + """ + Test all possible combinations of responses from the two IMDS endpoints + and ensure that + """ + v2_response = Response() + v2_response.status_code = v2_response_code + v1_response = Response() + v1_response.status_code = v1_response_code + + v2_url_response = UrlResponse(response=v2_response) + v1_url_response = UrlResponse(response=v1_response) + + mocker.patch( + DS_PATH + ".readurl", + side_effect=[ + v2_url_response, + v1_url_response, + ], + ) + # assert that the mock is working as expected + # assert v2_response_code == oracle.readurl("url1").status + # assert v1_response_code == oracle.readurl("url2").status + + # with mock.patch(DS_PATH + ".wait_for_url", side_effect=m_wait): + + if v2_response_code == 200: + expected_url = oracle.IPV6_METADATA_PATTERN.format( + version=2, path="instance" + ) + elif v1_response_code == 200: + expected_url = oracle.IPV6_METADATA_PATTERN.format( + version=1, path="instance" + ) + else: + expected_url = None + + assert expected_url == oracle.check_ipv6_connectivity() + + # if both error codes are >= 400, then assert log message + if v2_response_code >= 400 and v1_response_code >= 400: + assert ( + logging.DEBUG, + "IMDS could not be reached over IPv6.", + ) == caplog.record_tuples[-1][1:] + + @pytest.mark.parametrize( + [ + "url", + "expected_base_url", + "raises_exception", + ], + [ + pytest.param( + None, + None, + None, + id="None", + ), + pytest.param( + "", + "", + None, + id="empty", + ), + pytest.param( + "http://url.tld/v2/instance/", + "http://url.tld/v2/", + None, + id="v2", + ), + pytest.param( + "http://url.tld/v1/instance/", + "http://url.tld/v1/", + None, + id="v1", + ), + pytest.param( + "http://url.tld/v3/instance/", + None, + ValueError, + id="v3_error", + ), + pytest.param( + "http://localhost:12345/v1/instance/vnics/a/b/c", + "http://localhost:12345/v1/", + None, + id="v1_many_parts_with_host_and_ports", + ), + ], + ) + def test_get_versioned_metadata_base_url( + self, + url, + expected_base_url, + raises_exception, + ): + """ + Test that the 
correct base URL is returned from the given URL, or that + the expected exception is raised if the URL is not valid. + """ + if raises_exception: + with pytest.raises(raises_exception): + oracle._get_versioned_metadata_base_url(url) + else: + assert ( + expected_base_url + == oracle._get_versioned_metadata_base_url(url) + ) From ea9b7462fdc8834c314adc48d292a3400914ce0d Mon Sep 17 00:00:00 2001 From: a-dubs Date: Thu, 17 Oct 2024 12:27:51 -0400 Subject: [PATCH 3/4] trying ipv6 conn check from oracleds instead of ephemeral.py --- cloudinit/sources/DataSourceOracle.py | 29 +++++++++++++-------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 3b7c345d353..520655bc5bb 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -182,37 +182,40 @@ def ds_detect() -> bool: def _get_data(self): self.system_uuid = _read_system_uuid() - nic_name = net.find_fallback_nic() # Test against both v1 and v2 metadata URLs connectivity_urls_data = [ { "url": IPV4_METADATA_PATTERN.format( - version=1, path="instance" + version=2, path="instance" ), + "headers": V2_HEADERS, }, { "url": IPV4_METADATA_PATTERN.format( - version=2, path="instance" + version=1, path="instance" ), - "headers": V2_HEADERS, }, ] + ipv6_url_that_worked = check_ipv6_connectivity() + if ipv6_url_that_worked: + md_patterns = [IPV6_METADATA_PATTERN] + else: + md_patterns = [IPV4_METADATA_PATTERN] + # if we have connectivity to imds, then skip ephemeral network setup - if self.perform_dhcp_setup: # and not available_urls: - # TODO: ask james: this obviously fails on ipv6 single stack only - # is there a way to detect when we need this? - # would this only work/be needed if isci is being used? - # if so, could we just check for iscsi root and then do this? 
+ if self.perform_dhcp_setup and not ipv6_url_that_worked: + + nic_name = net.find_fallback_nic() try: network_context = ephemeral.EphemeralIPNetwork( distro=self.distro, interface=nic_name, - ipv6=True, + ipv6=False, ipv4=True, connectivity_urls_data=connectivity_urls_data, - ipv6_connectivity_check_callback=check_ipv6_connectivity, + ipv6_connectivity_check_callback=None, ) except Exception: network_context = util.nullcontext() @@ -225,10 +228,6 @@ def _get_data(self): ) with network_context: - if network_context.ipv6_reached_at_url: - md_patterns = [IPV6_METADATA_PATTERN] - else: - md_patterns = [IPV4_METADATA_PATTERN] fetched_metadata, url_that_worked = read_opc_metadata( fetch_vnics_data=fetch_primary_nic or fetch_secondary_nics, max_wait=self.url_max_wait, From 697b4bdff68ef8bc3e4373740f92cbb56bdab6e9 Mon Sep 17 00:00:00 2001 From: a-dubs Date: Tue, 22 Oct 2024 17:12:21 -0400 Subject: [PATCH 4/4] tried adding _perform_connectivity_check to ephemeral ip --- cloudinit/net/ephemeral.py | 82 ++++++++++++++++++--------- cloudinit/sources/DataSourceOracle.py | 37 ++++++------ 2 files changed, 72 insertions(+), 47 deletions(-) diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py index a996a825c99..f95d9fbef52 100644 --- a/cloudinit/net/ephemeral.py +++ b/cloudinit/net/ephemeral.py @@ -3,6 +3,7 @@ """Module for ephemeral network context managers """ import contextlib +import json import logging from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple @@ -11,6 +12,7 @@ import cloudinit.netinfo as netinfo from cloudinit.net.dhcp import NoDHCPLeaseError, maybe_perform_dhcp_discovery from cloudinit.subp import ProcessExecutionError +from cloudinit.url_helper import UrlError, wait_for_url LOG = logging.getLogger(__name__) @@ -405,7 +407,6 @@ def __init__( ipv6: bool = False, ipv4: bool = True, connectivity_urls_data: Optional[List[Dict[str, Any]]] = None, - ipv6_connectivity_check_callback: Optional[Callable] = None, ): """ Args: @@ -429,12 +430,9 @@ def __init__( self.state_msg: str = "" self.distro = distro self.connectivity_urls_data = connectivity_urls_data - self.ipv6_connectivity_check_callback = ( - ipv6_connectivity_check_callback - ) # will be updated by the context manager - self.ipv6_reached_at_url = None + self.imds_reached_at_url: Optional[str] = None def __enter__(self): if not (self.ipv4 or self.ipv6): @@ -444,27 +442,14 @@ def __enter__(self): exceptions = [] ephemeral_obtained = False - if self.ipv6_connectivity_check_callback is not None: - if not self.ipv6: - raise ValueError( - "ipv6_connectivity_check_callback provided but ipv6 is " - "not enabled" - ) - ephemeral_obtained, exceptions = self._do_ipv6( - ephemeral_obtained, exceptions - ) - self.ipv6_reached_at_url = self.ipv6_connectivity_check_callback() - # if ipv6_connectivity_check_callback is provided, then we want to - # skip ipv4 ephemeral network setup if ipv6 ephemeral network setup - # and imds connectivity check succeeded - if self.ipv4 and not self.ipv6_reached_at_url: - LOG.debug( - "Bringing up ipv4 ephemeral network since ipv6 failed" - ) - ephemeral_obtained, exceptions = self._do_ipv4( - ephemeral_obtained, exceptions - ) + self.imds_reached_at_url = self._perform_connectivity_check() + + if self.imds_reached_at_url: + LOG.debug("We already have connectivity to IMDS, skipping DHCP.") else: + LOG.debug("No connectivity to IMDS, attempting DHCP setup.") + # first try to bring up ephemeral network for ipv4 (if enabled) + # then try to bring up ephemeral network 
for ipv6 (if enabled) if self.ipv4: ephemeral_obtained, exceptions = self._do_ipv4( ephemeral_obtained, exceptions @@ -474,7 +459,7 @@ def __enter__(self): ephemeral_obtained, exceptions ) - if not ephemeral_obtained: + if not self.imds_reached_at_url and not ephemeral_obtained: # Ephemeral network setup failed in linkup for both ipv4 and # ipv6. Raise only the first exception found. LOG.error( @@ -504,7 +489,6 @@ def _do_ipv4( EphemeralDHCPv4( distro=self.distro, iface=self.interface, - connectivity_urls_data=self.connectivity_urls_data, ) ) ephemeral_obtained = True @@ -545,7 +529,9 @@ def _do_ipv6( self.interface, ) ) - ephemeral_obtained = True + ephemeral_obtained = True + if exceptions or not self.ipv4: + self.state_msg = "using link-local ipv6" LOG.debug( "Successfully brought up %s for ephemeral ipv6 networking.", self.interface, @@ -561,5 +547,45 @@ def _do_ipv6( exceptions.append(e) return ephemeral_obtained, exceptions + def _perform_connectivity_check( + self, + ) -> Optional[str]: + + def headers_cb(url): + headers = [ + url_data.get("headers") for url_data in self.connectivity_urls_data + if url_data["url"] == url + ][0] + return headers + + try: + url_that_worked, url_response = wait_for_url( + urls=[url_data["url"] for url_data in self.connectivity_urls_data], + headers_cb=headers_cb, + timeout=0.5, # keep really short for quick failure path + connect_synchronously=False, + ) + imds_data = json.loads(url_response.decode("utf-8")) + except UrlError as e: + LOG.debug( + "Failed to reach IMDS with error: %s", + e, + ) + except Exception as e: # pylint: disable=broad-except + LOG.debug( + "Unexpected error occurred. Failed to reach IMDS: %s", + e, + ) + else: + if imds_data: + LOG.debug( + "IMDS was successfully reached at %s without ephemeral " + "network setup.", + url_that_worked, + ) + return url_that_worked + LOG.debug("Failed to reach IMDS without ephemeral network setup.") + return None + def __exit__(self, *_args): self.stack.close() diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 520655bc5bb..7aee2dea756 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -42,14 +42,6 @@ IPV6_METADATA_ROOT = "http://[fd00:c1::a9fe:a9fe]/opc/v{version}/" IPV4_METADATA_PATTERN = IPV4_METADATA_ROOT + "{path}/" IPV6_METADATA_PATTERN = IPV6_METADATA_ROOT + "{path}/" -METADATA_URLS = [ - IPV4_METADATA_ROOT, - IPV6_METADATA_ROOT, -] -METADATA_ROOTS = [ - IPV4_METADATA_ROOT, - IPV6_METADATA_ROOT, -] # https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview, # indicates that an MTU of 9000 is used within OCI @@ -196,26 +188,29 @@ def _get_data(self): version=1, path="instance" ), }, + { + "url": IPV6_METADATA_PATTERN.format( + version=2, path="instance" + ), + "headers": V2_HEADERS, + }, + { + "url": IPV6_METADATA_PATTERN.format( + version=1, path="instance" + ), + }, ] - ipv6_url_that_worked = check_ipv6_connectivity() - if ipv6_url_that_worked: - md_patterns = [IPV6_METADATA_PATTERN] - else: - md_patterns = [IPV4_METADATA_PATTERN] - - # if we have connectivity to imds, then skip ephemeral network setup - if self.perform_dhcp_setup and not ipv6_url_that_worked: + if self.perform_dhcp_setup: nic_name = net.find_fallback_nic() try: network_context = ephemeral.EphemeralIPNetwork( distro=self.distro, interface=nic_name, - ipv6=False, + ipv6=True, ipv4=True, connectivity_urls_data=connectivity_urls_data, - ipv6_connectivity_check_callback=None, ) except Exception: 
network_context = util.nullcontext() @@ -232,7 +227,10 @@ def _get_data(self): fetch_vnics_data=fetch_primary_nic or fetch_secondary_nics, max_wait=self.url_max_wait, timeout=self.url_timeout, - metadata_patterns=md_patterns, + metadata_patterns=[ + IPV6_METADATA_PATTERN, + IPV4_METADATA_PATTERN, + ], ) # set the metadata root address that worked to allow for detecting # whether ipv4 or ipv6 was used for getting metadata @@ -557,6 +555,7 @@ def read_opc_metadata( timeout=timeout, headers_cb=_headers_cb, sleep_time=0.1, + connect_synchronously=False, ) if vnics_url: vnics_data = json.loads(vnics_response.decode("utf-8"))
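
Usage sketch (illustrative only; not part of the patches above): based on the
EphemeralIPNetwork signature introduced in patch 4, a datasource passes its
IMDS URLs, plus any per-URL headers, via connectivity_urls_data. Entering the
context first probes those URLs and only falls back to ephemeral DHCPv4/IPv6
setup when none respond; imds_reached_at_url records which URL answered, if
any. The URLs, headers, and helper name below are placeholders, not code from
the patches.

    from typing import Any, Dict, List

    from cloudinit import util
    from cloudinit.net import ephemeral

    # Placeholder IMDS endpoints for illustration (Oracle-style v2 then v1).
    CONNECTIVITY_URLS_DATA: List[Dict[str, Any]] = [
        {
            "url": "http://169.254.169.254/opc/v2/instance/",
            "headers": {"Authorization": "Bearer Oracle"},
        },
        {"url": "http://169.254.169.254/opc/v1/instance/"},
    ]


    def ephemeral_network_context(distro, nic_name: str):
        """Build the context manager a datasource would enter before fetching
        IMDS metadata; fall back to a no-op context if construction fails."""
        try:
            return ephemeral.EphemeralIPNetwork(
                distro=distro,
                interface=nic_name,
                ipv4=True,
                ipv6=True,
                connectivity_urls_data=CONNECTIVITY_URLS_DATA,
            )
        except Exception:
            return util.nullcontext()

With this shape, a datasource would enter the returned context, check
imds_reached_at_url to see whether DHCP was skipped, and then fetch metadata
inside the context either way.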