diff --git a/.github/labeler.yml b/.github/labeler.yml index b341bb5018e..eaf08134c34 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,2 +1,7 @@ documentation: -- doc/* +- all: + - changed-files: + - any-glob-to-any-file: + - 'doc/*' + - 'cloudinit/config/schemas/*' + - base-branch: ['main'] diff --git a/.github/workflows/doc-autolabel.yml b/.github/workflows/doc-autolabel.yml deleted file mode 100644 index 54c065bdc6f..00000000000 --- a/.github/workflows/doc-autolabel.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: Label documentation changes automatically -on: -- pull_request_target - -jobs: - triage: - permissions: - contents: read - pull-requests: write - runs-on: ubuntu-latest - steps: - - uses: actions/labeler@v4 diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml new file mode 100644 index 00000000000..71171438900 --- /dev/null +++ b/.github/workflows/labeler.yaml @@ -0,0 +1,9 @@ +name: "Pull Request Labeler" +on: +- pull_request_target + +jobs: + labeler: + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index a1e4eb8dfad..f5ae53ce26a 100755 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -7,14 +7,16 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse +import logging import os +import pathlib import shutil import subprocess import sys from datetime import datetime, timezone -from pathlib import Path -from typing import NamedTuple, Optional +from typing import List, NamedTuple, Optional, cast +from cloudinit import log from cloudinit.cmd.devel import read_cfg_paths from cloudinit.stages import Init from cloudinit.subp import ProcessExecutionError, subp @@ -27,6 +29,8 @@ write_file, ) +LOG = cast(log.CustomLoggerType, logging.getLogger(__name__)) + class LogPaths(NamedTuple): userdata_raw: str @@ -96,7 +100,9 @@ class ApportFile(NamedTuple): ] -def get_parser(parser=None): +def get_parser( + parser: Optional[argparse.ArgumentParser] = None, +) -> argparse.ArgumentParser: """Build or extend and arg parser for collect-logs utility. 
@param parser: Optional existing ArgumentParser instance representing the @@ -141,7 +147,7 @@ def get_parser(parser=None): return parser -def _get_copytree_ignore_files(paths: LogPaths): +def _get_copytree_ignore_files(paths: LogPaths) -> List[str]: """Return a list of files to ignore for /run/cloud-init directory""" ignored_files = [ "hook-hotplug-cmd", # named pipe for hotplug @@ -152,111 +158,109 @@ def _get_copytree_ignore_files(paths: LogPaths): return ignored_files -def _write_command_output_to_file(cmd, filename, msg, verbosity): +def _write_command_output_to_file( + cmd: List[str], + file_path: pathlib.Path, + msg: str, +) -> Optional[str]: """Helper which runs a command and writes output or error to filename.""" - ensure_dir(os.path.dirname(filename)) + file_path.parent.mkdir(parents=True, exist_ok=True) try: output = subp(cmd).stdout except ProcessExecutionError as e: - write_file(filename, str(e)) - _debug("collecting %s failed.\n" % msg, 1, verbosity) + write_file(file_path, str(e)) + LOG.debug("collecting %s failed.", msg) + output = None else: - write_file(filename, output) - _debug("collected %s\n" % msg, 1, verbosity) - return output + write_file(file_path, output) + LOG.debug("collected %s to file '%s'", msg, file_path.stem) + return output -def _stream_command_output_to_file(cmd, filename, msg, verbosity): - """Helper which runs a command and writes output or error to filename.""" - ensure_dir(os.path.dirname(filename)) +def _stream_command_output_to_file( + cmd: List[str], file_path: pathlib.Path, msg: str +) -> None: + """Helper which runs a command and writes output or error to filename. + + `subprocess.call` is invoked directly here to stream output to the file. + Otherwise memory usage can be high for large outputs. + """ + file_path.parent.mkdir(parents=True, exist_ok=True) try: - with open(filename, "w") as f: + with file_path.open("w") as f: subprocess.call(cmd, stdout=f, stderr=f) # nosec B603 except OSError as e: - write_file(filename, str(e)) - _debug("collecting %s failed.\n" % msg, 1, verbosity) + write_file(file_path, str(e)) + LOG.debug("collecting %s failed.", msg) else: - _debug("collected %s\n" % msg, 1, verbosity) + LOG.debug("collected %s to file '%s'", msg, file_path.stem) -def _debug(msg, level, verbosity): - if level <= verbosity: - sys.stderr.write(msg) - - -def _collect_file(path, out_dir, verbosity): +def _collect_file(path: str, out_dir: str) -> None: if os.path.isfile(path): copy(path, out_dir) - _debug("collected file: %s\n" % path, 1, verbosity) + LOG.debug("collected file: %s", path) else: - _debug("file %s did not exist\n" % path, 2, verbosity) + LOG.trace("file %s did not exist", path) -def _collect_installer_logs( - log_dir: str, include_userdata: bool, verbosity: int -): +def _collect_installer_logs(log_dir: str, include_userdata: bool) -> None: """Obtain subiquity logs and config files.""" for src_file in INSTALLER_APPORT_FILES: - destination_dir = Path(log_dir + src_file.path).parent + destination_dir = pathlib.Path(log_dir + src_file.path).parent if not destination_dir.exists(): ensure_dir(str(destination_dir)) - _collect_file(src_file.path, str(destination_dir), verbosity) + _collect_file(src_file.path, str(destination_dir)) if include_userdata: for src_file in INSTALLER_APPORT_SENSITIVE_FILES: - destination_dir = Path(log_dir + src_file.path).parent + destination_dir = pathlib.Path(log_dir + src_file.path).parent if not destination_dir.exists(): ensure_dir(str(destination_dir)) - _collect_file(src_file.path, str(destination_dir), 
verbosity) + _collect_file(src_file.path, str(destination_dir)) -def _collect_version_info(log_dir: str, verbosity: int): +def _collect_version_info(log_dir: str) -> None: version = _write_command_output_to_file( cmd=["cloud-init", "--version"], - filename=os.path.join(log_dir, "version"), + file_path=pathlib.Path(log_dir, "version"), msg="cloud-init --version", - verbosity=verbosity, ) dpkg_ver = _write_command_output_to_file( cmd=["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], - filename=os.path.join(log_dir, "dpkg-version"), + file_path=pathlib.Path(log_dir, "dpkg-version"), msg="dpkg version", - verbosity=verbosity, ) if not version: - version = dpkg_ver if dpkg_ver else "not-available" - _debug("collected cloud-init version: %s\n" % version, 1, verbosity) + version = dpkg_ver or "not-available" -def _collect_system_logs(log_dir: str, verbosity: int): +def _collect_system_logs(log_dir: str) -> None: _stream_command_output_to_file( cmd=["dmesg"], - filename=os.path.join(log_dir, "dmesg.txt"), + file_path=pathlib.Path(log_dir, "dmesg.txt"), msg="dmesg output", - verbosity=verbosity, ) _stream_command_output_to_file( cmd=["journalctl", "--boot=0", "-o", "short-precise"], - filename=os.path.join(log_dir, "journal.txt"), + file_path=pathlib.Path(log_dir, "journal.txt"), msg="systemd journal of current boot", - verbosity=verbosity, ) def _collect_cloudinit_logs( log_dir: str, - verbosity: int, init: Init, paths: LogPaths, include_userdata: bool, -): - for log in get_config_logfiles(init.cfg): - _collect_file(log, log_dir, verbosity) +) -> None: + for logfile in get_config_logfiles(init.cfg): + _collect_file(logfile, log_dir) if include_userdata: user_data_file = paths.userdata_raw - _collect_file(user_data_file, log_dir, verbosity) + _collect_file(user_data_file, log_dir) -def _collect_run_dir(log_dir: str, verbosity: int, paths: LogPaths): +def _collect_run_dir(log_dir: str, paths: LogPaths) -> None: run_dir = os.path.join(log_dir, "run") ensure_dir(run_dir) if os.path.exists(paths.run_dir): @@ -267,28 +271,21 @@ def _collect_run_dir(log_dir: str, verbosity: int, paths: LogPaths): ignore=lambda _, __: _get_copytree_ignore_files(paths), ) except shutil.Error as e: - sys.stderr.write("Failed collecting file(s) due to error:\n") - sys.stderr.write(str(e) + "\n") - _debug("collected dir %s\n" % paths.run_dir, 1, verbosity) + LOG.warning("Failed collecting file(s) due to error: %s", e) + LOG.debug("collected directory: %s", paths.run_dir) else: - _debug( - "directory '%s' did not exist\n" % paths.run_dir, - 1, - verbosity, - ) + LOG.debug("directory '%s' did not exist", paths.run_dir) if os.path.exists(os.path.join(paths.run_dir, "disabled")): # Fallback to grab previous cloud/data - cloud_data_dir = Path(paths.cloud_data) + cloud_data_dir = pathlib.Path(paths.cloud_data) if cloud_data_dir.exists(): shutil.copytree( str(cloud_data_dir), - Path(log_dir + str(cloud_data_dir)), + pathlib.Path(log_dir + str(cloud_data_dir)), ) -def collect_logs( - tarfile: str, include_userdata: bool, verbosity: int = 0 -) -> int: +def collect_logs(tarfile: str, include_userdata: bool) -> int: """Collect all cloud-init logs and tar them up into the provided tarfile. @param tarfile: The path of the tar-gzipped file to create. @@ -296,9 +293,9 @@ def collect_logs( @return: 0 on success, 1 on failure. """ if include_userdata and os.getuid() != 0: - sys.stderr.write( - "To include userdata, root user is required." 
- " Try sudo cloud-init collect-logs\n" + LOG.error( + "To include userdata, root user is required. " + "Try sudo cloud-init collect-logs" ) return 1 @@ -312,33 +309,39 @@ def collect_logs( init.read_cfg() paths = get_log_paths(init) - _collect_version_info(log_dir, verbosity) - _collect_system_logs(log_dir, verbosity) - _collect_cloudinit_logs( - log_dir, verbosity, init, paths, include_userdata - ) - _collect_installer_logs(log_dir, include_userdata, verbosity) - _collect_run_dir(log_dir, verbosity, paths) + _collect_version_info(log_dir) + _collect_system_logs(log_dir) + _collect_cloudinit_logs(log_dir, init, paths, include_userdata) + _collect_installer_logs(log_dir, include_userdata) + _collect_run_dir(log_dir, paths) with chdir(tmp_dir): subp(["tar", "czvf", tarfile, log_dir.replace(f"{tmp_dir}/", "")]) - sys.stderr.write("Wrote %s\n" % tarfile) + LOG.info("Wrote %s", tarfile) return 0 -def handle_collect_logs_args(name, args): +def _setup_logger(verbosity: int) -> None: + log.reset_logging() + if verbosity == 0: + level = logging.INFO + elif verbosity == 1: + level = logging.DEBUG + else: + level = log.TRACE + LOG.setLevel(level) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter("%(message)s")) + LOG.addHandler(handler) + + +def handle_collect_logs_args(_name: str, args: argparse.Namespace) -> int: """Handle calls to 'cloud-init collect-logs' as a subcommand.""" + _setup_logger(args.verbosity) return collect_logs( tarfile=args.tarfile, include_userdata=args.userdata, - verbosity=args.verbosity, ) -def main(): - """Tool to collect and tar all cloud-init related logs.""" - parser = get_parser() - return handle_collect_logs_args("collect-logs", parser.parse_args()) - - if __name__ == "__main__": - sys.exit(main()) + sys.exit(handle_collect_logs_args("", get_parser().parse_args())) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 4a1c8b2e28c..2de9826bb83 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -24,6 +24,7 @@ from cloudinit import netinfo from cloudinit import signal_handler from cloudinit import sources +from cloudinit import socket from cloudinit import stages from cloudinit import url_helper from cloudinit import util @@ -31,13 +32,19 @@ from cloudinit import warnings from cloudinit import reporting from cloudinit import atomic_helper +from cloudinit import lifecycle from cloudinit.cmd.devel import read_cfg_paths from cloudinit.config import cc_set_hostname from cloudinit.config.modules import Modules from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import log from cloudinit.reporting import events -from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG +from cloudinit.settings import ( + PER_INSTANCE, + PER_ALWAYS, + PER_ONCE, + CLOUD_CONFIG, +) # Welcome message template WELCOME_MSG_TPL = ( @@ -79,17 +86,36 @@ def print_exc(msg=""): sys.stderr.write("\n") -def log_ppid(): - if util.is_Linux(): +DEPRECATE_BOOT_STAGE_MESSAGE = ( + "Triggering cloud-init boot stages outside of intial system boot is not a" + " fully supported operation which can lead to incomplete or incorrect" + " configuration. As such, cloud-init is deprecating this feature in the" + " future. 
If you currently use cloud-init in this way," + " please file an issue describing in detail your use case so that" + " cloud-init can better support your needs:" + " https://github.com/canonical/cloud-init/issues/new" +) + + +def log_ppid(distro, bootstage_name): + if distro.is_linux: ppid = os.getppid() - LOG.info("PID [%s] started cloud-init.", ppid) + if 1 != ppid and distro.uses_systemd(): + lifecycle.deprecate( + deprecated=( + "Unsupported configuration: boot stage called " + f"by PID [{ppid}] outside of systemd" + ), + deprecated_version="24.3", + extra_message=DEPRECATE_BOOT_STAGE_MESSAGE, + ) + LOG.info("PID [%s] started cloud-init '%s'.", ppid, bootstage_name) def welcome(action, msg=None): if not msg: msg = welcome_format(action) util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG) - log_ppid() return msg @@ -236,7 +262,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None) -> Tuple[int, str]: is_cloud_cfg = False if is_cloud_cfg: if cmdline_name == "url": - return util.deprecate( + return lifecycle.deprecate( deprecated="The kernel command line key `url`", deprecated_version="22.3", extra_message=" Please use `cloud-config-url` " @@ -333,10 +359,8 @@ def main_init(name, args): # objects config as it may be different from init object # 10. Run the modules for the 'init' stage # 11. Done! - if not args.local: - w_msg = welcome_format(name) - else: - w_msg = welcome_format("%s-local" % (name)) + bootstage_name = "init-local" if args.local else "init" + w_msg = welcome_format(bootstage_name) init = stages.Init(ds_deps=deps, reporter=args.reporter) # Stage 1 init.read_cfg(extract_fns(args)) @@ -344,8 +368,11 @@ def main_init(name, args): outfmt = None errfmt = None try: - close_stdin(lambda msg: early_logs.append((logging.DEBUG, msg))) - outfmt, errfmt = util.fixup_output(init.cfg, name) + if not args.skip_log_setup: + close_stdin(lambda msg: early_logs.append((logging.DEBUG, msg))) + outfmt, errfmt = util.fixup_output(init.cfg, name) + else: + outfmt, errfmt = util.get_output_cfg(init.cfg, name) except Exception: msg = "Failed to setup output redirection!" util.logexc(LOG, msg) @@ -357,13 +384,15 @@ def main_init(name, args): "Logging being reset, this logger may no longer be active shortly" ) log.reset_logging() - log.setup_logging(init.cfg) - apply_reporting_cfg(init.cfg) + if not args.skip_log_setup: + log.setup_logging(init.cfg) + apply_reporting_cfg(init.cfg) # Any log usage prior to setup_logging above did not have local user log # config applied. We send the welcome message now, as stderr/out have # been redirected and log now configured. welcome(name, msg=w_msg) + log_ppid(init.distro, bootstage_name) # re-play early log messages before logging was setup for lvl, msg in early_logs: @@ -591,7 +620,8 @@ def main_modules(action_name, args): # the modules objects configuration # 5. Run the modules for the given stage name # 6. Done! 
- w_msg = welcome_format("%s:%s" % (action_name, name)) + bootstage_name = "%s:%s" % (action_name, name) + w_msg = welcome_format(bootstage_name) init = stages.Init(ds_deps=[], reporter=args.reporter) # Stage 1 init.read_cfg(extract_fns(args)) @@ -613,8 +643,9 @@ def main_modules(action_name, args): mods = Modules(init, extract_fns(args), reporter=args.reporter) # Stage 4 try: - close_stdin() - util.fixup_output(mods.cfg, name) + if not args.skip_log_setup: + close_stdin() + util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to setup output redirection!") if args.debug: @@ -623,14 +654,16 @@ def main_modules(action_name, args): "Logging being reset, this logger may no longer be active shortly" ) log.reset_logging() - log.setup_logging(mods.cfg) - apply_reporting_cfg(init.cfg) + if not args.skip_log_setup: + log.setup_logging(mods.cfg) + apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome welcome(name, msg=w_msg) + log_ppid(init.distro, bootstage_name) if name == "init": - util.deprecate( + lifecycle.deprecate( deprecated="`--mode init`", deprecated_version="24.1", extra_message="Use `cloud-init init` instead.", @@ -783,9 +816,10 @@ def status_wrapper(name, args): ) v1[mode]["start"] = float(util.uptime()) - preexisting_recoverable_errors = next( + handler = next( filter(lambda h: isinstance(h, log.LogExporter), root_logger.handlers) - ).export_logs() + ) + preexisting_recoverable_errors = handler.export_logs() # Write status.json prior to running init / module code atomic_helper.write_json(status_path, status) @@ -826,11 +860,8 @@ def status_wrapper(name, args): v1["stage"] = None # merge new recoverable errors into existing recoverable error list - new_recoverable_errors = next( - filter( - lambda h: isinstance(h, log.LogExporter), root_logger.handlers - ) - ).export_logs() + new_recoverable_errors = handler.export_logs() + handler.clean_logs() for key in new_recoverable_errors.keys(): if key in preexisting_recoverable_errors: v1[mode]["recoverable_errors"][key] = list( @@ -932,9 +963,19 @@ def main(sysv_args=None): default=False, ) + parser.add_argument( + "--all-stages", + dest="all_stages", + action="store_true", + help=( + "Run cloud-init's stages under a single process using a " + "syncronization protocol. This is not intended for CLI usage." + ), + default=False, + ) + parser.set_defaults(reporter=None) subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") - subparsers.required = True # Each action and its sub-options (if any) parser_init = subparsers.add_parser( @@ -963,7 +1004,7 @@ def main(sysv_args=None): parser_mod = subparsers.add_parser( "modules", help="Activate modules using a given configuration key." 
) - extra_help = util.deprecate( + extra_help = lifecycle.deprecate( deprecated="`init`", deprecated_version="24.1", extra_message="Use `cloud-init init` instead.", @@ -1122,8 +1163,76 @@ def main(sysv_args=None): status_parser(parser_status) parser_status.set_defaults(action=("status", handle_status_args)) + else: + parser.error("a subcommand is required") args = parser.parse_args(args=sysv_args) + setattr(args, "skip_log_setup", False) + if not args.all_stages: + return sub_main(args) + return all_stages(parser) + + +def all_stages(parser): + """Run all stages in a single process using an ordering protocol.""" + LOG.info("Running cloud-init in single process mode.") + + # this _must_ be called before sd_notify is called otherwise netcat may + # attempt to send "start" before a socket exists + sync = socket.SocketSync("local", "network", "config", "final") + + # notify systemd that this stage has completed + socket.sd_notify("READY=1") + # wait for cloud-init-local.service to start + with sync("local"): + # set up logger + args = parser.parse_args(args=["init", "--local"]) + args.skip_log_setup = False + # run local stage + sync.systemd_exit_code = sub_main(args) + + # wait for cloud-init-network.service to start + with sync("network"): + # skip re-setting up logger + args = parser.parse_args(args=["init"]) + args.skip_log_setup = True + # run init stage + sync.systemd_exit_code = sub_main(args) + + # wait for cloud-config.service to start + with sync("config"): + # skip re-setting up logger + args = parser.parse_args(args=["modules", "--mode=config"]) + args.skip_log_setup = True + # run config stage + sync.systemd_exit_code = sub_main(args) + + # wait for cloud-final.service to start + with sync("final"): + # skip re-setting up logger + args = parser.parse_args(args=["modules", "--mode=final"]) + args.skip_log_setup = True + # run final stage + sync.systemd_exit_code = sub_main(args) + + # signal completion to cloud-init-main.service + if sync.experienced_any_error: + message = "a stage of cloud-init exited non-zero" + if sync.first_exception: + message = f"first exception received: {sync.first_exception}" + socket.sd_notify( + f"STATUS=Completed with failure, {message}. Run 'cloud-init status" + " --long' for more details." 
+ ) + socket.sd_notify("STOPPING=1") + # exit 1 for a fatal failure in any stage + return 1 + else: + socket.sd_notify("STATUS=Completed") + socket.sd_notify("STOPPING=1") + + +def sub_main(args): # Subparsers.required = True and each subparser sets action=(name, functor) (name, functor) = args.action diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index 39089802984..f027321ce22 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -318,8 +318,9 @@ def systemd_failed(wait: bool) -> bool: for service in [ "cloud-final.service", "cloud-config.service", - "cloud-init.service", + "cloud-init-network.service", "cloud-init-local.service", + "cloud-init-main.service", ]: try: stdout = query_systemctl( diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py index fce8ae3b4c4..3b9e931a58d 100644 --- a/cloudinit/config/cc_ansible.py +++ b/cloudinit/config/cc_ansible.py @@ -8,13 +8,13 @@ from copy import deepcopy from typing import Optional -from cloudinit import subp +from cloudinit import lifecycle, subp from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS, Distro from cloudinit.settings import PER_INSTANCE -from cloudinit.util import Version, get_cfg_by_path +from cloudinit.util import get_cfg_by_path meta: MetaSchema = { "id": "cc_ansible", @@ -39,13 +39,13 @@ def __init__(self, distro: Distro): # and cloud-init might not have that set, default: /root self.env["HOME"] = os.environ.get("HOME", "/root") - def get_version(self) -> Optional[Version]: + def get_version(self) -> Optional[lifecycle.Version]: stdout, _ = self.do_as(self.cmd_version) first_line = stdout.splitlines().pop(0) matches = re.search(r"([\d\.]+)", first_line) if matches: version = matches.group(0) - return Version.from_str(version) + return lifecycle.Version.from_str(version) return None def pull(self, *args) -> str: @@ -210,7 +210,7 @@ def run_ansible_pull(pull: AnsiblePull, cfg: dict): v = pull.get_version() if not v: LOG.warning("Cannot parse ansible version") - elif v < Version(2, 7, 0): + elif v < lifecycle.Version(2, 7, 0): # diff was added in commit edaa0b52450ade9b86b5f63097ce18ebb147f46f if cfg.get("diff"): raise ValueError( diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index b79b6483b9e..787270e665d 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -17,7 +17,7 @@ from textwrap import indent from typing import Dict, Iterable, List, Mapping -from cloudinit import features, subp, templater, util +from cloudinit import features, lifecycle, subp, templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -745,7 +745,7 @@ def add_apt_sources( def convert_v1_to_v2_apt_format(srclist): """convert v1 apt format to v2 (dict in apt_sources)""" srcdict = {} - util.deprecate( + lifecycle.deprecate( deprecated="Config key 'apt_sources'", deprecated_version="22.1", extra_message="Use 'apt' instead", @@ -824,7 +824,7 @@ def convert_v2_to_v3_apt_format(oldcfg): # no old config, so no new one to be created if not needtoconvert: return oldcfg - util.deprecate( + lifecycle.deprecate( deprecated=f"The following config key(s): {needtoconvert}", deprecated_version="22.1", ) @@ -832,7 +832,7 @@ def convert_v2_to_v3_apt_format(oldcfg): # if old AND new config are provided, prefer the new one (LP #1616831) newaptcfg = 
oldcfg.get("apt", None) if newaptcfg is not None: - util.deprecate( + lifecycle.deprecate( deprecated="Support for combined old and new apt module keys", deprecated_version="22.1", ) diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 61345fcb58d..d6dbc977f88 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -7,7 +7,7 @@ import logging import os -from cloudinit import subp, util +from cloudinit import lifecycle, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -23,6 +23,13 @@ "ca_cert_update_cmd": ["update-ca-certificates"], } DISTRO_OVERRIDES = { + "aosc": { + "ca_cert_path": "/etc/ssl/certs/", + "ca_cert_local_path": "/etc/ssl/certs/", + "ca_cert_filename": "cloud-init-ca-cert-{cert_index}.pem", + "ca_cert_config": "/etc/ca-certificates/conf.d/cloud-init.conf", + "ca_cert_update_cmd": ["update-ca-bundle"], + }, "fedora": { "ca_cert_path": "/etc/pki/ca-trust/", "ca_cert_local_path": "/usr/share/pki/ca-trust-source/", @@ -71,6 +78,7 @@ distros = [ "almalinux", + "aosc", "cloudlinux", "alpine", "debian", @@ -149,7 +157,7 @@ def disable_default_ca_certs(distro_name, distro_cfg): """ if distro_name in ["rhel", "photon"]: remove_default_ca_certs(distro_cfg) - elif distro_name in ["alpine", "debian", "ubuntu"]: + elif distro_name in ["alpine", "aosc", "debian", "ubuntu"]: disable_system_ca_certs(distro_cfg) if distro_name in ["debian", "ubuntu"]: @@ -223,7 +231,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: @param args: Any module arguments from cloud.cfg """ if "ca-certs" in cfg: - util.deprecate( + lifecycle.deprecate( deprecated="Key 'ca-certs'", deprecated_version="22.1", extra_message="Use 'ca_certs' instead.", @@ -246,7 +254,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: # If there is a remove_defaults option set to true, disable the system # default trusted CA certs first. 
if "remove-defaults" in ca_cert_cfg: - util.deprecate( + lifecycle.deprecate( deprecated="Key 'remove-defaults'", deprecated_version="22.1", extra_message="Use 'remove_defaults' instead.", diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index e1a56f91f09..459f0a3cded 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -20,7 +20,7 @@ from pathlib import Path from typing import Optional, Tuple -from cloudinit import subp, temp_utils, util +from cloudinit import lifecycle, subp, temp_utils, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -542,7 +542,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: mode = mycfg.get("mode", "auto") if util.is_false(mode): if mode != "off": - util.deprecate( + lifecycle.deprecate( deprecated=f"Growpart's 'mode' key with value '{mode}'", deprecated_version="22.2", extra_message="Use 'off' instead.", diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 0fdcf3c19e1..1d9f821bbd0 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -8,13 +8,15 @@ """Mounts: Configure mount points and swap files""" + +import copy import logging import math import os import re -from string import whitespace +from typing import Dict, List, Optional, Tuple, cast -from cloudinit import subp, type_utils, util +from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -33,7 +35,6 @@ # Name matches 'server:/path' NETWORK_NAME_FILTER = r"^.+:.*" NETWORK_NAME_RE = re.compile(NETWORK_NAME_FILTER) -WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" MNT_COMMENT = "comment=cloudconfig" MB = 2**20 @@ -133,6 +134,25 @@ def sanitize_devname(startname, transformer, aliases=None): return None +def sanitized_devname_is_valid( + original: str, sanitized: Optional[str], fstab_devs: Dict[str, str] +) -> bool: + """Get if the sanitized device name is valid.""" + if sanitized != original: + LOG.debug("changed %s => %s", original, sanitized) + if sanitized is None: + LOG.debug("Ignoring nonexistent default named mount %s", original) + return False + elif sanitized in fstab_devs: + LOG.debug( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) + return False + return True + + def suggested_swapsize(memsize=None, maxsize=None, fsys=None): # make a suggestion on the size of swap for this system. if memsize is None: @@ -334,30 +354,16 @@ def handle_swapcfg(swapcfg): return None -def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: - # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno - def_mnt_opts = "defaults,nobootwait" - uses_systemd = cloud.distro.uses_systemd() - if uses_systemd: - def_mnt_opts = ( - "defaults,nofail,x-systemd.after=cloud-init.service,_netdev" - ) - - defvals = [None, None, "auto", def_mnt_opts, "0", "2"] - defvals = cfg.get("mount_default_fields", defvals) - - # these are our default set of mounts - defmnts: list = [ - ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], - ["swap", "none", "swap", "sw", "0", "0"], - ] - - cfgmnt = [] - if "mounts" in cfg: - cfgmnt = cfg["mounts"] - - LOG.debug("mounts configuration is %s", cfgmnt) +def parse_fstab() -> Tuple[List[str], Dict[str, str], List[str]]: + """Parse /etc/fstab. + Parse fstab, ignoring any lines containing "comment=cloudconfig". 
+ :return: A 3-tuple containing: + - A list of lines exactly as they appear in fstab + - A dictionary with key being the first token in the line + and value being the entire line + - A list of any lines that were ignored due to "comment=cloudconfig" + """ fstab_lines = [] fstab_devs = {} fstab_removed = [] @@ -367,180 +373,219 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: if MNT_COMMENT in line: fstab_removed.append(line) continue - - try: - toks = WS.split(line) - except Exception: - pass + toks = line.split() fstab_devs[toks[0]] = line fstab_lines.append(line) - - device_aliases = cfg.get("device_aliases", {}) - - for i in range(len(cfgmnt)): + return fstab_lines, fstab_devs, fstab_removed + + +def sanitize_mounts_configuration( + mounts: List[Optional[List[Optional[str]]]], + fstab_devs: Dict[str, str], + device_aliases: Dict[str, str], + default_fields: List[Optional[str]], + cloud: Cloud, +) -> List[List[str]]: + """Sanitize mounts to ensure we can work with devices in config. + + Specifically: + - Ensure the mounts configuration is a list of lists + - Transform and sanitize device names + - Ensure all tokens are strings + - Add default options to any lines without options + """ + updated_lines = [] + for line in mounts: # skip something that wasn't a list - if not isinstance(cfgmnt[i], list): - LOG.warning( - "Mount option %s not a list, got a %s instead", - (i + 1), - type_utils.obj_name(cfgmnt[i]), - ) + if not isinstance(line, list): + LOG.warning("Mount option not a list, ignoring: %s", line) continue - start = str(cfgmnt[i][0]) - sanitized = sanitize_devname( + start = str(line[0]) + sanitized_devname = sanitize_devname( start, cloud.device_name_to_device, aliases=device_aliases ) - if sanitized != start: - LOG.debug("changed %s => %s", start, sanitized) + if sanitized_devname_is_valid(start, sanitized_devname, fstab_devs): + updated_line = [sanitized_devname] + line[1:] + else: + updated_line = line - if sanitized is None: - LOG.debug("Ignoring nonexistent named mount %s", start) - continue - elif sanitized in fstab_devs: - LOG.info( - "Device %s already defined in fstab: %s", - sanitized, - fstab_devs[sanitized], - ) - continue + # Ensure all tokens are strings as users may not have quoted them + # If token is None, replace it with the default value + for index, token in enumerate(updated_line): + if token is None: + updated_line[index] = default_fields[index] + else: + updated_line[index] = str(updated_line[index]) - cfgmnt[i][0] = sanitized + # fill remaining values with defaults from defvals above + updated_line += default_fields[len(updated_line) :] - # in case the user did not quote a field (likely fs-freq, fs_passno) - # but do not convert None to 'None' (LP: #898365) - for j in range(len(cfgmnt[i])): - if cfgmnt[i][j] is None: - continue - else: - cfgmnt[i][j] = str(cfgmnt[i][j]) - - for i in range(len(cfgmnt)): - # fill in values with defaults from defvals above - for j in range(len(defvals)): - if len(cfgmnt[i]) <= j: - cfgmnt[i].append(defvals[j]) - elif cfgmnt[i][j] is None: - cfgmnt[i][j] = defvals[j] - - # if the second entry in the list is 'None' this - # clears all previous entries of that same 'fs_spec' - # (fs_spec is the first field in /etc/fstab, ie, that device) - if cfgmnt[i][1] is None: - for j in range(i): - if cfgmnt[j][0] == cfgmnt[i][0]: - cfgmnt[j][1] = None - - # for each of the "default" mounts, add them only if no other - # entry has the same device name - for defmnt in defmnts: - start = defmnt[0] + 
updated_lines.append(updated_line) + return updated_lines + + +def remove_nonexistent_devices(mounts: List[List[str]]) -> List[List[str]]: + """Remove any entries that have a device name that doesn't exist. + + If the second field of a mount line is None (not the string, the value), + we skip it along with any other entries that came before it that share + the same device name. + """ + actlist = [] + dev_denylist = [] + for line in mounts[::-1]: + if line[1] is None or line[0] in dev_denylist: + LOG.debug("Skipping nonexistent device named %s", line[0]) + dev_denylist.append(line[0]) + else: + actlist.append(line) + # Reverse the list to maintain the original order + return actlist[::-1] + + +def add_default_mounts_to_cfg( + mounts: List[List[str]], + default_mount_options: str, + fstab_devs: Dict[str, str], + device_aliases: Dict[str, str], + cloud: Cloud, +) -> List[List[str]]: + """Add default mounts to the user provided mount configuration. + + Add them only if no other entry has the same device name + """ + new_mounts = copy.deepcopy(mounts) + for default_mount in [ + ["ephemeral0", "/mnt", "auto", default_mount_options, "0", "2"], + ["swap", "none", "swap", "sw", "0", "0"], # Is this used anywhere? + ]: + start = default_mount[0] sanitized = sanitize_devname( start, cloud.device_name_to_device, aliases=device_aliases ) - if sanitized != start: - LOG.debug("changed default device %s => %s", start, sanitized) - - if sanitized is None: - LOG.debug("Ignoring nonexistent default named mount %s", start) - continue - elif sanitized in fstab_devs: - LOG.debug( - "Device %s already defined in fstab: %s", - sanitized, - fstab_devs[sanitized], - ) + if not sanitized_devname_is_valid(start, sanitized, fstab_devs): continue - defmnt[0] = sanitized + # Cast here because the previous call checked for None + default_mount[0] = cast(str, sanitized) - cfgmnt_has = False - for cfgm in cfgmnt: - if cfgm[0] == defmnt[0]: - cfgmnt_has = True - break - - if cfgmnt_has: + default_already_exists = any( + cfgm[0] == default_mount[0] for cfgm in mounts + ) + if default_already_exists: LOG.debug("Not including %s, already previously included", start) continue - cfgmnt.append(defmnt) + new_mounts.append(default_mount) + return new_mounts - # now, each entry in the cfgmnt list has all fstab values - # if the second field is None (not the string, the value) we skip it - actlist = [] - for x in cfgmnt: - if x[1] is None: - LOG.debug("Skipping nonexistent device named %s", x[0]) - else: - actlist.append(x) - swapret = handle_swapcfg(cfg.get("swap", {})) - if swapret: - actlist.append([swapret, "none", "swap", "sw", "0", "0"]) +def add_comment(actlist: List[List[str]]) -> List[List[str]]: + """Add "comment=cloudconfig" to the mount options of each entry.""" + return [ + entry[:3] + [f"{entry[3]},{MNT_COMMENT}"] + entry[4:] + for entry in actlist + ] + + +def activate_swap_if_needed(actlist: List[List[str]]) -> None: + """Call 'swapon -a' if any entry has a swap fs type.""" + if any(entry[2] == "swap" for entry in actlist): + subp.subp(["swapon", "-a"]) + - if len(actlist) == 0: +def mount_if_needed( + uses_systemd: bool, changes_made: bool, dirs: List[str] +) -> None: + """Call 'mount -a' if needed. + + If changes were made, always call 'mount -a'. Otherwise, call 'mount -a' + if any of the directories in the mount list are not already mounted. 
+ """ + do_mount = False + if changes_made: + do_mount = True + else: + mount_points = { + val["mountpoint"] + for val in util.mounts().values() + if "mountpoint" in val + } + do_mount = bool(set(dirs).difference(mount_points)) + + if do_mount: + subp.subp(["mount", "-a"]) + if uses_systemd: + subp.subp(["systemctl", "daemon-reload"]) + + +def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: + """Handle the mounts configuration.""" + # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno + uses_systemd = cloud.distro.uses_systemd() + default_mount_options = ( + "defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev" + if uses_systemd + else "defaults,nobootwait" + ) + + hardcoded_defaults = [None, None, "auto", default_mount_options, "0", "2"] + default_fields: List[Optional[str]] = cfg.get( + "mount_default_fields", hardcoded_defaults + ) + mounts: List[Optional[List[Optional[str]]]] = cfg.get("mounts", []) + + LOG.debug("mounts configuration is %s", mounts) + + fstab_lines, fstab_devs, fstab_removed = parse_fstab() + device_aliases = cfg.get("device_aliases", {}) + + updated_cfg = sanitize_mounts_configuration( + mounts, fstab_devs, device_aliases, default_fields, cloud + ) + updated_cfg = add_default_mounts_to_cfg( + updated_cfg, default_mount_options, fstab_devs, device_aliases, cloud + ) + updated_cfg = remove_nonexistent_devices(updated_cfg) + updated_cfg = add_comment(updated_cfg) + + swapfile = handle_swapcfg(cfg.get("swap", {})) + if swapfile: + updated_cfg.append([swapfile, "none", "swap", "sw", "0", "0"]) + + if len(updated_cfg) == 0: + # This will only be true if there is no mount configuration at all + # Even if fstab has no functional changes, we'll get past this point + # as we remove any 'comment=cloudconfig' lines and then add them back + # in. LOG.debug("No modifications to fstab needed") return - cc_lines = [] - needswap = False - need_mount_all = False - dirs = [] - for entry in actlist: - # write 'comment' in the fs_mntops, entry, claiming this - entry[3] = "%s,%s" % (entry[3], MNT_COMMENT) - if entry[2] == "swap": - needswap = True - if entry[1].startswith("/"): - dirs.append(entry[1]) - cc_lines.append("\t".join(entry)) - - mount_points = [ - v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v - ] + cfg_lines = ["\t".join(entry) for entry in updated_cfg] + + dirs = [d[1] for d in updated_cfg if d[1].startswith("/")] + for d in dirs: try: util.ensure_dir(d) except Exception: util.logexc(LOG, "Failed to make '%s' config-mount", d) - # dirs is list of directories on which a volume should be mounted. - # If any of them does not already show up in the list of current - # mount points, we will definitely need to do mount -a. 
- if not need_mount_all and d not in mount_points: - need_mount_all = True - sadds = [WS.sub(" ", n) for n in cc_lines] - sdrops = [WS.sub(" ", n) for n in fstab_removed] + sadds = [n.replace("\t", " ") for n in cfg_lines] + sdrops = [n.replace("\t", " ") for n in fstab_removed] - sops = ["- " + drop for drop in sdrops if drop not in sadds] + [ - "+ " + add for add in sadds if add not in sdrops + sops = [f"- {drop}" for drop in sdrops if drop not in sadds] + [ + f"+ {add}" for add in sadds if add not in sdrops ] - fstab_lines.extend(cc_lines) + fstab_lines.extend(cfg_lines) contents = "%s\n" % "\n".join(fstab_lines) util.write_file(FSTAB_PATH, contents) - activate_cmds = [] - if needswap: - activate_cmds.append(["swapon", "-a"]) - - if len(sops) == 0: - LOG.debug("No changes to /etc/fstab made.") - else: + if sops: LOG.debug("Changes to fstab: %s", sops) - need_mount_all = True - - if need_mount_all: - activate_cmds.append(["mount", "-a"]) - if uses_systemd: - activate_cmds.append(["systemctl", "daemon-reload"]) + else: + LOG.debug("No changes to /etc/fstab made.") - fmt = "Activating swap and mounts with: %s" - for cmd in activate_cmds: - fmt = "Activate mounts: %s:" + " ".join(cmd) - try: - subp.subp(cmd) - LOG.debug(fmt, "PASS") - except subp.ProcessExecutionError: - LOG.warning(fmt, "FAIL") - util.logexc(LOG, fmt, "FAIL") + activate_swap_if_needed(updated_cfg) + mount_if_needed(uses_systemd, bool(sops), dirs) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 3d659525eef..e2b83191a19 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -24,6 +24,7 @@ distros = [ "almalinux", "alpine", + "aosc", "azurelinux", "centos", "cloudlinux", @@ -109,6 +110,12 @@ "service_name": "ntpd", }, }, + "aosc": { + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", + }, + }, "azurelinux": { "chrony": { "service_name": "chronyd", diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 77a2a26a7c4..b90db58ff88 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -15,7 +15,7 @@ import stat from typing import Optional -from cloudinit import subp, util +from cloudinit import lifecycle, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -56,8 +56,8 @@ def _resize_btrfs(mount_point, devpth): # btrfs has exclusive operations and resize may fail if btrfs is busy # doing one of the operations that prevents resize. 
As of btrfs 5.10 # the resize operation can be queued - btrfs_with_queue = util.Version.from_str("5.10") - system_btrfs_ver = util.Version.from_str( + btrfs_with_queue = lifecycle.Version.from_str("5.10") + system_btrfs_ver = lifecycle.Version.from_str( subp.subp(["btrfs", "--version"])[0].split("v")[-1].strip() ) if system_btrfs_ver >= btrfs_with_queue: diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 3edf9972bf9..88ec1c4f3a1 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -17,7 +17,7 @@ import re from textwrap import dedent -from cloudinit import log, subp, util +from cloudinit import lifecycle, log, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -153,7 +153,7 @@ def load_config(cfg: dict, distro: Distro) -> dict: distro_config = distro_default_rsyslog_config(distro) if isinstance(cfg.get("rsyslog"), list): - util.deprecate( + lifecycle.deprecate( deprecated="The rsyslog key with value of type 'list'", deprecated_version="22.2", ) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 21408105c74..224ae6b85fe 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -12,7 +12,7 @@ from string import ascii_letters, digits from typing import List -from cloudinit import features, subp, util +from cloudinit import features, lifecycle, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -71,7 +71,7 @@ def handle_ssh_pwauth(pw_auth, distro: Distro): cfg_name = "PasswordAuthentication" if isinstance(pw_auth, str): - util.deprecate( + lifecycle.deprecate( deprecated="Using a string value for the 'ssh_pwauth' key", deprecated_version="22.2", extra_message="Use a boolean value with 'ssh_pwauth'.", @@ -128,7 +128,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: chfg = cfg["chpasswd"] users_list = util.get_cfg_option_list(chfg, "users", default=[]) if "list" in chfg and chfg["list"]: - util.deprecate( + lifecycle.deprecate( deprecated="Config key 'lists'", deprecated_version="22.3", extra_message="Use 'users' instead.", @@ -137,7 +137,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: LOG.debug("Handling input for chpasswd as list.") plist = util.get_cfg_option_list(chfg, "list", plist) else: - util.deprecate( + lifecycle.deprecate( deprecated="The chpasswd multiline string", deprecated_version="22.2", extra_message="Use string type instead.", diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py index 6b364aa938b..7f19e633d21 100644 --- a/cloudinit/config/cc_spacewalk.py +++ b/cloudinit/config/cc_spacewalk.py @@ -18,7 +18,7 @@ LOG = logging.getLogger(__name__) -distros = ["redhat", "fedora"] +distros = ["redhat", "fedora", "openeuler"] required_packages = ["rhn-setup"] def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT" diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 00687cf867d..947469b5b6d 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -14,7 +14,7 @@ import sys from typing import List, Optional, Sequence -from cloudinit import ssh_util, subp, util +from cloudinit import lifecycle, ssh_util, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -75,7 +75,7 @@ def 
set_redhat_keyfile_perms(keyfile: str) -> None: """ permissions_public = 0o644 ssh_version = ssh_util.get_opensshd_upstream_version() - if ssh_version and ssh_version < util.Version(9, 0): + if ssh_version and ssh_version < lifecycle.Version(9, 0): # fedora 37, centos 9 stream and below has sshd # versions less than 9 and private key permissions are # set to 0o640 from sshd-keygen. diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 45bb2df7d4b..dcd50701a20 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -10,7 +10,7 @@ import logging -from cloudinit import templater, util +from cloudinit import lifecycle, templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -33,7 +33,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: if util.translate_bool(manage_hosts, addons=["template"]): if manage_hosts == "template": - util.deprecate( + lifecycle.deprecate( deprecated="Value 'template' for key 'manage_etc_hosts'", deprecated_version="22.2", extra_message="Use 'true' instead.", diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index c97d1225afc..004ede438d9 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -9,8 +9,9 @@ import base64 import logging import os +from typing import Optional -from cloudinit import util +from cloudinit import url_helper, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -44,7 +45,8 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: name, ) return - write_files(name, filtered_files, cloud.distro.default_owner) + ssl_details = util.fetch_ssl_details(cloud.paths) + write_files(name, filtered_files, cloud.distro.default_owner, ssl_details) def canonicalize_extraction(encoding_type): @@ -72,7 +74,7 @@ def canonicalize_extraction(encoding_type): return [TEXT_PLAIN_ENC] -def write_files(name, files, owner: str): +def write_files(name, files, owner: str, ssl_details: Optional[dict] = None): if not files: return @@ -86,8 +88,23 @@ def write_files(name, files, owner: str): ) continue path = os.path.abspath(path) - extractions = canonicalize_extraction(f_info.get("encoding")) - contents = extract_contents(f_info.get("content", ""), extractions) + # Read content from provided URL, if any, or decode from inline + contents = read_url_or_decode( + f_info.get("source", None), + ssl_details, + f_info.get("content", None), + f_info.get("encoding", None), + ) + if contents is None: + LOG.warning( + "No content could be loaded for entry %s in module %s;" + " skipping", + i + 1, + name, + ) + continue + # Only create the file if content exists. This will not happen, for + # example, if the URL fails and no inline content was provided (u, g) = util.extract_usergroup(f_info.get("owner", owner)) perms = decode_perms(f_info.get("permissions"), DEFAULT_PERMS) omode = "ab" if util.get_cfg_option_bool(f_info, "append") else "wb" @@ -118,6 +135,43 @@ def decode_perms(perm, default): return default +def read_url_or_decode(source, ssl_details, content, encoding): + url = None if source is None else source.get("uri", None) + use_url = bool(url) + # Special case: empty URL and content. 
Write a blank file + if content is None and not use_url: + return "" + # Fetch file content from source URL, if provided + result = None + if use_url: + try: + # NOTE: These retry parameters are arbitrarily chosen defaults. + # They have no significance, and may be changed if appropriate + result = url_helper.read_file_or_url( + url, + headers=source.get("headers", None), + retries=3, + sec_between=3, + ssl_details=ssl_details, + ).contents + except Exception: + util.logexc( + LOG, + 'Failed to retrieve contents from source "%s"; falling back to' + ' data from "content" key', + url, + ) + use_url = False + # If inline content is provided, and URL is not provided or is + # inaccessible, parse the former + if content is not None and not use_url: + # NOTE: This is not simply an "else"! Notice that `use_url` can change + # in the previous "if" block + extractions = canonicalize_extraction(encoding) + result = extract_contents(content, extractions) + return result + + def extract_contents(contents, extraction_types): result = contents for t in extraction_types: diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py index 0dc0662e1e7..87be2b45cfb 100644 --- a/cloudinit/config/cc_write_files_deferred.py +++ b/cloudinit/config/cc_write_files_deferred.py @@ -39,4 +39,5 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: name, ) return - write_files(name, filtered_files, cloud.distro.default_owner) + ssl_details = util.fetch_ssl_details(cloud.paths) + write_files(name, filtered_files, cloud.distro.default_owner, ssl_details) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 9a717af3d1a..548c83bab6d 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -141,7 +141,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: n_repo_config[k] = v repo_config = n_repo_config missing_required = 0 - req_fields = ["baseurl", "metalink"] + req_fields = ["baseurl", "metalink", "mirrorlist"] for req_field in req_fields: if req_field not in repo_config: missing_required += 1 diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py index f775802d74a..a82e1ff8e8e 100644 --- a/cloudinit/config/modules.py +++ b/cloudinit/config/modules.py @@ -12,7 +12,7 @@ from types import ModuleType from typing import Dict, List, NamedTuple, Optional -from cloudinit import config, importer, type_utils, util +from cloudinit import config, importer, lifecycle, type_utils, util from cloudinit.distros import ALL_DISTROS from cloudinit.helpers import ConfigMerger from cloudinit.reporting.events import ReportEventStack @@ -194,7 +194,7 @@ def _fixup_modules(self, raw_mods) -> List[ModuleDetails]: if not mod_name: continue if freq and freq not in FREQUENCIES: - util.deprecate( + lifecycle.deprecate( deprecated=( f"Config specified module {raw_name} has an unknown" f" frequency {freq}" ), @@ -205,7 +205,7 @@ def _fixup_modules(self, raw_mods) -> List[ModuleDetails]: # default meta attribute "frequency" value is used. freq = None if mod_name in RENAMED_MODULES: - util.deprecate( + lifecycle.deprecate( deprecated=( f"Module has been renamed from {mod_name} to " f"{RENAMED_MODULES[mod_name]}. 
Update any" @@ -278,7 +278,7 @@ def _run_modules(self, mostly_mods: List[ModuleDetails]): func_signature = signature(mod.handle) func_params = func_signature.parameters if len(func_params) == 5: - util.deprecate( + lifecycle.deprecate( deprecated="Config modules with a `log` parameter", deprecated_version="23.2", ) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 062ab92ecd8..a2fceecabcb 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -31,7 +31,7 @@ import yaml -from cloudinit import features, importer, safeyaml +from cloudinit import features, importer, lifecycle, safeyaml from cloudinit.cmd.devel import read_cfg_paths from cloudinit.handlers import INCLUSION_TYPES_MAP, type_from_starts_with from cloudinit.helpers import Paths @@ -42,7 +42,6 @@ get_modules_from_dir, load_text_file, load_yaml, - should_log_deprecation, write_file, ) @@ -795,8 +794,11 @@ def validate_cloudconfig_schema( if isinstance( schema_error, SchemaDeprecationError ): # pylint: disable=W1116 - if schema_error.version == "devel" or should_log_deprecation( - schema_error.version, features.DEPRECATION_INFO_BOUNDARY + if ( + schema_error.version == "devel" + or lifecycle.should_log_deprecation( + schema_error.version, features.DEPRECATION_INFO_BOUNDARY + ) ): deprecations.append(SchemaProblem(path, schema_error.message)) else: @@ -818,7 +820,7 @@ def validate_cloudconfig_schema( deprecations, prefix="Deprecated cloud-config provided: ", ) - # This warning doesn't fit the standardized util.deprecated() + # This warning doesn't fit the standardized lifecycle.deprecated() # utility format, but it is a deprecation log, so log it directly. LOG.deprecated(message) # type: ignore if strict and (errors or deprecations or info_deprecations): diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index f5609c539fc..4ae8b4a8f70 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -1352,7 +1352,12 @@ }, "chef_license": { "type": "string", - "description": "string that indicates if user accepts or not license related to some of chef products" + "description": "string that indicates if user accepts or not license related to some of chef products. See https://docs.chef.io/licensing/accept/", + "enum": [ + "accept", + "accept-silent", + "accept-no-persist" + ] } } } @@ -2017,12 +2022,12 @@ }, "mount_default_fields": { "type": "array", - "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init.service,_netdev``", + "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev``", "default": [ null, null, "auto", - "defaults,nofail,x-systemd.after=cloud-init.service", + "defaults,nofail,x-systemd.after=cloud-init-network.service", "0", "2" ], @@ -3386,6 +3391,28 @@ "default": "''", "description": "Optional content to write to the provided ``path``. When content is present and encoding is not 'text/plain', decode the content prior to writing. 
Default: ``''``" }, + "source": { + "type": "object", + "description": "Optional specification for content loading from an arbitrary URI", + "additionalProperties": false, + "properties": { + "uri": { + "type": "string", + "format": "uri", + "description": "URI from which to load file content. If loading fails repeatedly, ``content`` is used instead." + }, + "headers": { + "type": "object", + "description": "Optional HTTP headers to accompany load request, if applicable", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "uri" + ] + }, "owner": { "type": "string", "default": "root:root", @@ -3452,6 +3479,16 @@ "format": "uri", "description": "URL to the directory where the yum repository's 'repodata' directory lives" }, + "metalink": { + "type": "string", + "format": "uri", + "description": "Specifies a URL to a metalink file for the repomd.xml" + }, + "mirrorlist": { + "type": "string", + "format": "uri", + "description": "Specifies a URL to a file containing a baseurls list" + }, "name": { "type": "string", "description": "Optional human-readable name of the yum repo." @@ -3479,8 +3516,22 @@ "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf" } }, - "required": [ - "baseurl" + "anyOf": [ + { + "required": [ + "baseurl" + ] + }, + { + "required": [ + "metalink" + ] + }, + { + "required": [ + "mirrorlist" + ] + } ] } } diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 4557d4320ee..1afef63de95 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -36,6 +36,7 @@ from cloudinit import ( helpers, importer, + lifecycle, net, persistence, ssh_util, @@ -60,6 +61,7 @@ OSFAMILIES = { "alpine": ["alpine"], + "aosc": ["aosc"], "arch": ["arch"], "debian": ["debian", "ubuntu"], "freebsd": ["freebsd", "dragonfly"], @@ -170,6 +172,7 @@ def __init__(self, name, cfg, paths): self.package_managers: List[PackageManager] = [] self._dhcp_client = None self._fallback_interface = None + self.is_linux = True def _unpickle(self, ci_pkl_version: int) -> None: """Perform deserialization fixes for Distro.""" @@ -186,6 +189,8 @@ def _unpickle(self, ci_pkl_version: int) -> None: self._dhcp_client = None if not hasattr(self, "_fallback_interface"): self._fallback_interface = None + if not hasattr(self, "is_linux"): + self.is_linux = True def _validate_entry(self, entry): if isinstance(entry, str): @@ -706,7 +711,7 @@ def add_user(self, name, **kwargs): groups = groups.split(",") if isinstance(groups, dict): - util.deprecate( + lifecycle.deprecate( deprecated=f"The user {name} has a 'groups' config value " "of type dict", deprecated_version="22.3", @@ -844,7 +849,7 @@ def create_user(self, name, **kwargs): if kwargs["sudo"]: self.write_sudo_rules(name, kwargs["sudo"]) elif kwargs["sudo"] is False: - util.deprecate( + lifecycle.deprecate( deprecated=f"The value of 'false' in user {name}'s " "'sudo' config", deprecated_version="22.2", diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py index a1d0d900c9f..dae4b61564e 100644 --- a/cloudinit/distros/alpine.py +++ b/cloudinit/distros/alpine.py @@ -13,7 +13,7 @@ from datetime import datetime from typing import Any, Dict, Optional -from cloudinit import distros, helpers, subp, util +from cloudinit import distros, helpers, lifecycle, subp, util from cloudinit.distros.parsers.hostname import HostnameConf from cloudinit.settings import PER_ALWAYS, PER_INSTANCE @@ -248,7 +248,7 @@ def add_user(self, 
name, **kwargs): if isinstance(groups, str): groups = groups.split(",") elif isinstance(groups, dict): - util.deprecate( + lifecycle.deprecate( deprecated=f"The user {name} has a 'groups' config value " "of type dict", deprecated_version="22.3", diff --git a/cloudinit/distros/aosc.py b/cloudinit/distros/aosc.py new file mode 100644 index 00000000000..0460c740d5c --- /dev/null +++ b/cloudinit/distros/aosc.py @@ -0,0 +1,148 @@ +# Copyright (C) 2024 AOSC Developers +# +# Author: Yuanhang Sun +# +# This file is part of cloud-init. See LICENSE file for license information. +import logging + +from cloudinit import distros, helpers, subp, util +from cloudinit.distros import PackageList +from cloudinit.distros.parsers.hostname import HostnameConf +from cloudinit.distros.parsers.sys_conf import SysConf +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + + +class Distro(distros.Distro): + systemd_locale_conf_fn = "/etc/locale.conf" + init_cmd = ["systemctl"] + network_conf_dir = "/etc/sysconfig/network" + resolve_conf_fn = "/etc/systemd/resolved.conf" + tz_local_fn = "/etc/localtime" + + dhclient_lease_directory = "/var/lib/NetworkManager" + dhclient_lease_file_regex = r"dhclient-[\w-]+\.lease" + + renderer_configs = { + "sysconfig": { + "control": "etc/sysconfig/network", + "iface_templates": "%(base)s/network-scripts/ifcfg-%(name)s", + "route_templates": { + "ipv4": "%(base)s/network-scripts/route-%(name)s", + "ipv6": "%(base)s/network-scripts/route6-%(name)s", + }, + } + } + + prefer_fqdn = False + + def __init__(self, name, cfg, paths): + distros.Distro.__init__(self, name, cfg, paths) + self._runner = helpers.Runners(paths) + self.osfamily = "aosc" + self.default_locale = "en_US.UTF-8" + cfg["ssh_svcname"] = "sshd" + + def apply_locale(self, locale, out_fn=None): + if not out_fn: + out_fn = self.systemd_locale_conf_fn + locale_cfg = { + "LANG": locale, + } + update_locale_conf(out_fn, locale_cfg) + + def _write_hostname(self, hostname, filename): + if filename.endswith("/previous-hostname"): + conf = HostnameConf("") + conf.set_hostname(hostname) + util.write_file(filename, str(conf), 0o644) + create_hostname_file = util.get_cfg_option_bool( + self._cfg, "create_hostname_file", True + ) + if create_hostname_file: + subp.subp(["hostnamectl", "set-hostname", str(hostname)]) + else: + subp.subp( + [ + "hostnamectl", + "set-hostname", + "--transient", + str(hostname), + ] + ) + LOG.info("create_hostname_file is False; hostname set transiently") + + def _read_hostname(self, filename, default=None): + if filename.endswith("/previous-hostname"): + return util.load_text_file(filename).strip() + (out, _err) = subp.subp(["hostname"]) + out = out.strip() + if len(out): + return out + else: + return default + + def _read_system_hostname(self): + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) + + def set_timezone(self, tz): + tz_file = self._find_tz_file(tz) + util.del_file(self.tz_local_fn) + util.sym_link(tz_file, self.tz_local_fn) + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + cmd = ["oma"] + if command: + cmd.append(command) + cmd.append("-y") + cmd.extend(pkgs) + + subp.subp(cmd, capture=False) + + def install_packages(self, pkglist: PackageList): + self.package_command("install", pkgs=pkglist) + + def update_package_sources(self): + self._runner.run( + "update-sources", + self.package_command, + "refresh", + freq=PER_INSTANCE, + ) + + +def 
read_locale_conf(sys_path): + exists = False + try: + contents = util.load_text_file(sys_path).splitlines() + exists = True + except IOError: + contents = [] + return (exists, SysConf(contents)) + + +def update_locale_conf(sys_path, locale_cfg): + if not locale_cfg: + return + (exists, contents) = read_locale_conf(sys_path) + updated_am = 0 + for (k, v) in locale_cfg.items(): + if v is None: + continue + v = str(v) + if len(v) == 0: + continue + contents[k] = v + updated_am += 1 + if updated_am: + lines = [ + str(contents), + ] + if not exists: + lines.insert(0, util.make_header()) + util.write_file(sys_path, "\n".join(lines) + "\n", 0o644) diff --git a/cloudinit/distros/azurelinux.py b/cloudinit/distros/azurelinux.py index 5098a45942d..591b870020e 100644 --- a/cloudinit/distros/azurelinux.py +++ b/cloudinit/distros/azurelinux.py @@ -22,6 +22,8 @@ class Distro(rhel.Distro): + usr_lib_exec = "/usr/lib" + def __init__(self, name, cfg, paths): super().__init__(name, cfg, paths) self.osfamily = "azurelinux" diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py index 25b374ba3bc..15be9c36714 100644 --- a/cloudinit/distros/bsd.py +++ b/cloudinit/distros/bsd.py @@ -40,6 +40,13 @@ def __init__(self, name, cfg, paths): cfg["rsyslog_svcname"] = "rsyslogd" self.osfamily = platform.system().lower() self.net_ops = bsd_netops.BsdNetOps + self.is_linux = False + + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + + # this needs to be after the super class _unpickle to override it + self.is_linux = False def _read_system_hostname(self): sys_hostname = self._read_hostname(self.hostname_conf_fn) diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py index b8d14937488..2d0a887e7c4 100644 --- a/cloudinit/distros/ug_util.py +++ b/cloudinit/distros/ug_util.py @@ -11,7 +11,7 @@ import logging -from cloudinit import type_utils, util +from cloudinit import lifecycle, type_utils, util LOG = logging.getLogger(__name__) @@ -175,7 +175,7 @@ def normalize_users_groups(cfg, distro): # Translate it into a format that will be more useful going forward if isinstance(old_user, str): old_user = {"name": old_user} - util.deprecate( + lifecycle.deprecate( deprecated="'user' of type string", deprecated_version="22.2", extra_message="Use 'users' list instead.", @@ -208,7 +208,7 @@ def normalize_users_groups(cfg, distro): base_users = cfg.get("users", []) if isinstance(base_users, (dict, str)): - util.deprecate( + lifecycle.deprecate( deprecated=f"'users' of type {type(base_users)}", deprecated_version="22.2", extra_message="Use 'users' as a list.", diff --git a/cloudinit/features.py b/cloudinit/features.py index c3fdae18658..4f9a59e9925 100644 --- a/cloudinit/features.py +++ b/cloudinit/features.py @@ -107,6 +107,11 @@ the different log levels is that logs at DEPRECATED level result in a return code of 2 from `cloud-init status`. +This may may also be used in some limited cases where new error messages may be +logged which increase the risk of regression in stable downstreams where the +error was previously unreported yet downstream users expected stable behavior +across new cloud-init releases. + format: :: = | diff --git a/cloudinit/lifecycle.py b/cloudinit/lifecycle.py new file mode 100644 index 00000000000..871333ef6fb --- /dev/null +++ b/cloudinit/lifecycle.py @@ -0,0 +1,242 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+import collections
+import functools
+import logging
+from typing import NamedTuple, Optional
+
+from cloudinit import features, log
+
+LOG = logging.getLogger(__name__)
+
+
+class DeprecationLog(NamedTuple):
+    log_level: int
+    message: str
+
+
+@functools.total_ordering
+class Version(
+    collections.namedtuple("Version", ["major", "minor", "patch", "rev"])
+):
+    """A class for comparing versions.
+
+    Implemented as a named tuple with all ordering methods. Comparisons
+    between X.Y.N and X.Y always treat the more specific number as larger.
+
+    :param major: the most significant number in a version
+    :param minor: next greatest significant number after major
+    :param patch: next greatest significant number after minor
+    :param rev: the least significant number in a version
+
+    :raises TypeError: If invalid arguments are given.
+    :raises ValueError: If invalid arguments are given.
+
+    Examples:
+        >>> Version(2, 9) == Version.from_str("2.9")
+        True
+        >>> Version(2, 9, 1) > Version.from_str("2.9.1")
+        False
+        >>> Version(3, 10) > Version.from_str("3.9.9.9")
+        True
+        >>> Version(3, 7) >= Version.from_str("3.7")
+        True
+
+    """
+
+    def __new__(
+        cls, major: int = -1, minor: int = -1, patch: int = -1, rev: int = -1
+    ) -> "Version":
+        """Default of -1 allows us to tiebreak in favor of the most specific
+        number"""
+        return super(Version, cls).__new__(cls, major, minor, patch, rev)
+
+    @classmethod
+    def from_str(cls, version: str) -> "Version":
+        """Create a Version object from a string.
+
+        :param version: A period-delimited version string, max 4 segments.
+
+        :raises TypeError: Raised if invalid arguments are given.
+        :raises ValueError: Raised if invalid arguments are given.
+
+        :return: A Version object.
+        """
+        return cls(*(list(map(int, version.split(".")))))
+
+    def __gt__(self, other):
+        return 1 == self._compare_version(other)
+
+    def __eq__(self, other):
+        return (
+            self.major == other.major
+            and self.minor == other.minor
+            and self.patch == other.patch
+            and self.rev == other.rev
+        )
+
+    def __iter__(self):
+        """Iterate over the version (drop sentinels)"""
+        for n in (self.major, self.minor, self.patch, self.rev):
+            if n != -1:
+                yield str(n)
+            else:
+                break
+
+    def __str__(self):
+        return ".".join(self)
+
+    def __hash__(self):
+        return hash(str(self))
+
+    def _compare_version(self, other: "Version") -> int:
+        """Compare this Version to another.
+
+        :param other: A Version object.
+
+        :return: 1 if self > other, -1 if self < other, else 0
+        """
+        if self == other:
+            return 0
+        if self.major > other.major:
+            return 1
+        if self.minor > other.minor:
+            return 1
+        if self.patch > other.patch:
+            return 1
+        if self.rev > other.rev:
+            return 1
+        return -1
+
+
+def should_log_deprecation(version: str, boundary_version: str) -> bool:
+    """Determine if a deprecation message should be logged.
+
+    :param version: The version in which the thing was deprecated.
+    :param boundary_version: The version at which deprecation level is logged.
+
+    :return: True if the message should be logged, else False.
+    """
+    return boundary_version == "devel" or Version.from_str(
+        version
+    ) <= Version.from_str(boundary_version)
+
+
+def log_with_downgradable_level(
+    *,
+    logger: logging.Logger,
+    version: str,
+    requested_level: int,
+    msg: str,
+    args,
+):
+    """Log a message at the requested level, if that is acceptable.
+
+    If the log level is too high due to the version boundary, log at DEBUG
+    level. Useful to add new warnings to previously unguarded code without
+    disrupting stable downstreams.
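+
+    A minimal illustrative call (the version, level, message, and args
+    here are hypothetical values, not part of the API contract):
+
+        log_with_downgradable_level(
+            logger=LOG,
+            version="24.3",
+            requested_level=logging.WARNING,
+            msg="unexpected value: %s",
+            args=("some-value",),
+        )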
+
+    :param logger: Logger object to log with
+    :param version: Version in which this log message was introduced
+    :param requested_level: Preferred level at which this message should
+        be logged
+    :param msg: Message, as passed to the logger.
+    :param args: Message formatting args, as passed to the logger
+    """
+    if should_log_deprecation(version, features.DEPRECATION_INFO_BOUNDARY):
+        logger.log(requested_level, msg, *args)
+    else:
+        logger.debug(msg, *args)
+
+
+def deprecate(
+    *,
+    deprecated: str,
+    deprecated_version: str,
+    extra_message: Optional[str] = None,
+    schedule: int = 5,
+    skip_log: bool = False,
+) -> DeprecationLog:
+    """Mark a "thing" as deprecated. Deduplicated deprecations are
+    logged.
+
+    :param deprecated: Noun to be deprecated. Write this as the start
+        of a sentence, with no period. Version and extra message will
+        be appended.
+    :param deprecated_version: The version in which the thing was
+        deprecated
+    :param extra_message: A remedy for the user's problem. A good
+        message will be actionable and specific (i.e., don't use a
+        generic "Use updated key." if the user used a deprecated key).
+        End the string with a period.
+    :param schedule: Manually set the deprecation schedule. Defaults to
+        5 years. Leave a comment explaining your reason for deviation if
+        setting this value.
+    :param skip_log: Return log text rather than logging it. Useful for
+        running prior to logging setup.
+    :return: NamedTuple containing log level and log message
+        DeprecationLog(level: int, message: str)
+
+    Note: uses keyword-only arguments to improve legibility
+    """
+    if not hasattr(deprecate, "log"):
+        setattr(deprecate, "log", set())
+    message = extra_message or ""
+    dedup = hash(deprecated + message + deprecated_version + str(schedule))
+    version = Version.from_str(deprecated_version)
+    version_removed = Version(version.major + schedule, version.minor)
+    deprecate_msg = (
+        f"{deprecated} is deprecated in "
+        f"{deprecated_version} and scheduled to be removed in "
+        f"{version_removed}. {message}"
+    ).rstrip()
+    if not should_log_deprecation(
+        deprecated_version, features.DEPRECATION_INFO_BOUNDARY
+    ):
+        level = logging.INFO
+    elif hasattr(LOG, "deprecated"):
+        level = log.DEPRECATED
+    else:
+        level = logging.WARN
+    log_cache = getattr(deprecate, "log")
+    if not skip_log and dedup not in log_cache:
+        log_cache.add(dedup)
+        LOG.log(level, deprecate_msg)
+    return DeprecationLog(level, deprecate_msg)
+
+
+def deprecate_call(
+    *, deprecated_version: str, extra_message: str, schedule: int = 5
+):
+    """Mark a "thing" as deprecated. Deduplicated deprecations are
+    logged.
+
+    :param deprecated_version: The version in which the thing was
+        deprecated
+    :param extra_message: A remedy for the user's problem. A good
+        message will be actionable and specific (i.e., don't use a
+        generic "Use updated key." if the user used a deprecated key).
+        End the string with a period.
+    :param schedule: Manually set the deprecation schedule. Defaults to
+        5 years. Leave a comment explaining your reason for deviation if
+        setting this value.
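+
+    A usage sketch (``old_helper`` and the message are hypothetical):
+
+        @deprecate_call(
+            deprecated_version="24.3",
+            extra_message="Use new_helper() instead.",
+        )
+        def old_helper():
+            ...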
+ + Note: uses keyword-only arguments to improve legibility + """ + + def wrapper(func): + @functools.wraps(func) + def decorator(*args, **kwargs): + # don't log message multiple times + out = func(*args, **kwargs) + deprecate( + deprecated_version=deprecated_version, + deprecated=func.__name__, + extra_message=extra_message, + schedule=schedule, + ) + return out + + return decorator + + return wrapper diff --git a/cloudinit/log.py b/cloudinit/log.py index 08d0efa3001..983b426b7ce 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -23,6 +23,23 @@ DEFAULT_LOG_FORMAT = "%(asctime)s - %(filename)s[%(levelname)s]: %(message)s" DEPRECATED = 35 +TRACE = logging.DEBUG - 5 + + +class CustomLoggerType(logging.Logger): + """A hack to get mypy to stop complaining about custom logging methods. + + When using deprecated or trace logging, rather than: + LOG = logging.getLogger(__name__) + Instead do: + LOG = cast(CustomLoggerType, logging.getLogger(__name__)) + """ + + def trace(self, *args, **kwargs): + pass + + def deprecated(self, *args, **kwargs): + pass def setup_basic_logging(level=logging.DEBUG, formatter=None): @@ -45,14 +62,20 @@ def flush_loggers(root): flush_loggers(root.parent) -def define_deprecation_logger(lvl=DEPRECATED): - logging.addLevelName(lvl, "DEPRECATED") +def define_extra_loggers() -> None: + """Add DEPRECATED and TRACE log levels to the logging module.""" - def deprecated(self, message, *args, **kwargs): - if self.isEnabledFor(lvl): - self._log(lvl, message, args, **kwargs) + def new_logger(level): + def log_at_level(self, message, *args, **kwargs): + if self.isEnabledFor(level): + self._log(level, message, args, **kwargs) - logging.Logger.deprecated = deprecated + return log_at_level + + logging.addLevelName(DEPRECATED, "DEPRECATED") + logging.addLevelName(TRACE, "TRACE") + setattr(logging.Logger, "deprecated", new_logger(DEPRECATED)) + setattr(logging.Logger, "trace", new_logger(TRACE)) def setup_logging(cfg=None): @@ -129,6 +152,9 @@ def emit(self, record: logging.LogRecord): def export_logs(self): return copy.deepcopy(self.holder) + def clean_logs(self): + self.holder = defaultdict(list) + def flush(self): pass @@ -183,7 +209,7 @@ def configure_root_logger(): # Always format logging timestamps as UTC time logging.Formatter.converter = time.gmtime - define_deprecation_logger() + define_extra_loggers() setup_backup_logging() reset_logging() diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py index a13d4c14f69..06305668fe4 100644 --- a/cloudinit/net/network_manager.py +++ b/cloudinit/net/network_manager.py @@ -239,7 +239,10 @@ def _add_nameserver(self, dns: str) -> None: Extends the ipv[46].dns property with a name server. """ family = "ipv6" if is_ipv6_address(dns) else "ipv4" - if self.config.has_section(family): + if ( + self.config.has_section(family) + and self._get_config_option(family, "method") != "disabled" + ): self._set_default(family, "dns", "") self.config[family]["dns"] = self.config[family]["dns"] + dns + ";" @@ -248,7 +251,10 @@ def _add_dns_search(self, dns_search: List[str]) -> None: Extends the ipv[46].dns-search property with a name server. 
""" for family in ["ipv4", "ipv6"]: - if self.config.has_section(family): + if ( + self.config.has_section(family) + and self._get_config_option(family, "method") != "disabled" + ): self._set_default(family, "dns-search", "") self.config[family]["dns-search"] = ( self.config[family]["dns-search"] @@ -431,6 +437,10 @@ def render_interface(self, iface, network_state, renderer): self.config["vlan"]["parent"] = renderer.con_ref( iface["vlan-raw-device"] ) + if if_type == "bond" and ipv4_mtu is not None: + if "ethernet" not in self.config: + self.config["ethernet"] = {} + self.config["ethernet"]["mtu"] = str(ipv4_mtu) if if_type == "bridge": # Bridge is ass-backwards compared to bond for port in iface["bridge_ports"]: diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 9f34467be78..25471dc172c 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -9,7 +9,7 @@ import logging from typing import TYPE_CHECKING, Any, Dict, Optional -from cloudinit import safeyaml, util +from cloudinit import lifecycle, safeyaml, util from cloudinit.net import ( find_interface_name_from_mac, get_interfaces_by_mac, @@ -86,7 +86,7 @@ def warn_deprecated_all_devices(dikt: dict) -> None: """Warn about deprecations of v2 properties for all devices""" if "gateway4" in dikt or "gateway6" in dikt: - util.deprecate( + lifecycle.deprecate( deprecated="The use of `gateway4` and `gateway6`", deprecated_version="22.4", extra_message="For more info check out: " diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py index 3a4cdf2707c..83b33e0380c 100644 --- a/cloudinit/net/openbsd.py +++ b/cloudinit/net/openbsd.py @@ -27,7 +27,7 @@ def write_config(self): ) mtu = v.get("mtu") if mtu: - content += " mtu %d" % mtu + content += "\nmtu %d" % mtu content += "\n" + self.interface_routes util.write_file(fn, content) diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 8b3db620018..6aee531638d 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -15,7 +15,7 @@ from ipaddress import IPv4Network from typing import Dict, List, Union -from cloudinit import subp, util +from cloudinit import lifecycle, subp, util from cloudinit.net.network_state import net_prefix_to_ipv4_mask from cloudinit.simpletable import SimpleTable @@ -95,7 +95,7 @@ def _netdev_info_iproute_json(ipaddr_json): return devs -@util.deprecate_call( +@lifecycle.deprecate_call( deprecated_version="22.1", extra_message="Required by old iproute2 versions that don't " "support ip json output. Consider upgrading to a more recent version.", diff --git a/cloudinit/socket.py b/cloudinit/socket.py new file mode 100644 index 00000000000..7ef19f43798 --- /dev/null +++ b/cloudinit/socket.py @@ -0,0 +1,174 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""A module for common socket helpers.""" +import logging +import os +import socket +import sys +import time +from contextlib import suppress + +from cloudinit.settings import DEFAULT_RUN_DIR + +LOG = logging.getLogger(__name__) + + +def sd_notify(message: str): + """Send a sd_notify message. 
+
+    :param message: sd-notify message (must be valid ascii)
+    """
+    socket_path = os.environ.get("NOTIFY_SOCKET", "")
+
+    if not socket_path:
+        # not running under systemd, no-op
+        return
+
+    elif socket_path[0] == "@":
+        # abstract
+        socket_path = socket_path.replace("@", "\0", 1)
+
+    # unix domain
+    elif socket_path[0] != "/":
+        raise OSError("Unsupported socket type")
+
+    with socket.socket(
+        socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC
+    ) as sock:
+        LOG.info("Sending sd_notify(%s)", str(message))
+        sock.connect(socket_path)
+        sock.sendall(message.encode("ascii"))
+
+
+class SocketSync:
+    """A two-way synchronization protocol over Unix domain sockets."""
+
+    def __init__(self, *names: str):
+        """Initialize a synchronization context.
+
+        1) Ensure that the socket directory exists.
+        2) Bind a socket for each stage.
+
+        Binding the sockets on initialization allows receipt of stage
+        "start" notifications prior to the cloud-init stage being ready to
+        start.
+
+        :param names: stage names, used as unique identifiers
+        """
+        self.stage = ""
+        self.remote = ""
+        self.first_exception = ""
+        self.systemd_exit_code = 0
+        self.experienced_any_error = False
+        self.sockets = {
+            name: socket.socket(
+                socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC
+            )
+            for name in names
+        }
+        # ensure the directory exists
+        os.makedirs(f"{DEFAULT_RUN_DIR}/share", mode=0o700, exist_ok=True)
+        # remove stale sockets and bind
+        for name, sock in self.sockets.items():
+            socket_path = f"{DEFAULT_RUN_DIR}/share/{name}.sock"
+            with suppress(FileNotFoundError):
+                os.remove(socket_path)
+            sock.bind(socket_path)
+
+    def __call__(self, stage: str):
+        """Set the stage before entering context.
+
+        This enables the context manager to be initialized separately from
+        each stage synchronization.
+
+        :param stage: the name of a stage to synchronize
+
+        Example:
+            sync = SocketSync("stage 1", "stage 2")
+            with sync("stage 1"):
+                pass
+            with sync("stage 2"):
+                pass
+        """
+        if stage not in self.sockets:
+            raise ValueError(f"Invalid stage name: {stage}")
+        self.stage = stage
+        return self
+
+    def __enter__(self):
+        """Wait until a message has been received on this stage's socket.
+
+        Once the message has been received, enter the context.
+        """
+        if os.isatty(sys.stdin.fileno()):
+            LOG.info(
+                "Stdin is a tty, so skipping stage synchronization protocol"
+            )
+            return
+        self.systemd_exit_code = 0
+        sd_notify(
+            "STATUS=Waiting on external services to "
+            f"complete before starting the {self.stage} stage."
+ ) + start_time = time.monotonic() + # block until init system sends us data + # the first value returned contains a message from the init system + # (should be "start") + # the second value contains the path to a unix socket on which to + # reply, which is expected to be /path/to/{self.stage}-return.sock + sock = self.sockets[self.stage] + chunk, self.remote = sock.recvfrom(5) + + if b"start" != chunk: + # The protocol expects to receive a command "start" + self.__exit__(None, None, None) + raise ValueError(f"Received invalid message: [{str(chunk)}]") + elif f"{DEFAULT_RUN_DIR}/share/{self.stage}-return.sock" != str( + self.remote + ): + # assert that the return path is in a directory with appropriate + # permissions + self.__exit__(None, None, None) + raise ValueError(f"Unexpected path to unix socket: {self.remote}") + + total = time.monotonic() - start_time + time_msg = f"took {total: .3f}s to " if total > 0.01 else "" + sd_notify(f"STATUS=Running ({self.stage} stage)") + LOG.debug("sync(%s): synchronization %scomplete", self.stage, time_msg) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Notify the socket that this stage is complete.""" + message = f"Completed socket interaction for boot stage {self.stage}" + if exc_type: + # handle exception thrown in context + self.systemd_exit_code = 1 + self.experienced_any_error = True + status = f"{repr(exc_val)} in {exc_tb.tb_frame}" + message = ( + 'fatal error, run "systemctl status cloud-init-main.service" ' + 'and "cloud-init status --long" for more details' + ) + if not self.first_exception: + self.first_exception = status + LOG.fatal(status) + sd_notify(f"STATUS={status}") + + self.experienced_any_error = self.experienced_any_error or bool( + self.systemd_exit_code + ) + sock = self.sockets[self.stage] + sock.connect(self.remote) + + # the returned message will be executed in a subshell + # hardcode this message rather than sending a more informative message + # to avoid having to sanitize inputs (to prevent escaping the shell) + sock.sendall( + f"echo '{message}'; exit {self.systemd_exit_code};".encode() + ) + sock.close() + + # suppress exception - the exception was logged and the init system + # notified of stage completion (and the exception received as a status + # message). Raising an exception would block the rest of boot, so carry + # on in a degraded state. + return True diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 44b1e194fa4..be4b5a1fbaf 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -553,6 +553,44 @@ def _is_ephemeral_networking_up(self) -> bool: or self._ephemeral_dhcp_ctx.lease is None ) + def _check_azure_proxy_agent_status(self) -> None: + """Check if azure-proxy-agent is ready for communication with WS/IMDS. + + If ProvisionGuestProxyAgent is true, query azure-proxy-agent status, + waiting up to 120 seconds for the proxy to negotiate with Wireserver + and configure an eBPF proxy. Once azure-proxy-agent is ready, + it will exit with code 0 and cloud-init can then expect to be able to + communicate with these services. + + Fail deployment if azure-proxy-agent is not found or otherwise returns + an error. 
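+
+        The status query is roughly equivalent to the following call
+        (a sketch; the real invocation below also reports diagnostics):
+
+            subp.subp(["azure-proxy-agent", "--status", "--wait", "120"])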
+ + For more information, check out: + https://github.com/azure/guestproxyagent + """ + try: + cmd = [ + "azure-proxy-agent", + "--status", + "--wait", + "120", + ] + out, err = subp.subp(cmd) + report_diagnostic_event( + "Running azure-proxy-agent %s resulted" + "in stderr output: %s with stdout: %s" % (cmd, err, out), + logger_func=LOG.debug, + ) + except subp.ProcessExecutionError as error: + if isinstance(error.reason, FileNotFoundError): + report_error = errors.ReportableErrorProxyAgentNotFound() + self._report_failure(report_error) + else: + reportable_error = ( + errors.ReportableErrorProxyAgentStatusFailure(error) + ) + self._report_failure(reportable_error) + @azure_ds_telemetry_reporter def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. @@ -632,6 +670,11 @@ def crawl_metadata(self): imds_md = {} if self._is_ephemeral_networking_up(): + # check if azure-proxy-agent is enabled in the ovf-env.xml file. + # azure-proxy-agent feature is opt-in and disabled by default. + if cfg.get("ProvisionGuestProxyAgent"): + self._check_azure_proxy_agent_status() + imds_md = self.get_metadata_from_imds(report_failure=True) if not imds_md and ovf_source is None: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 3f82c89ea42..5ca6c27d176 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -9,7 +9,7 @@ import logging import os -from cloudinit import sources, subp, util +from cloudinit import lifecycle, sources, subp, util from cloudinit.event import EventScope, EventType from cloudinit.net import eni from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform @@ -176,6 +176,14 @@ def network_config(self): elif self.network_eni is not None: self._network_config = eni.convert_eni_data(self.network_eni) LOG.debug("network config provided via converted eni data") + lifecycle.deprecate( + deprecated="Eni network configuration in ConfigDrive", + deprecated_version="24.3", + extra_message=( + "You can use openstack's network " + "configuration format instead" + ), + ) else: LOG.debug("no network configuration available") return self._network_config diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 951006ed815..ec35af782f0 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -9,7 +9,7 @@ import logging import cloudinit.sources.helpers.digitalocean as do_helper -from cloudinit import sources, util +from cloudinit import lifecycle, sources, util LOG = logging.getLogger(__name__) @@ -55,7 +55,7 @@ def _unpickle(self, ci_pkl_version: int) -> None: self._deprecate() def _deprecate(self): - util.deprecate( + lifecycle.deprecate( deprecated="DataSourceDigitalOcean", deprecated_version="23.2", extra_message="Deprecated in favour of DataSourceConfigDrive.", diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 89edd79f7ea..9b3ce8d9f83 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -96,6 +96,7 @@ import json import logging import os +from typing import Any, Callable, Dict, Optional, Tuple from cloudinit import atomic_helper, sources, subp, util from cloudinit.sources.helpers import openstack @@ -176,7 +177,7 @@ def network_config(self): # environment handles networking configuration. Not cloud-init. 
return {"config": "disabled", "version": 1} if self._network_config is None: - if self.network_json is not None: + if self.network_json not in (sources.UNSET, None): LOG.debug("network config provided via network_json") self._network_config = openstack.convert_net_json( self.network_json, known_macs=None @@ -186,7 +187,12 @@ def network_config(self): return self._network_config -def _read_system_uuid(): +def _read_system_uuid() -> Optional[str]: + """ + Read the system uuid. + + :return: the system uuid or None if not available. + """ uuid_path = "/sys/hypervisor/uuid" if not os.path.isfile(uuid_path): return None @@ -194,6 +200,11 @@ def _read_system_uuid(): def _is_xen(): + """ + Return boolean indicating if this is a xen hypervisor. + + :return: True if this is a xen hypervisor, False otherwise. + """ return os.path.exists("/proc/xen") @@ -201,7 +212,7 @@ def _is_ibm_provisioning( prov_cfg="/root/provisioningConfiguration.cfg", inst_log="/root/swinstall.log", boot_ref="/proc/1/environ", -): +) -> bool: """Return boolean indicating if this boot is ibm provisioning boot.""" if os.path.exists(prov_cfg): msg = "config '%s' exists." % prov_cfg @@ -229,7 +240,7 @@ def _is_ibm_provisioning( return result -def get_ibm_platform(): +def get_ibm_platform() -> Tuple[Optional[str], Optional[str]]: """Return a tuple (Platform, path) If this is Not IBM cloud, then the return value is (None, None). @@ -242,7 +253,7 @@ def get_ibm_platform(): return not_found # fslabels contains only the first entry with a given label. - fslabels = {} + fslabels: Dict[str, Dict] = {} try: devs = util.blkid() except subp.ProcessExecutionError as e: @@ -289,10 +300,10 @@ def get_ibm_platform(): return not_found -def read_md(): +def read_md() -> Optional[Dict[str, Any]]: """Read data from IBM Cloud. - @return: None if not running on IBM Cloud. + :return: None if not running on IBM Cloud. dictionary with guaranteed fields: metadata, version and optional fields: userdata, vendordata, networkdata. Also includes the system uuid from /sys/hypervisor/uuid.""" @@ -300,7 +311,7 @@ def read_md(): if platform is None: LOG.debug("This is not an IBMCloud platform.") return None - elif platform in PROVISIONING: + elif platform in PROVISIONING or path is None: LOG.debug("Cloud-init is disabled during provisioning: %s.", platform) return None @@ -325,71 +336,76 @@ def read_md(): return ret -def metadata_from_dir(source_dir): +def metadata_from_dir(source_dir: str) -> Dict[str, Any]: """Walk source_dir extracting standardized metadata. Certain metadata keys are renamed to present a standardized set of metadata keys. This function has a lot in common with ConfigDriveReader.read_v2 but - there are a number of inconsistencies, such key renames and as only - presenting a 'latest' version which make it an unlikely candidate to share + there are a number of inconsistencies, such as key renames and only + presenting a 'latest' version, which make it an unlikely candidate to share code. - @return: Dict containing translated metadata, userdata, vendordata, + :return: Dict containing translated metadata, userdata, vendordata, networkdata as present. """ - def opath(fname): + def opath(fname: str) -> str: return os.path.join("openstack", "latest", fname) - def load_json_bytes(blob): + def load_json_bytes(blob: bytes) -> Dict[str, Any]: + """ + Load JSON from a byte string. + + This technically could return a list or a str, but we are only + assuming a dict here. + + :param blob: The byte string to load JSON from. + :return: The loaded JSON object. 
+ """ return json.loads(blob.decode("utf-8")) + def load_file(path: str, translator: Callable[[bytes], Any]) -> Any: + try: + raw = util.load_binary_file(path) + return translator(raw) + except IOError as e: + LOG.debug("Failed reading path '%s': %s", path, e) + return None + except Exception as e: + raise sources.BrokenMetadata(f"Failed decoding {path}: {e}") + files = [ # tuples of (results_name, path, translator) ("metadata_raw", opath("meta_data.json"), load_json_bytes), - ("userdata", opath("user_data"), None), + ("userdata", opath("user_data"), lambda x: x), ("vendordata", opath("vendor_data.json"), load_json_bytes), ("networkdata", opath("network_data.json"), load_json_bytes), ] - results = {} - for (name, path, transl) in files: - fpath = os.path.join(source_dir, path) - raw = None - try: - raw = util.load_binary_file(fpath) - except IOError as e: - LOG.debug("Failed reading path '%s': %s", fpath, e) - - if raw is None or transl is None: - data = raw - else: - try: - data = transl(raw) - except Exception as e: - raise sources.BrokenMetadata( - "Failed decoding %s: %s" % (path, e) - ) + results: Dict[str, Any] = {} - results[name] = data + for name, path, transl in files: + fpath = os.path.join(source_dir, path) + results[name] = load_file(fpath, transl) - if results.get("metadata_raw") is None: + if results["metadata_raw"] is None: raise sources.BrokenMetadata( - "%s missing required file 'meta_data.json'" % source_dir + f"{source_dir} missing required file 'meta_data.json'", ) results["metadata"] = {} md_raw = results["metadata_raw"] md = results["metadata"] + if "random_seed" in md_raw: try: md["random_seed"] = base64.b64decode(md_raw["random_seed"]) except (ValueError, TypeError) as e: raise sources.BrokenMetadata( - "Badly formatted metadata random_seed entry: %s" % e + f"Badly formatted metadata random_seed entry: {e}" ) renames = ( @@ -397,9 +413,10 @@ def load_json_bytes(blob): ("hostname", "local-hostname"), ("uuid", "instance-id"), ) - for mdname, newname in renames: - if mdname in md_raw: - md[newname] = md_raw[mdname] + + for old_key, new_key in renames: + if old_key in md_raw: + md[new_key] = md_raw[old_key] return results diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py index 4f69d90eb70..43be28e0a15 100644 --- a/cloudinit/sources/DataSourceLXD.py +++ b/cloudinit/sources/DataSourceLXD.py @@ -210,8 +210,8 @@ def _get_data(self) -> bool: config = self._crawled_metadata.get("config", {}) user_metadata = config.get("user.meta-data", {}) if user_metadata: - user_metadata = _raw_instance_data_to_dict( - "user.meta-data", user_metadata + self.metadata.update( + _raw_instance_data_to_dict("user.meta-data", user_metadata) ) if "user-data" in self._crawled_metadata: self.userdata_raw = self._crawled_metadata["user-data"] diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index a7d3f3adfaa..23bb2663bf8 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -13,7 +13,7 @@ import os from functools import partial -from cloudinit import dmi, sources, util +from cloudinit import dmi, lifecycle, sources, util from cloudinit.net import eni LOG = logging.getLogger(__name__) @@ -36,8 +36,17 @@ def __init__(self, sys_cfg, distro, paths): self._network_eni = None def __str__(self): - root = sources.DataSource.__str__(self) - return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) + """append seed and dsmode info when they contain non-default values""" + return 
( + super().__str__() + + " " + + (f"[seed={self.seed}]" if self.seed else "") + + ( + f"[dsmode={self.dsmode}]" + if self.dsmode != sources.DSMODE_NETWORK + else "" + ) + ) def _get_devices(self, label): fslist = util.find_devs_with("TYPE=vfat") @@ -121,6 +130,12 @@ def _pp2d_callback(mp, data): label = self.ds_cfg.get("fs_label", "cidata") if label is not None: + if label.lower() != "cidata": + lifecycle.deprecate( + deprecated="Custom fs_label keys", + deprecated_version="24.3", + extra_message="This key isn't supported by ds-identify.", + ) for dev in self._get_devices(label): try: LOG.debug("Attempting to use data from %s", dev) @@ -167,7 +182,7 @@ def _pp2d_callback(mp, data): seedfound = proto break if not seedfound: - LOG.debug("Seed from %s not supported by %s", seedfrom, self) + self._log_unusable_seedfrom(seedfrom) return False # check and replace instances of known dmi. such as # chassis-serial-number or baseboard-product-name @@ -175,7 +190,7 @@ def _pp2d_callback(mp, data): # This could throw errors, but the user told us to do it # so if errors are raised, let them raise - (md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None) + md_seed, ud, vd, network = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) # Values in the command line override those from the seed @@ -184,6 +199,7 @@ def _pp2d_callback(mp, data): ) mydata["user-data"] = ud mydata["vendor-data"] = vd + mydata["network-config"] = network found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults @@ -215,6 +231,16 @@ def platform_type(self): self._platform_type = "lxd" if util.is_lxd() else "nocloud" return self._platform_type + def _log_unusable_seedfrom(self, seedfrom: str): + """Stage-specific level and message.""" + LOG.info( + "%s only uses seeds starting with %s - will try to use %s " + "in the network stage.", + self, + self.supported_seed_starts, + seedfrom, + ) + def _get_cloud_name(self): """Return unknown when 'cloud-name' key is absent from metadata.""" return sources.METADATA_UNKNOWN @@ -246,6 +272,13 @@ def check_instance_id(self, sys_cfg): def network_config(self): if self._network_config is None: if self._network_eni is not None: + lifecycle.deprecate( + deprecated="Eni network configuration in NoCloud", + deprecated_version="24.3", + extra_message=( + "You can use network v1 or network v2 instead" + ), + ) self._network_config = eni.convert_eni_data(self._network_eni) return self._network_config @@ -374,6 +407,15 @@ def __init__(self, sys_cfg, distro, paths): "ftps://", ) + def _log_unusable_seedfrom(self, seedfrom: str): + """Stage-specific level and message.""" + LOG.warning( + "%s only uses seeds starting with %s - %s is not valid.", + self, + self.supported_seed_starts, + seedfrom, + ) + def ds_detect(self): """Check dmi and kernel command line for dsname @@ -382,7 +424,7 @@ def ds_detect(self): For backwards compatiblity, check for that dsname. 
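+
+        For example, either of these (illustrative) kernel command line
+        seeds is expected to select this datasource:
+
+            ds=nocloud-net;s=http://10.0.2.2/configs/
+            ds=nocloud;s=http://10.0.2.2/configs/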
""" log_deprecated = partial( - util.deprecate, + lifecycle.deprecate, deprecated="The 'nocloud-net' datasource name", deprecated_version="24.1", extra_message=( @@ -408,6 +450,17 @@ def ds_detect(self): if serial == "nocloud-net": log_deprecated() return True + elif ( + self.sys_cfg.get("datasource", {}) + .get("NoCloud", {}) + .key("seedfrom") + ): + LOG.debug( + "Machine is configured by system configuration to run on " + "single datasource %s.", + self, + ) + return True return False @@ -428,7 +481,8 @@ def get_datasource_list(depends): logging.basicConfig(level=logging.DEBUG) seedfrom = argv[1] - md_seed, ud, vd = util.read_seeded(seedfrom) + md_seed, ud, vd, network = util.read_seeded(seedfrom) print(f"seeded: {md_seed}") print(f"ud: {ud}") print(f"vd: {vd}") + print(f"network: {network}") diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index bd12f636a34..89fc5de8d66 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -92,7 +92,7 @@ def _get_data(self): LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False - (md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None) + (md_seed, ud, vd, _) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) md = util.mergemanydict([md, md_seed]) diff --git a/cloudinit/sources/DataSourceWSL.py b/cloudinit/sources/DataSourceWSL.py index b81298927a0..7a75ff4e691 100644 --- a/cloudinit/sources/DataSourceWSL.py +++ b/cloudinit/sources/DataSourceWSL.py @@ -328,9 +328,13 @@ def _get_data(self) -> bool: # provides them instead. # That's the reason for not using util.mergemanydict(). merged: dict = {} + user_tags: str = "" overridden_keys: typing.List[str] = [] if user_data: merged = user_data + user_tags = ( + merged.get("landscape", {}).get("client", {}).get("tags", "") + ) if agent_data: if user_data: LOG.debug("Merging both user_data and agent.yaml configs.") @@ -345,6 +349,13 @@ def _get_data(self) -> bool: ", ".join(overridden_keys) ) ) + if user_tags and merged.get("landscape", {}).get("client"): + LOG.debug( + "Landscape client conf updated with user-data" + " landscape.client.tags: %s", + user_tags, + ) + merged["landscape"]["client"]["tags"] = user_tags self.userdata_raw = "#cloud-config\n%s" % yaml.dump(merged) return True diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 27c37ee1e13..a3958d9b918 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -19,7 +19,7 @@ from enum import Enum, unique from typing import Any, Dict, List, Optional, Tuple, Union -from cloudinit import atomic_helper, dmi, importer, net, type_utils +from cloudinit import atomic_helper, dmi, importer, lifecycle, net, type_utils from cloudinit import user_data as ud from cloudinit import util from cloudinit.atomic_helper import write_json @@ -325,7 +325,7 @@ def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): self.vendordata_raw = None self.vendordata2_raw = None self.metadata_address = None - self.network_json = UNSET + self.network_json: Optional[str] = UNSET self.ec2_metadata = UNSET self.ds_cfg = util.get_cfg_by_path( @@ -393,14 +393,13 @@ def override_ds_detect(self) -> bool: """ if self.dsname.lower() == parse_cmdline().lower(): LOG.debug( - "Machine is configured by the kernel command line to run on " - "single datasource %s.", + "Kernel command line set to use a single datasource %s.", self, ) return True elif self.sys_cfg.get("datasource_list", []) 
== [self.dsname]: LOG.debug( - "Machine is configured to run on single datasource %s.", self + "Datasource list set to use a single datasource %s.", self ) return True return False @@ -411,12 +410,12 @@ def _check_and_get_data(self): return self._get_data() elif self.ds_detect(): LOG.debug( - "Detected platform: %s. Checking for active instance data", + "Detected %s", self, ) return self._get_data() else: - LOG.debug("Datasource type %s is not detected.", self) + LOG.debug("Did not detect %s", self) return False def _get_standardized_metadata(self, instance_data): @@ -1231,7 +1230,7 @@ def parse_cmdline_or_dmi(input: str) -> str: deprecated = ds_parse_1 or ds_parse_2 if deprecated: dsname = deprecated.group(1).strip() - util.deprecate( + lifecycle.deprecate( deprecated=( f"Defining the datasource on the command line using " f"ci.ds={dsname} or " diff --git a/cloudinit/sources/azure/errors.py b/cloudinit/sources/azure/errors.py index 851a9b6f956..2f715e0c4c7 100644 --- a/cloudinit/sources/azure/errors.py +++ b/cloudinit/sources/azure/errors.py @@ -13,7 +13,7 @@ import requests -from cloudinit import version +from cloudinit import subp, version from cloudinit.sources.azure import identity from cloudinit.url_helper import UrlError @@ -195,3 +195,20 @@ def __init__(self, exception: Exception) -> None: self.supporting_data["exception"] = repr(exception) self.supporting_data["traceback_base64"] = trace_base64 + + +class ReportableErrorProxyAgentNotFound(ReportableError): + def __init__(self) -> None: + super().__init__( + "Unable to activate Azure Guest Proxy Agent." + "azure-proxy-agent not found" + ) + + +class ReportableErrorProxyAgentStatusFailure(ReportableError): + def __init__(self, exception: subp.ProcessExecutionError) -> None: + super().__init__("azure-proxy-agent status failure") + + self.supporting_data["exit_code"] = exception.exit_code + self.supporting_data["stdout"] = exception.stdout + self.supporting_data["stderr"] = exception.stderr diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 70998dda2ee..9b46a22c37d 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -578,8 +578,8 @@ def convert_net_json(network_json=None, known_macs=None): "scope", "dns_nameservers", "dns_search", - "routes", ], + "routes": ["network", "destination", "netmask", "gateway", "metric"], } links = network_json.get("links", []) @@ -620,6 +620,20 @@ def convert_net_json(network_json=None, known_macs=None): (k, v) for k, v in network.items() if k in valid_keys["subnet"] ) + # Filter the route entries as they may contain extra elements such + # as DNS which are required elsewhere by the cloudinit schema + routes = [ + dict( + (k, v) + for k, v in route.items() + if k in valid_keys["routes"] + ) + for route in network.get("routes", []) + ] + + if routes: + subnet.update({"routes": routes}) + if network["type"] == "ipv4_dhcp": subnet.update({"type": "dhcp4"}) elif network["type"] == "ipv6_dhcp": @@ -646,11 +660,22 @@ def convert_net_json(network_json=None, known_macs=None): } ) + # Look for either subnet or network specific DNS servers + # and add them as subnet level DNS entries. 
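+        # A network_data.json "services" entry looks like, e.g.
+        # (illustrative): {"type": "dns", "address": "192.0.2.53"}, and
+        # may appear under a route or directly under the network.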
+ # Subnet specific nameservers dns_nameservers = [ service["address"] - for service in network.get("services", []) + for route in network.get("routes", []) + for service in route.get("services", []) if service.get("type") == "dns" ] + # Network specific nameservers + for service in network.get("services", []): + if service.get("type") != "dns": + continue + if service["address"] in dns_nameservers: + continue + dns_nameservers.append(service["address"]) if dns_nameservers: subnet["dns_nameservers"] = dns_nameservers diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index b07214a228b..254518af9e3 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -207,7 +207,7 @@ def gen_ipv6(self, name, nic): """ if not nic.staticIpv6: - return ([], []) + return ([{"type": "dhcp6"}], []) subnet_list = [] # Static Ipv6 diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index cad85d596b8..70002086738 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -12,7 +12,7 @@ from contextlib import suppress from typing import List, Sequence, Tuple -from cloudinit import subp, util +from cloudinit import lifecycle, subp, util LOG = logging.getLogger(__name__) @@ -671,7 +671,7 @@ def get_opensshd_upstream_version(): upstream_version = "9.0" full_version = get_opensshd_version() if full_version is None: - return util.Version.from_str(upstream_version) + return lifecycle.Version.from_str(upstream_version) if "p" in full_version: upstream_version = full_version[: full_version.find("p")] elif " " in full_version: @@ -679,7 +679,7 @@ def get_opensshd_upstream_version(): else: upstream_version = full_version try: - upstream_version = util.Version.from_str(upstream_version) + upstream_version = lifecycle.Version.from_str(upstream_version) return upstream_version except (ValueError, TypeError): LOG.warning("Could not parse sshd version: %s", upstream_version) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 52876e72434..1d911aaf3ac 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -11,6 +11,7 @@ import sys from collections import namedtuple from contextlib import suppress +from pathlib import Path from typing import Dict, Iterable, List, Optional, Set, Tuple, Union from cloudinit import ( @@ -21,6 +22,7 @@ handlers, helpers, importer, + lifecycle, net, sources, type_utils, @@ -406,6 +408,7 @@ def _get_data_source(self, existing) -> sources.DataSource: ds, ) else: + util.del_file(self.paths.instance_link) raise e self.datasource = ds # Ensure we adjust our path members datasource @@ -458,8 +461,13 @@ def _reflect_cur_instance(self): # Remove the old symlink and attach a new one so # that further reads/writes connect into the right location idir = self._get_ipath() - util.del_file(self.paths.instance_link) - util.sym_link(idir, self.paths.instance_link) + destination = Path(self.paths.instance_link).resolve().absolute() + already_instancified = destination == Path(idir).absolute() + if already_instancified: + LOG.info("Instance link already exists, not recreating it.") + else: + util.del_file(self.paths.instance_link) + util.sym_link(idir, self.paths.instance_link) # Ensures these dirs exist dir_list = [] @@ -498,10 +506,16 @@ def _reflect_cur_instance(self): ) self._write_to_cache() - # Ensure needed components are regenerated - # after change of instance which may cause - # change of configuration - self._reset() + if already_instancified and 
previous_ds == ds: + LOG.info( + "Not re-loading configuration, instance " + "id and datasource have not changed." + ) + # Ensure needed components are regenerated + # after change of instance which may cause + # change of configuration + else: + self._reset() return iid def previous_iid(self): @@ -901,7 +915,7 @@ def _consume_vendordata(self, vendor_source, frequency=PER_INSTANCE): return if isinstance(enabled, str): - util.deprecate( + lifecycle.deprecate( deprecated=f"Use of string '{enabled}' for " "'vendor_data:enabled' field", deprecated_version="23.1", diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index d409e322858..eb2442993b5 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -739,7 +739,7 @@ def read_url_handle_exceptions( time_taken = int(time.monotonic() - start_time) max_wait_str = "%ss" % max_wait if max_wait else "unlimited" status_msg = "Calling '%s' failed [%s/%s]: %s" % ( - url or getattr(url_exc, "url", "url ? None"), + url or getattr(url_exc, "url", "url"), time_taken, max_wait_str, reason, diff --git a/cloudinit/util.py b/cloudinit/util.py index 98dd66d59fc..34d3623a7f7 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -12,7 +12,6 @@ import contextlib import copy as obj_copy import email -import functools import glob import grp import gzip @@ -38,9 +37,11 @@ from collections import deque, namedtuple from contextlib import contextmanager, suppress from errno import ENOENT -from functools import lru_cache, total_ordering +from functools import lru_cache from pathlib import Path +from types import ModuleType from typing import ( + IO, TYPE_CHECKING, Any, Callable, @@ -49,11 +50,11 @@ Generator, List, Mapping, - NamedTuple, Optional, Sequence, TypeVar, Union, + cast, ) from urllib import parse @@ -62,7 +63,6 @@ from cloudinit import ( features, importer, - log, mergers, net, settings, @@ -91,11 +91,6 @@ FALSE_STRINGS = ("off", "0", "no", "false") -class DeprecationLog(NamedTuple): - log_level: int - message: str - - def kernel_version(): return tuple(map(int, os.uname().release.split(".")[:2])) @@ -190,6 +185,7 @@ class SeLinuxGuard: def __init__(self, path, recursive=False): # Late import since it might not always # be possible to use this + self.selinux: Optional[ModuleType] try: self.selinux = importer.import_module("selinux") except ImportError: @@ -495,6 +491,12 @@ def multi_log( @lru_cache() def is_Linux(): + """deprecated: prefer Distro object's `is_linux` property + + Multiple sources of truth is bad, and already know whether we are + working with Linux from the Distro class. Using Distro offers greater code + reusablity, cleaner code, and easier maintenance. 
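+
+    Preferred pattern (a sketch; assumes a ``distro`` instance is in
+    scope, e.g. from the active Init stage):
+
+        if distro.is_linux:
+            ...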
+ """ return "Linux" in platform.system() @@ -630,7 +632,7 @@ def get_linux_distro(): dist = ("", "", "") try: # Was removed in 3.8 - dist = platform.dist() # pylint: disable=W1505,E1101 + dist = platform.dist() # type: ignore # pylint: disable=W1505,E1101 except Exception: pass finally: @@ -656,6 +658,7 @@ def _get_variant(info): if linux_dist in ( "almalinux", "alpine", + "aosc", "arch", "azurelinux", "centos", @@ -834,7 +837,9 @@ def set_subprocess_umask_and_gid(): stdin=subprocess.PIPE, preexec_fn=set_subprocess_umask_and_gid, ) - new_fp = proc.stdin + # As stdin is PIPE, then proc.stdin is IO[bytes] + # https://docs.python.org/3/library/subprocess.html#subprocess.Popen.stdin + new_fp = cast(IO[Any], proc.stdin) else: raise TypeError("Invalid type for output format: %s" % outfmt) @@ -861,7 +866,9 @@ def set_subprocess_umask_and_gid(): stdin=subprocess.PIPE, preexec_fn=set_subprocess_umask_and_gid, ) - new_fp = proc.stdin + # As stdin is PIPE, then proc.stdin is IO[bytes] + # https://docs.python.org/3/library/subprocess.html#subprocess.Popen.stdin + new_fp = cast(IO[Any], proc.stdin) else: raise TypeError("Invalid type for error format: %s" % errfmt) @@ -962,10 +969,11 @@ def read_optional_seed(fill, base="", ext="", timeout=5): 'meta-data' entries """ try: - md, ud, vd = read_seeded(base=base, ext=ext, timeout=timeout) + md, ud, vd, network = read_seeded(base=base, ext=ext, timeout=timeout) fill["user-data"] = ud fill["vendor-data"] = vd fill["meta-data"] = md + fill["network-config"] = md return True except url_helper.UrlError as e: if e.code == url_helper.NOT_FOUND: @@ -1051,6 +1059,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10): ud_url = base.replace("%s", "user-data" + ext) vd_url = base.replace("%s", "vendor-data" + ext) md_url = base.replace("%s", "meta-data" + ext) + network_url = base.replace("%s", "network-config" + ext) else: if features.NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH: if base[-1] != "/" and parse.urlparse(base).query == "": @@ -1059,12 +1068,23 @@ def read_seeded(base="", ext="", timeout=5, retries=10): ud_url = "%s%s%s" % (base, "user-data", ext) vd_url = "%s%s%s" % (base, "vendor-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) + network_url = "%s%s%s" % (base, "network-config", ext) + network = None + try: + network_resp = url_helper.read_file_or_url( + network_url, timeout=timeout, retries=retries + ) + except url_helper.UrlError as e: + LOG.debug("No network config provided: %s", e) + else: + if network_resp.ok(): + network = load_yaml(network_resp.contents) md_resp = url_helper.read_file_or_url( md_url, timeout=timeout, retries=retries ) md = None if md_resp.ok(): - md = load_yaml(decode_binary(md_resp.contents), default={}) + md = load_yaml(md_resp.contents, default={}) ud_resp = url_helper.read_file_or_url( ud_url, timeout=timeout, retries=retries @@ -1086,7 +1106,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10): else: LOG.debug("Error in vendor-data response") - return (md, ud, vd) + return md, ud, vd, network def read_conf_d(confd, *, instance_data_file=None) -> dict: @@ -1695,8 +1715,8 @@ def chownbyname(fname, user=None, group=None): # output: "| logger -p" # error: "> /dev/null" # this returns the specific 'mode' entry, cleanly formatted, with value -def get_output_cfg(cfg, mode): - ret = [None, None] +def get_output_cfg(cfg, mode) -> List[Optional[str]]: + ret: List[Optional[str]] = [None, None] if not cfg or "output" not in cfg: return ret @@ -1735,10 +1755,10 @@ def get_output_cfg(cfg, mode): ret[1] = ret[0] swlist = 
[">>", ">", "|"] - for i in range(len(ret)): - if not ret[i]: + for i, r in enumerate(ret): + if not r: continue - val = ret[i].lstrip() + val = r.lstrip() found = False for s in swlist: if val.startswith(s): @@ -1758,7 +1778,7 @@ def get_config_logfiles(cfg): @param cfg: The cloud-init merged configuration dictionary. """ - logs = [] + logs: List = [] rotated_logs = [] if not cfg or not isinstance(cfg, dict): return logs @@ -1918,21 +1938,23 @@ def mounts(): out = subp.subp("mount") mount_locs = out.stdout.splitlines() method = "mount" - mountre = r"^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$" + mountre = re.compile(r"^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$") for mpline in mount_locs: # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered) # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates) - try: - if method == "proc": - (dev, mp, fstype, opts, _freq, _passno) = mpline.split() - else: - m = re.search(mountre, mpline) - dev = m.group(1) - mp = m.group(2) - fstype = m.group(3) - opts = m.group(4) - except Exception: - continue + if method == "proc": + words = mpline.split() + if len(words) != 6: + continue + (dev, mp, fstype, opts, _freq, _passno) = words + else: + m = mountre.search(mpline) + if m is None or len(m.groups()) < 4: + continue + dev = m.group(1) + mp = m.group(2) + fstype = m.group(3) + opts = m.group(4) # If the name of the mount point contains spaces these # can be escaped as '\040', so undo that.. mp = mp.replace("\\040", " ") @@ -2444,26 +2466,27 @@ def is_lxd(): return os.path.exists("/dev/lxd/sock") -def get_proc_env(pid, encoding="utf-8", errors="replace"): +def get_proc_env( + pid, encoding: str = "utf-8", errors: str = "replace" +) -> Dict[str, str]: """ Return the environment in a dict that a given process id was started with. - @param encoding: if true, then decoding will be done with - .decode(encoding, errors) and text will be returned. - if false then binary will be returned. - @param errors: only used if encoding is true.""" + @param encoding: decoding will be done with .decode(encoding, errors) and + text will be returned. + @param errors: passed through .decode(encoding, errors). + """ fn = os.path.join("/proc", str(pid), "environ") + contents: Union[str, bytes] try: contents = load_binary_file(fn) except (IOError, OSError): return {} env = {} - null, equal = (b"\x00", b"=") - if encoding: - null, equal = ("\x00", "=") - contents = contents.decode(encoding, errors) + null, equal = ("\x00", "=") + contents = contents.decode(encoding, errors) for tok in contents.split(null): if not tok: @@ -2528,7 +2551,7 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False): devpth = None fs_type = None match_mount_point = None - match_mount_point_elements = None + match_mount_point_elements: Optional[List[str]] = None for i, line in enumerate(mountinfo_lines): parts = line.split() @@ -2667,7 +2690,7 @@ def parse_mount(path, get_mnt_opts=False): devpth = None mount_point = None match_mount_point = None - match_mount_point_elements = None + match_mount_point_elements: Optional[List[str]] = None for line in mountoutput.splitlines(): m = re.search(regex, line) if not m: @@ -3116,204 +3139,6 @@ def error(msg, rc=1, fmt="Error:\n{}", sys_exit=False): return rc -@total_ordering -class Version(namedtuple("Version", ["major", "minor", "patch", "rev"])): - """A class for comparing versions. - - Implemented as a named tuple with all ordering methods. Comparisons - between X.Y.N and X.Y always treats the more specific number as larger. 
- - :param major: the most significant number in a version - :param minor: next greatest significant number after major - :param patch: next greatest significant number after minor - :param rev: the least significant number in a version - - :raises TypeError: If invalid arguments are given. - :raises ValueError: If invalid arguments are given. - - Examples: - >>> Version(2, 9) == Version.from_str("2.9") - True - >>> Version(2, 9, 1) > Version.from_str("2.9.1") - False - >>> Version(3, 10) > Version.from_str("3.9.9.9") - True - >>> Version(3, 7) >= Version.from_str("3.7") - True - - """ - - def __new__( - cls, major: int = -1, minor: int = -1, patch: int = -1, rev: int = -1 - ) -> "Version": - """Default of -1 allows us to tiebreak in favor of the most specific - number""" - return super(Version, cls).__new__(cls, major, minor, patch, rev) - - @classmethod - def from_str(cls, version: str) -> "Version": - """Create a Version object from a string. - - :param version: A period-delimited version string, max 4 segments. - - :raises TypeError: Raised if invalid arguments are given. - :raises ValueError: Raised if invalid arguments are given. - - :return: A Version object. - """ - return cls(*(list(map(int, version.split("."))))) - - def __gt__(self, other): - return 1 == self._compare_version(other) - - def __eq__(self, other): - return ( - self.major == other.major - and self.minor == other.minor - and self.patch == other.patch - and self.rev == other.rev - ) - - def __iter__(self): - """Iterate over the version (drop sentinels)""" - for n in (self.major, self.minor, self.patch, self.rev): - if n != -1: - yield str(n) - else: - break - - def __str__(self): - return ".".join(self) - - def __hash__(self): - return hash(str(self)) - - def _compare_version(self, other: "Version") -> int: - """Compare this Version to another. - - :param other: A Version object. - - :return: -1 if self > other, 1 if self < other, else 0 - """ - if self == other: - return 0 - if self.major > other.major: - return 1 - if self.minor > other.minor: - return 1 - if self.patch > other.patch: - return 1 - if self.rev > other.rev: - return 1 - return -1 - - -def should_log_deprecation(version: str, boundary_version: str) -> bool: - """Determine if a deprecation message should be logged. - - :param version: The version in which the thing was deprecated. - :param boundary_version: The version at which deprecation level is logged. - - :return: True if the message should be logged, else False. - """ - return boundary_version == "devel" or Version.from_str( - version - ) <= Version.from_str(boundary_version) - - -def deprecate( - *, - deprecated: str, - deprecated_version: str, - extra_message: Optional[str] = None, - schedule: int = 5, - skip_log: bool = False, -) -> DeprecationLog: - """Mark a "thing" as deprecated. Deduplicated deprecations are - logged. - - @param deprecated: Noun to be deprecated. Write this as the start - of a sentence, with no period. Version and extra message will - be appended. - @param deprecated_version: The version in which the thing was - deprecated - @param extra_message: A remedy for the user's problem. A good - message will be actionable and specific (i.e., don't use a - generic "Use updated key." if the user used a deprecated key). - End the string with a period. - @param schedule: Manually set the deprecation schedule. Defaults to - 5 years. Leave a comment explaining your reason for deviation if - setting this value. - @param skip_log: Return log text rather than logging it. 
Useful for - running prior to logging setup. - @return: NamedTuple containing log level and log message - DeprecationLog(level: int, message: str) - - Note: uses keyword-only arguments to improve legibility - """ - if not hasattr(deprecate, "log"): - setattr(deprecate, "log", set()) - message = extra_message or "" - dedup = hash(deprecated + message + deprecated_version + str(schedule)) - version = Version.from_str(deprecated_version) - version_removed = Version(version.major + schedule, version.minor) - deprecate_msg = ( - f"{deprecated} is deprecated in " - f"{deprecated_version} and scheduled to be removed in " - f"{version_removed}. {message}" - ).rstrip() - if not should_log_deprecation( - deprecated_version, features.DEPRECATION_INFO_BOUNDARY - ): - level = logging.INFO - elif hasattr(LOG, "deprecated"): - level = log.DEPRECATED - else: - level = logging.WARN - log_cache = getattr(deprecate, "log") - if not skip_log and dedup not in log_cache: - log_cache.add(dedup) - LOG.log(level, deprecate_msg) - return DeprecationLog(level, deprecate_msg) - - -def deprecate_call( - *, deprecated_version: str, extra_message: str, schedule: int = 5 -): - """Mark a "thing" as deprecated. Deduplicated deprecations are - logged. - - @param deprecated_version: The version in which the thing was - deprecated - @param extra_message: A remedy for the user's problem. A good - message will be actionable and specific (i.e., don't use a - generic "Use updated key." if the user used a deprecated key). - End the string with a period. - @param schedule: Manually set the deprecation schedule. Defaults to - 5 years. Leave a comment explaining your reason for deviation if - setting this value. - - Note: uses keyword-only arguments to improve legibility - """ - - def wrapper(func): - @functools.wraps(func) - def decorator(*args, **kwargs): - # don't log message multiple times - out = func(*args, **kwargs) - deprecate( - deprecated_version=deprecated_version, - deprecated=func.__name__, - extra_message=extra_message, - schedule=schedule, - ) - return out - - return decorator - - return wrapper - - def read_hotplug_enabled_file(paths: "Paths") -> dict: content: dict = {"scopes": []} try: diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 68175cd0ad9..bc3e6067ec4 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -11,7 +11,7 @@ "netbsd": "NetBSD", "openbsd": "openBSD", "openmandriva": "OpenMandriva admin", "photon": "PhotonOS", "ubuntu": "Ubuntu", "unknown": "Ubuntu"}) %} -{% set groups = ({"alpine": "adm, wheel", "arch": "wheel, users", +{% set groups = ({"alpine": "adm, wheel", "aosc": "wheel", "arch": "wheel, users", "azurelinux": "wheel", "debian": "adm, audio, cdrom, dialout, dip, floppy, netdev, plugdev, sudo, video", "gentoo": "users, wheel", "mariner": "wheel", @@ -62,7 +62,7 @@ disable_root: true "openmandriva", "photon", "TencentOS"] or is_rhel %} {% if is_rhel %} -mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.after=cloud-init.service,_netdev', '0', '2'] +mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev', '0', '2'] {% else %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% endif %} @@ -220,7 +220,7 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", +{% if variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "debian", 
"fedora", "freebsd", "gentoo", "mariner", "netbsd", "openbsd", "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", "TencentOS", "ubuntu"] or is_rhel %} @@ -238,7 +238,7 @@ system_info: {% else %} name: {{ variant }} {% endif %} -{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", +{% if variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "debian", "fedora", "gentoo", "mariner", "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", "TencentOS", "ubuntu", "unknown"] @@ -320,7 +320,7 @@ system_info: # Automatically discover the best ntp_client ntp_client: auto {% endif %} -{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", +{% if variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "debian", "fedora", "gentoo", "mariner", "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", "TencentOS", "ubuntu", "unknown"] @@ -368,7 +368,7 @@ system_info: {% endif %} {% if variant in ["debian", "ubuntu", "unknown"] %} ssh_svcname: ssh -{% elif variant in ["alpine", "amazon", "arch", "azurelinux", "fedora", +{% elif variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "fedora", "gentoo", "mariner", "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", "TencentOS"] or is_rhel %} diff --git a/debian/changelog b/debian/changelog index 8b19df48b80..e8e0255e54a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,12 @@ +cloud-init (24.2-0ubuntu1~22.04.2) UNRELEASED; urgency=medium + + * Upstream snapshot based on upstream/main at f93a6b5a. + * refresh patches: + - d/p/cli-retain-file-argument-as-main-cmd-arg.patch + - d/p/revert-551f560d-cloud-config-after-snap-seeding.patch + + -- James Falcon Tue, 06 Aug 2024 12:36:24 -0500 + cloud-init (24.2-0ubuntu1~22.04.1) jammy; urgency=medium * d/control: remove netifaces due to GH-4634 diff --git a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch index 98a3f1cd6aa..62ae666bec4 100644 --- a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch +++ b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch @@ -9,7 +9,7 @@ Bug: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/2064300 Last-Update: 2024-04-30 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py -@@ -121,6 +121,11 @@ def extract_fns(args): +@@ -147,6 +147,11 @@ def extract_fns(args): # since it would of broke if it couldn't have # read that file already... 
fn_cfgs = [] @@ -21,7 +21,7 @@ Last-Update: 2024-04-30 /etc/hosts diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 43b344184d4..fd95b3a81fb 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -34,14 +34,11 @@ datasource: # seedfrom: http://my.example.com/i-abcde/ seedfrom: None - # fs_label: the label on filesystems to be searched for NoCloud source - fs_label: cidata - # these are optional, but allow you to basically provide a datasource # right here user-data: | # This is the user-data verbatim - meta-data: + meta-data: | instance-id: i-87018aed local-hostname: myhost.internal diff --git a/doc/examples/cloud-config-yum-repo.txt b/doc/examples/cloud-config-yum-repo.txt index 6a4037e2462..cee26677b49 100644 --- a/doc/examples/cloud-config-yum-repo.txt +++ b/doc/examples/cloud-config-yum-repo.txt @@ -11,9 +11,10 @@ yum_repos: # Any repository configuration options # See: man yum.conf # - # At least one of 'baseurl' or 'metalink' is required! + # At least one of 'baseurl' or 'metalink' or 'mirrorlist' is required! baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch metalink: https://mirrors.fedoraproject.org/metalink?repo=epel-$releasever&arch=$basearch&infra=$infra&content=$contentdir + mirrorlist: https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever& enabled: false failovermethod: priority gpgcheck: true diff --git a/doc/examples/part-handler.txt b/doc/examples/part-handler.txt index 7cc356f6346..020c8302f80 100644 --- a/doc/examples/part-handler.txt +++ b/doc/examples/part-handler.txt @@ -1,22 +1,58 @@ #part-handler +"""This is a trivial example part-handler that creates a file with the path +specified in the payload. It performs no input checking or error handling. + +To use it, first save the file you are currently viewing into your current +working directory. Then run the following: +``` +$ echo '/var/tmp/my_path' > part +$ cloud-init devel make-mime -a part-handler.py:part-handler -a part:x-my-path --force > user-data +``` + +This will create a mime file with the contents of 'part' and the +part-handler. You can now pass 'user-data' to your cloud of choice. + +When run, cloud-init will have created an empty file at /var/tmp/my_path. +""" + +import pathlib +from typing import Any + +from cloudinit.cloud import Cloud + + def list_types(): - # return a list of mime-types that are handled by this module - return(["text/plain", "text/go-cubs-go"]) - -def handle_part(data, ctype, filename, payload): - # data: the cloudinit object - # ctype: '__begin__', '__end__', or the specific mime-type of the part - # filename: the filename for the part, or dynamically generated part if - # no filename is given attribute is present - # payload: the content of the part (empty for begin or end) + """Return a list of mime-types that are handled by this module.""" + return ["text/x-my-path"] + + +def handle_part(data: Cloud, ctype: str, filename: str, payload: Any): + """Handle a part with the given mime-type. + + This function will get called multiple times. The first time is + to allow any initial setup needed to handle parts. It will then get + called once for each part matching the mime-type returned by `list_types`. + Finally, it will get called one last time to allow for any final + teardown. + + :data: A `Cloud` instance. This will be the same instance for each call + to handle_part. 
+ :ctype: '__begin__', '__end__', or the mime-type + (for this example 'text/x-my-path') of the part + :filename: The filename for the part as defined in the MIME archive, + or dynamically generated part if no filename is given + :payload: The content of the part. This will be + `None` when `ctype` is '__begin__' or '__end__'. + """ if ctype == "__begin__": - print("my handler is beginning") - return + # Any custom setup needed before handling payloads + return + if ctype == "__end__": - print("my handler is ending") - return + # Any custom teardown needed after handling payloads can happen here + return - print(f"==== received ctype={ctype} filename={filename} ====") - print(payload) - print(f"==== end ctype={ctype} filename={filename}") + # If we've made it here, we're dealing with a real payload, so handle + # it appropriately + pathlib.Path(payload.strip()).touch() diff --git a/doc/module-docs/cc_mounts/data.yaml b/doc/module-docs/cc_mounts/data.yaml index 751b301d501..18193f062d3 100644 --- a/doc/module-docs/cc_mounts/data.yaml +++ b/doc/module-docs/cc_mounts/data.yaml @@ -18,7 +18,7 @@ cc_mounts: .. code-block:: yaml mounts: - - ["ephemeral0", "/mnt", "auto", "defaults,nofail,x-systemd.after=cloud-init.service", "0", "2"] + - ["ephemeral0", "/mnt", "auto", "defaults,nofail,x-systemd.after=cloud-init-network.service", "0", "2"] - ["swap", "none", "swap", "sw", "0", "0"] In order to remove a previously-listed mount, an entry can be added to the @@ -32,7 +32,7 @@ cc_mounts: .. code-block:: yaml - mount_default_fields: [none, none, "auto", "defaults,nofail,x-systemd.after=cloud-init.service", "0", "2"] + mount_default_fields: [none, none, "auto", "defaults,nofail,x-systemd.after=cloud-init-network.service", "0", "2"] Non-systemd init systems will vary in ``mount_default_fields``. diff --git a/doc/module-docs/cc_runcmd/example1.yaml b/doc/module-docs/cc_runcmd/example1.yaml index 03812f926e9..8c5efee96ab 100644 --- a/doc/module-docs/cc_runcmd/example1.yaml +++ b/doc/module-docs/cc_runcmd/example1.yaml @@ -4,4 +4,3 @@ runcmd: - [sh, -xc, 'echo $(date) '': hello world!'''] - [sh, -c, echo "=========hello world'========="] - ls -l /root -- [wget, 'http://example.org', -O, /tmp/index.html] diff --git a/doc/module-docs/cc_write_files/data.yaml b/doc/module-docs/cc_write_files/data.yaml index c59b8e2ea75..3d4b04da492 100644 --- a/doc/module-docs/cc_write_files/data.yaml +++ b/doc/module-docs/cc_write_files/data.yaml @@ -3,8 +3,9 @@ cc_write_files: Write out arbitrary content to files, optionally setting permissions. Parent folders in the path are created if absent. Content can be specified in plain text or binary. Data encoded with either base64 or binary gzip - data can be specified and will be decoded before being written. For empty - file creation, content can be omitted. + data can be specified and will be decoded before being written. Data can + also be loaded from an arbitrary URI. For empty file creation, content can + be omitted. .. note:: If multi-line data is provided, care should be taken to ensure it @@ -36,5 +37,10 @@ cc_write_files: Example 5: Defer writing the file until after the package (Nginx) is installed and its user is created. file: cc_write_files/example5.yaml + - comment: > + Example 6: Retrieve file contents from a URI source, rather than inline. + Especially useful with an external config-management repo, or for large + binaries. 
+    file: cc_write_files/example6.yaml
 name: Write Files
 title: Write arbitrary files
diff --git a/doc/module-docs/cc_write_files/example6.yaml b/doc/module-docs/cc_write_files/example6.yaml
new file mode 100644
index 00000000000..40112a58e17
--- /dev/null
+++ b/doc/module-docs/cc_write_files/example6.yaml
@@ -0,0 +1,9 @@
+#cloud-config
+write_files:
+- source:
+    uri: https://gitlab.example.com/some_ci_job/artifacts/hello
+    headers:
+      Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
+      User-Agent: cloud-init on myserver.example.com
+  path: /usr/bin/hello
+  permissions: '0755'
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 1ca6a85a208..cfa1f63df63 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -44,6 +44,7 @@
     "sphinx.ext.autosectionlabel",
     "sphinx.ext.viewcode",
     "sphinxcontrib.datatemplates",
+    "sphinxcontrib.mermaid",
     "sphinxcontrib.spelling",
 ]
 
@@ -55,6 +56,7 @@
 templates_path = ["templates"]
 # Uses case-independent spelling matches from doc/rtd/spelling_word_list.txt
 spelling_filters = ["spelling.WordListFilter"]
+spelling_word_list_filename = "spelling_word_list.txt"
 
 # The suffix of source filenames.
 source_suffix = ".rst"
diff --git a/doc/rtd/development/datasource_creation.rst b/doc/rtd/development/datasource_creation.rst
index 98f9f88419a..1b6e525b122 100644
--- a/doc/rtd/development/datasource_creation.rst
+++ b/doc/rtd/development/datasource_creation.rst
@@ -170,6 +170,8 @@ Datasources included in upstream cloud-init benefit from ongoing
 maintenance, compatibility with the rest of the codebase, and security fixes
 by the upstream development team.
 
+If this is not possible, one can add
+:ref:`custom out-of-tree datasources` to cloud-init.
 
 .. _make-mime: https://cloudinit.readthedocs.io/en/latest/explanation/instancedata.html#storage-locations
 .. _DMI: https://www.dmtf.org/sites/default/files/standards/documents/DSP0005.pdf
diff --git a/doc/rtd/development/integration_tests.rst b/doc/rtd/development/integration_tests.rst
index 5fe5845dd4b..aecb0224455 100644
--- a/doc/rtd/development/integration_tests.rst
+++ b/doc/rtd/development/integration_tests.rst
@@ -27,30 +27,169 @@ Test execution
 ==============
 
 Test execution happens via ``pytest``. A ``tox`` definition exists to run
-integration tests. To run all integration tests, you would run:
+integration tests. When using this, normal ``pytest`` arguments can be
+passed to the ``tox`` command by appending them after the ``--``. See the
+following commands for examples.
 
-.. code-block:: bash
+.. tab-set::
 
-    $ tox -e integration-tests
+    .. tab-item:: All integration tests
 
-``pytest`` arguments may also be passed. For example:
+        .. code-block:: bash
+
+            tox -e integration-tests
+
+    .. tab-item:: Tests inside file or directory
+
+        .. code-block:: bash
+
+            tox -e integration-tests -- tests/integration_tests/modules/test_combined.py
+
+    .. tab-item:: A specific test
+
+        .. code-block:: bash
+
+            tox -e integration-tests -- tests/integration_tests/modules/test_combined.py::test_bootcmd
 
-.. code-block:: bash
 
-    $ tox -e integration-tests tests/integration_tests/modules/test_combined.py
 
 Configuration
 =============
 
 All possible configuration values are defined in
-`tests/integration_tests/integration_settings.py`_. Defaults can be overridden
-by supplying values in :file:`tests/integration_tests/user_settings.py` or by
+`tests/integration_tests/integration_settings.py`_. Look in this file for
+the full list of variables that are available and for context on what each
+variable does and what the default values are.
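+
+For example, a few representative settings (the values shown here are
+illustrative; consult the file itself for the authoritative defaults):
+
+.. code-block:: python
+
+    # excerpt in the style of integration_settings.py
+    PLATFORM = "lxd_container"  # which cloud/platform tests run against
+    KEEP_INSTANCE = False       # whether to leave the test instance running
+    COLLECT_LOGS = "ON_ERROR"   # when to pull cloud-init logs from the instance
+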
+Defaults can be overridden by supplying values in
+:file:`tests/integration_tests/user_settings.py` or by
 providing an environment variable of the same name prepended with
 ``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting:
 
 .. code-block:: bash
 
-    CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
+    CLOUD_INIT_PLATFORM='ec2' tox -e integration-tests -- tests/integration_tests/
+
+
+Common integration test run configurations
+==========================================
+
+
+Keep instance after test run
+----------------------------
+
+By default, the test instance is torn down after the test run. To keep
+the instance running after the test run, set the ``KEEP_INSTANCE`` variable
+to ``True``.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_KEEP_INSTANCE=True tox -e integration-tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            KEEP_INSTANCE = True
+
+
+Use in-place cloud-init source code
+-----------------------------------
+
+The simplest way to run an integration test against your current cloud-init
+changes is to set ``CLOUD_INIT_SOURCE`` to ``IN_PLACE``. This works ONLY
+on LXD containers. This will mount the source code as-is directly into
+the container to override the pre-existing cloud-init code within the
+container. This won't work for non-local LXD remotes and won't run any
+installation code since the source code is mounted directly.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_CLOUD_INIT_SOURCE=IN_PLACE tox -e integration-tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            CLOUD_INIT_SOURCE = 'IN_PLACE'
+
+
+Collecting logs after test run
+------------------------------
+
+By default, logs are collected only when a test fails, by running ``cloud-init
+collect-logs`` on the instance. To collect logs after every test run, set the
+``COLLECT_LOGS`` variable to ``ALWAYS``.
+
+By default, the logs are collected to the ``/tmp/cloud_init_test_logs``
+directory. To change the directory, set the ``LOCAL_LOG_PATH`` variable to
+the desired path.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_COLLECT_LOGS=ALWAYS CLOUD_INIT_LOCAL_LOG_PATH=/tmp/your-local-directory tox -e integration-tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            COLLECT_LOGS = "ALWAYS"
+            LOCAL_LOG_PATH = "/tmp/logs"
+
+
+Advanced test reporting and profiling
+-------------------------------------
+
+For advanced test reporting, set the ``INCLUDE_COVERAGE`` variable to ``True``.
+This will generate a coverage report for the integration test run, and the
+report will be stored in an ``html`` directory inside the directory specified
+by ``LOCAL_LOG_PATH``.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_INCLUDE_COVERAGE=True tox -e integration-tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            INCLUDE_COVERAGE = True
+
+
+Additionally, for profiling the integration tests, set the ``INCLUDE_PROFILE``
+variable to ``True``. This will generate a profile report for the integration
+test run, and the report will be stored in the directory specified by
+``LOCAL_LOG_PATH``.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_INCLUDE_PROFILE=True tox -e integration-tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            INCLUDE_PROFILE = True
+
 
 Cloud interaction
 =================
@@ -65,6 +204,39 @@ For a minimal setup using LXD, write the following to
 
     [lxd]
 
+
+For more information on configuring pycloudlib, see the
+`pycloudlib configuration documentation`_.
+
+To test against a specific cloud, first ensure that your pycloudlib
+configuration is set up correctly. Then modify the ``PLATFORM`` variable to
+be one of:
+
+- ``azure``: Microsoft Azure
+- ``ec2``: Amazon EC2
+- ``gce``: Google Compute Engine
+- ``ibm``: IBM Cloud
+- ``lxd_container``: LXD container
+- ``lxd_vm``: LXD VM
+- ``oci``: Oracle Cloud Infrastructure
+- ``openstack``: OpenStack
+- ``qemu``: QEMU
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_PLATFORM='lxd_container' tox -e integration-tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            PLATFORM = 'lxd_container'
+
+
 Image selection
 ===============
@@ -87,14 +259,32 @@ tests against the image in question. If it's a RHEL8 image, then we would
 expect Ubuntu-specific tests to fail (and vice versa).
 
 To address this, a full image specification can be given. This is of
-the form: ``<image_id>[::<os>[::<release>]]`` where ``image_id`` is a
+the form: ``<image_id>[::<os>[::<release>[::<version>]]]`` where ``image_id`` is a
 cloud's image ID, ``os`` is the OS name, and ``release`` is the OS
-release name. So, for example, Ubuntu 18.04 (Bionic Beaver) on LXD is
-``ubuntu:bionic::ubuntu::bionic`` or RHEL8 on Amazon is
+release name. So, for example, Ubuntu 24.04 LTS (Noble Numbat) on LXD is
+``ubuntu:noble::ubuntu::noble::24.04`` or RHEL8 on Amazon is
 ``ami-justanexample::rhel::8``. When a full specification is given, only
 tests which are intended for use on that OS and release will be executed.
 
+To run integration tests on a specific image, modify the ``OS_IMAGE``
+variable to be the desired image specification.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_OS_IMAGE='jammy' tox -e integration-tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            OS_IMAGE = 'jammy'
+
 
 Image setup
 ===========
@@ -108,6 +298,29 @@ via fixture. Image setup roughly follows these steps:
 
 * Take a snapshot of the instance to be used as a new image from which
   new instances can be launched.
 
+
+Keep image after test run
+-------------------------
+
+By default, the image created during the test run is torn down after
+the test run. If further debugging is needed, you can keep the image snapshot
+for further use by setting the ``KEEP_IMAGE`` variable to ``True``.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_KEEP_IMAGE=True tox -e integration-tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            KEEP_IMAGE = True
+
+
 Test setup
 ==========
@@ -155,7 +368,7 @@ The ``client`` fixture should be used for most test cases. It ensures:
 
 ``module_client`` and ``class_client`` fixtures also exist for the
 purpose of running multiple tests against a single launched instance.
 They provide the exact same functionality as ``client``, but are
-scoped to the module or class respectively.
+scoped to the module or class, respectively.
 
 ``session_cloud``
 -----------------
@@ -213,3 +426,4 @@ Customizing the launch arguments before launching an instance manually:
 
 .. _first be configured: https://pycloudlib.readthedocs.io/en/latest/configuration.html#configuration
 .. _Pytest marks: https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/conftest.py#L220-L224
 .. _IntegrationCloud: https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/clouds.py#L102
+.. _pycloudlib configuration documentation: https://pycloudlib.readthedocs.io/en/latest/configuration.html
diff --git a/doc/rtd/development/module_creation.rst b/doc/rtd/development/module_creation.rst
index 32240ab3e91..3e10a1ee00b 100644
--- a/doc/rtd/development/module_creation.rst
+++ b/doc/rtd/development/module_creation.rst
@@ -163,6 +163,17 @@ in the correct location based on dependencies. If your module has no particular
 dependencies or is not necessary for a later boot stage, it should be placed
 in the ``cloud_final_modules`` section before the ``final-message`` module.
 
+Benefits of including your config module in upstream cloud-init
+===============================================================
+
+Config modules included in upstream cloud-init benefit from ongoing
+maintenance, compatibility with the rest of the codebase, and security
+fixes by the upstream development team.
+
+If this is not possible, one can add
+:ref:`custom out-of-tree config modules`
+to cloud-init.
 
 .. _MetaSchema: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/config/schema.py#L58
 .. _OSFAMILIES: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/distros/__init__.py#L35
diff --git a/doc/rtd/explanation/boot.rst b/doc/rtd/explanation/boot.rst
index 6aff2856b66..ff3b65ebd28 100644
--- a/doc/rtd/explanation/boot.rst
+++ b/doc/rtd/explanation/boot.rst
@@ -3,13 +3,42 @@
 Boot stages
 ***********
 
-There are five stages to boot:
+There are five stages to boot, which are run sequentially: ``Detect``,
+``Local``, ``Network``, ``Config`` and ``Final``.
 
-1. Detect
-2. Local
-3. Network
-4. Config
-5. Final
+Visual representation of cloud-init boot stages with respect to network config
+and system accessibility:
+
+.. mermaid::
+
+   graph TB
+
+   D["Detect"] ---> L
+
+   L --> NU([Network up])
+   L & NU --> N
+   subgraph L["Local"]
+   FI[Fetch IMDS]
+   end
+
+   N --> NO([Network online])
+   N & NO --> C
+   N --> S([SSH])
+   N --> Login([Login])
+
+   subgraph N["Network"]
+   cloud_init_modules
+   end
+   %% cloud_config_modules
+
+   subgraph C["Config"]
+   cloud_config_modules
+   end
+
+   C --> F
+   subgraph F["Final"]
+   cloud_final_modules
+   end
 
 .. _boot-Detect:
 
@@ -79,11 +108,11 @@ Network
 =======
 
 +------------------+----------------------------------------------------------+
-| systemd service  | ``cloud-init.service``                                   |
+| systemd service  | ``cloud-init-network.service``                           |
 +---------+--------+----------------------------------------------------------+
 | runs             | after local stage and configured networking is up        |
 +---------+--------+----------------------------------------------------------+
-| blocks           | as much of remaining boot as possible                    |
+| blocks           | majority of remaining boot (e.g. SSH and console login)  |
 +---------+--------+----------------------------------------------------------+
 | modules          | *cloud_init_modules* in ``/etc/cloud/cloud.cfg``         |
 +---------+--------+----------------------------------------------------------+
@@ -108,9 +137,12 @@ mounted, including ones that have stale (previous instance) references in
 :file:`/etc/fstab`.
As such, entries in :file:`/etc/fstab` other than those necessary for cloud-init to run should not be done until after this stage. -A part-handler and :ref:`boothooks` +A part-handler and :ref:`boothooks` will run at this stage. +After this stage completes, expect to be able to access the system via serial +console login or SSH. + .. _boot-Config: Config diff --git a/doc/rtd/explanation/format.rst b/doc/rtd/explanation/format.rst index c1eda9006d9..bed2b61af11 100644 --- a/doc/rtd/explanation/format.rst +++ b/doc/rtd/explanation/format.rst @@ -3,18 +3,53 @@ User data formats ***************** -User data is opaque configuration data provided by a platform to an instance at -launch configure the instance. User data can be one of the following types. +User data is configuration data provided by a user of a cloud platform to an +instance at launch. User data can be passed to cloud-init in any of many +formats documented here. + +Configuration types +=================== + +User data formats can be categorized into those that directly configure the +instance, and those that serve as a container, template, or means to obtain +or modify another configuration. + +Formats that directly configure the instance: + +- `Cloud config data`_ +- `User data script`_ +- `Cloud boothook`_ + +Formats that deal with other user data formats: + +- `Include file`_ +- `Jinja template`_ +- `MIME multi-part archive`_ +- `Cloud config archive`_ +- `Part handler`_ +- `Gzip compressed content`_ .. _user_data_formats-cloud_config: Cloud config data ================= -Cloud-config is the preferred user data format. The cloud config format is a -declarative syntax which uses `YAML version 1.1`_ with keys which describe -desired instance state. Cloud-config can be used to define how an instance -should be configured in a human-friendly format. +Example +------- + +.. code-block:: yaml + + #cloud-config + password: password + chpasswd: + expire: False + +Explanation +----------- + +Cloud-config can be used to define how an instance should be configured +in a human-friendly format. The cloud config format uses `YAML`_ with +keys which describe desired instance state. These things may include: @@ -24,93 +59,190 @@ These things may include: - importing certain SSH keys or host keys - *and many more...* -See the :ref:`yaml_examples` section for a commented set of examples of -supported cloud config formats. +Many modules are available to process cloud-config data. These modules +may run once per instance, every boot, or once ever. See the associated +module to determine the run frequency. -Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when -using a MIME archive. - -.. note:: - Cloud config data can also render cloud instance metadata variables using - :ref:`jinja templates `. +For more information, see the cloud config +:ref:`example configurations ` or the cloud config +:ref:`modules reference`. .. _user_data_script: User data script ================ -Typically used by those who just want to execute a shell script. +Example +------- + +.. code-block:: shell + + #!/bin/sh + echo "Hello World" > /var/tmp/output.txt + +Explanation +----------- + +A user data script is a single script to be executed once per instance. +User data scripts are run relatively late in the boot process, during +cloud-init's :ref:`final stage` as part of the +:ref:`cc_scripts_user` module. When run, +the environment variable ``INSTANCE_ID`` is set to the current instance ID +for use within the script. 
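+
+For illustration, a minimal script using this variable (the output path is
+arbitrary, not anything cloud-init requires):
+
+.. code-block:: shell
+
+   #!/bin/sh
+   # INSTANCE_ID is exported by cloud-init before this script runs
+   echo "booted as instance ${INSTANCE_ID}" > /var/tmp/instance_info.txt
+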
-Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME
-archive.
+.. _user_data_formats-cloud_boothook:
 
-User data scripts can optionally render cloud instance metadata variables using
-:ref:`jinja templates `.
+Cloud boothook
+==============
 
-Example script
+Simple Example
 --------------
-Create a script file :file:`myscript.sh` that contains the following:
+.. code-block:: shell
 
-.. code-block::
+   #cloud-boothook
+   #!/bin/sh
+   echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
+
+Example of once-per-instance script
+-----------------------------------
 
+.. code-block:: bash
+
+   #cloud-boothook
    #!/bin/sh
-    echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt
 
-Now run:
+   PERSIST_ID=/var/lib/cloud/first-instance-id
+   _id=""
+   if [ -r "$PERSIST_ID" ]; then
+       _id=$(cat "$PERSIST_ID")
+   fi
 
-.. code-block:: shell-session
+   if [ -z "$_id" ] || [ "$INSTANCE_ID" != "$_id" ]; then
+       echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts
+   fi
+   echo "$INSTANCE_ID" > "$PERSIST_ID"
 
-    $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9
+Explanation
+-----------
 
-Kernel command line
-===================
+A cloud boothook is similar to a :ref:`user data script`
+in that it is a script run on boot. When run,
+the environment variable ``INSTANCE_ID`` is set to the current instance ID
+for use within the script.
 
-When using the NoCloud datasource, users can pass user data via the kernel
-command line parameters. See the :ref:`NoCloud datasource`
-and :ref:`explanation/kernel-command-line:Kernel command line` documentation
-for more details.
+The boothook is different in that:
 
-Gzip compressed content
-=======================
+* It is run very early in boot, during the :ref:`network` stage,
+  before any cloud-init modules are run.
+* It is run on every boot.
 
-Content found to be gzip compressed will be uncompressed.
-The uncompressed data will then be used as if it were not compressed.
-This is typically useful because user data is limited to ~16384 [#]_ bytes.
+Include file
+============
+
+Example
+-------
+
+.. code-block:: text
+
+   #include
+   https://raw.githubusercontent.com/canonical/cloud-init/403f70b930e3ce0f05b9b6f0e1a38d383d058b53/doc/examples/cloud-config-run-cmds.txt
+   https://raw.githubusercontent.com/canonical/cloud-init/403f70b930e3ce0f05b9b6f0e1a38d383d058b53/doc/examples/cloud-config-boot-cmds.txt
+
+Explanation
+-----------
+
+An include file contains a list of URLs, one per line. Each of the URLs will
+be read, and their content can be any kind of user data format, both base
+config and meta config. If an error occurs reading a file, the remaining files
+will not be read.
+
+Jinja template
+==============
+
+Example cloud-config
+--------------------
+
+.. code-block:: yaml
+
+   ## template: jinja
+   #cloud-config
+   runcmd:
+     - echo 'Running on {{ v1.cloud_name }}' > /var/tmp/cloud_name
+
+Example user data script
+------------------------
+
+.. code-block:: shell
+
+   ## template: jinja
+   #!/bin/sh
+   echo 'Current instance id: {{ v1.instance_id }}' > /var/tmp/instance_id
+
+Explanation
+-----------
+
+`Jinja templating `_ may be used for
+cloud-config and user data scripts. Any
+:ref:`instance-data variables` may be used
+as jinja template variables. Any jinja templated configuration must contain
+the original header along with the new jinja header above it.
+
+.. note::
+   Use of Jinja templates is ONLY supported for cloud-config and user data
+   scripts.
Jinja templates are not supported for cloud-boothooks or + meta configs. + +.. _user_data_formats-mime_archive: MIME multi-part archive ======================= -This list of rules is applied to each part of this multi-part file. +Example +------- + +.. code-block:: + + Content-Type: multipart/mixed; boundary="===============2389165605550749110==" + MIME-Version: 1.0 + Number-Attachments: 2 + + --===============2389165605550749110== + Content-Type: text/cloud-boothook; charset="us-ascii" + MIME-Version: 1.0 + Content-Transfer-Encoding: 7bit + Content-Disposition: attachment; filename="part-001" + + #!/bin/sh + echo "this is from a boothook." > /var/tmp/boothook.txt + + --===============2389165605550749110== + Content-Type: text/cloud-config; charset="us-ascii" + MIME-Version: 1.0 + Content-Transfer-Encoding: 7bit + Content-Disposition: attachment; filename="part-002" + + bootcmd: + - echo "this is from a cloud-config." > /var/tmp/bootcmd.txt + --===============2389165605550749110==-- + +Explanation +----------- + Using a MIME multi-part file, the user can specify more than one type of data. For example, both a user data script and a cloud-config type could be specified. -Supported content-types are listed from the ``cloud-init`` subcommand +Each part must specify a valid +:ref:`content types`. Supported content-types +may also be listed from the ``cloud-init`` subcommand :command:`make-mime`: .. code-block:: shell-session $ cloud-init devel make-mime --list-types -Example output: - -.. code-block:: - - cloud-boothook - cloud-config - cloud-config-archive - cloud-config-jsonp - jinja2 - part-handler - x-include-once-url - x-include-url - x-shellscript - x-shellscript-per-boot - x-shellscript-per-instance - x-shellscript-per-once - Helper subcommand to generate MIME messages ------------------------------------------- @@ -121,8 +253,7 @@ The :command:`make-mime` subcommand takes pairs of (filename, "text/" mime subtype) separated by a colon (e.g., ``config.yaml:cloud-config``) and emits a MIME multipart message to :file:`stdout`. -Examples --------- +**MIME subcommand Examples** Create user data containing both a cloud-config (:file:`config.yaml`) and a shell script (:file:`script.sh`) @@ -141,66 +272,55 @@ Create user data containing 3 shell scripts: $ cloud-init devel make-mime -a always.sh:x-shellscript-per-boot -a instance.sh:x-shellscript-per-instance -a once.sh:x-shellscript-per-once -``include`` file -================ - -This content is an :file:`include` file. -The file contains a list of URLs, one per line. Each of the URLs will be read -and their content will be passed through this same set of rules, i.e., the -content read from the URL can be gzipped, MIME multi-part, or plain text. If -an error occurs reading a file the remaining files will not be read. +Cloud config archive +==================== -Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using -a MIME archive. +Example +------- -``cloud-boothook`` -================== +.. code-block:: shell -This content is `boothook` data. It is stored in a file under -:file:`/var/lib/cloud` and executed immediately. This is the earliest `hook` -available. Note, that there is no mechanism provided for running only once. The -`boothook` must take care of this itself. + #cloud-config-archive + - type: "text/cloud-boothook" + content: | + #!/bin/sh + echo "this is from a boothook." > /var/tmp/boothook.txt + - type: "text/cloud-config" + content: | + bootcmd: + - echo "this is from a cloud-config." 
> /var/tmp/bootcmd.txt
 
-It is provided with the instance id in the environment variable
-``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance'
-type of functionality.
+Explanation
+-----------
 
-Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when
-using a MIME archive.
+A cloud-config-archive is a way to specify more than one type of data
+using YAML. Since a MIME multi-part archive can be somewhat unwieldy to
+build by hand and otherwise requires a cloud-init helper utility, the
+cloud-config-archive provides a simpler YAML-based alternative to the
+MIME multi-part archive.
 
-Part-handler
-============
+The format is a list of dictionaries.
 
-This is a `part-handler`: It contains custom code for either supporting new
-mime-types in multi-part user data, or overriding the existing handlers for
-supported mime-types. It will be written to a file in
-:file:`/var/lib/cloud/data` based on its filename (which is generated).
+Required fields:
 
-This must be Python code that contains a ``list_types`` function and a
-``handle_part`` function. Once the section is read the ``list_types`` method
-will be called. It must return a list of mime-types that this `part-handler`
-handles. Since MIME parts are processed in order, a `part-handler` part
-must precede any parts with mime-types it is expected to handle in the same
-user data.
+* ``type``: The :ref:`Content-Type`
+  identifier for the type of user data in ``content``
+* ``content``: The user data configuration
 
-The ``handle_part`` function must be defined like:
+Optional fields:
 
-.. code-block:: python
+* ``launch-index``: The EC2 Launch-Index (if applicable)
+* ``filename``: This field is only used if using a user data format that
+  requires a filename in a MIME part. This is unrelated to any local system
+  file.
 
-    def handle_part(data, ctype, filename, payload):
-        # data = the cloudinit object
-        # ctype = "__begin__", "__end__", or the mime-type of the part that is being handled.
-        # filename = the filename of the part (or a generated filename if none is present in mime data)
-        # payload = the parts' content
+All other fields will be interpreted as a MIME part header.
 
-``Cloud-init`` will then call the ``handle_part`` function once before it
-handles any parts, once per part received, and once after all parts have been
-handled. The ``'__begin__'`` and ``'__end__'`` sentinels allow the part
-handler to do initialisation or teardown before or after receiving any parts.
+.. _user_data_formats-part_handler:
 
-Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when
-using a MIME archive.
+Part handler
+============
 
 Example
 -------
@@ -209,17 +329,63 @@ Example
    :language: python
    :linenos:
 
-Also, `this blog post`_ offers another example for more advanced usage.
-Disabling user data
-===================
+Explanation
+-----------
+
+A part handler contains custom code for either supporting new
+mime-types in multi-part user data or for overriding the existing handlers for
+supported mime-types.
+
+See the :ref:`custom part handler` reference documentation
+for details on writing custom handlers along with an annotated example.
+
+`This blog post`_ offers another example for more advanced usage.
+
+Gzip compressed content
+=======================
+
+Content found to be gzip compressed will be uncompressed.
+The uncompressed data will then be used as if it were not compressed.
+This is typically useful because user data size may be limited based on
+the cloud platform.
+
+.. _user_data_formats-content_types:
+
+Headers and content types
+=========================
+
+In order for cloud-init to recognize which user data format is being used,
+the user data must contain a header. Additionally, if the user data
+is being passed as a multi-part message, such as MIME, cloud-config-archive,
+or part-handler, the content-type for each part must also be set
+appropriately.
+
+The table below lists the headers and content types for each user data format.
+Note that gzip compressed content is not represented here, as it gets passed
+as binary data and so may be processed automatically.
+
++--------------------+-----------------------------+-------------------------+
+|User data format    |Header                       |Content-Type             |
++====================+=============================+=========================+
+|Cloud config data   |#cloud-config                |text/cloud-config        |
++--------------------+-----------------------------+-------------------------+
+|User data script    |#!                           |text/x-shellscript       |
++--------------------+-----------------------------+-------------------------+
+|Cloud boothook      |#cloud-boothook              |text/cloud-boothook      |
++--------------------+-----------------------------+-------------------------+
+|MIME multi-part     |Content-Type: multipart/mixed|multipart/mixed          |
++--------------------+-----------------------------+-------------------------+
+|Cloud config archive|#cloud-config-archive        |text/cloud-config-archive|
++--------------------+-----------------------------+-------------------------+
+|Jinja template      |## template: jinja           |text/jinja2              |
++--------------------+-----------------------------+-------------------------+
+|Include file        |#include                     |text/x-include-url       |
++--------------------+-----------------------------+-------------------------+
+|Part handler        |#part-handler                |text/part-handler        |
++--------------------+-----------------------------+-------------------------+
 
-``Cloud-init`` can be configured to ignore any user data provided to instance.
-This allows custom images to prevent users from accidentally breaking closed
-appliances. Setting ``allow_userdata: false`` in the configuration will disable
-``cloud-init`` from processing user data.
 
 .. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
-.. _YAML version 1.1: https://yaml.org/spec/1.1/current.html
-.. [#] See your cloud provider for applicable user-data size limitations...
-.. _this blog post: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
+.. _YAML: https://yaml.org/spec/1.1/current.html
+.. _This blog post: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
diff --git a/doc/rtd/explanation/instancedata.rst b/doc/rtd/explanation/instancedata.rst
index 650efa79452..d2aadc083ee 100644
--- a/doc/rtd/explanation/instancedata.rst
+++ b/doc/rtd/explanation/instancedata.rst
@@ -165,7 +165,10 @@ Storage locations
   unredacted JSON blob.
 * :file:`/run/cloud-init/combined-cloud-config.json`: root-readable
   unredacted JSON blob. Any meta-data, vendor-data and user-data overrides
-  are applied to the :file:`/run/cloud-init/combined-cloud-config.json` config values.
+  are applied to the :file:`/run/cloud-init/combined-cloud-config.json` config
+  values.
+
+.. 
_instance_metadata-keys: :file:`instance-data.json` top level keys ----------------------------------------- diff --git a/doc/rtd/explanation/vendordata.rst b/doc/rtd/explanation/vendordata.rst index 621fcdeb3d9..a2340c2fab9 100644 --- a/doc/rtd/explanation/vendordata.rst +++ b/doc/rtd/explanation/vendordata.rst @@ -20,19 +20,7 @@ caveats: required for the instance to run, then vendor data should not be used. 4. User-supplied cloud-config is merged over cloud-config from vendor data. -Users providing cloud-config data can use the ``#cloud-config-jsonp`` method -to more finely control their modifications to the vendor-supplied -cloud-config. For example, if both vendor and user have provided ``runcmd`` -then the default merge handler will cause the user's ``runcmd`` to override -the one provided by the vendor. To append to ``runcmd``, the user could better -provide multi-part input with a ``cloud-config-jsonp`` part like: - -.. code:: yaml - - #cloud-config-jsonp - [{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}] - -Further, we strongly advise vendors to not "be evil". By evil, we mean any +Further, we strongly advise vendors to ensure you protect against any action that could compromise a system. Since users trust you, please take care to make sure that any vendor data is safe, atomic, idempotent and does not put your users at risk. diff --git a/doc/rtd/howto/debugging.rst b/doc/rtd/howto/debugging.rst index c8b2a2634bc..546e8dd9f45 100644 --- a/doc/rtd/howto/debugging.rst +++ b/doc/rtd/howto/debugging.rst @@ -55,7 +55,7 @@ Cloud-init did not run .. code-block:: - systemctl status cloud-init-local.service cloud-init.service\ + systemctl status cloud-init-local.service cloud-init-network.service\ cloud-config.service cloud-final.service Cloud-init may have started to run, but not completed. This shows how many, diff --git a/doc/rtd/howto/run_cloud_init_locally.rst b/doc/rtd/howto/run_cloud_init_locally.rst index 0111bc1da42..2510eadd067 100644 --- a/doc/rtd/howto/run_cloud_init_locally.rst +++ b/doc/rtd/howto/run_cloud_init_locally.rst @@ -70,6 +70,12 @@ Download an Ubuntu image to run: wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img +.. note:: + This example uses emulated CPU instructions on non-x86 hosts, so it may be + slow. To make it faster on non-x86 architectures, one can change the image + type and :spelling:ignore:`qemu-system-` command name to match the + architecture of your host machine. + Boot the image with the ISO attached ------------------------------------ @@ -78,8 +84,9 @@ Boot the cloud image with our configuration, :file:`seed.img`, to QEMU: .. code-block:: shell-session $ qemu-system-x86_64 -m 1024 -net nic -net user \ - -hda jammy-server-cloudimg-amd64.img \ - -hdb seed.img + -drive file=jammy-server-cloudimg-amd64.img,index=0,format=qcow2,media=disk \ + -drive file=seed.img,index=1,media=cdrom \ + -machine accel=kvm:tcg The now-booted image will allow for login using the password provided above. diff --git a/doc/rtd/reference/base_config_reference.rst b/doc/rtd/reference/base_config_reference.rst index 9686d456d11..2d13675e68c 100644 --- a/doc/rtd/reference/base_config_reference.rst +++ b/doc/rtd/reference/base_config_reference.rst @@ -28,6 +28,8 @@ distribution supported by ``cloud-init``. Base configuration keys ======================= +.. _base_config_module_keys: + Module keys ----------- @@ -221,6 +223,8 @@ Other keys The :ref:`network configuration` to be applied to this instance. +.. 
_base_config_datasource_pkg_list: + ``datasource_pkg_list`` ^^^^^^^^^^^^^^^^^^^^^^^ @@ -263,6 +267,14 @@ Format is a dict with ``enabled`` and ``prefix`` keys: ``vendor_data``. * ``prefix``: A path to prepend to any ``vendor_data``-provided script. +``allow_userdata`` +^^^^^^^^^^^^^^^^^^ + +A boolean value to disable the use of user data. +This allows custom images to prevent users from accidentally breaking closed +appliances. Setting ``allow_userdata: false`` in the configuration will disable +``cloud-init`` from processing user data. + ``manual_cache_clean`` ^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst index 0a6bc55ff1f..eb800b22a75 100644 --- a/doc/rtd/reference/cli.rst +++ b/doc/rtd/reference/cli.rst @@ -83,30 +83,8 @@ re-run all stages as it did on first boot. .. note:: - Cloud-init provides the directory :file:`/etc/cloud/clean.d/` for third party - applications which need additional configuration artifact cleanup from - the filesystem when the `clean` command is invoked. - - The :command:`clean` operation is typically performed by image creators - when preparing a golden image for clone and redeployment. The clean command - removes any cloud-init semaphores, allowing cloud-init to treat the next - boot of this image as the "first boot". When the image is next booted - cloud-init will performing all initial configuration based on any valid - datasource meta-data and user-data. - - Any executable scripts in this subdirectory will be invoked in lexicographical - order with run-parts when running the :command:`clean` command. - - Typical format of such scripts would be a ##- like the following: - :file:`/etc/cloud/clean.d/99-live-installer` - - An example of a script is: - - .. code-block:: bash - - sudo rm -rf /var/lib/installer_imgs/ - sudo rm -rf /var/log/installer/ - + The operations performed by `clean` can be supplemented / customized. See: + :ref:`custom_clean_scripts`. .. _cli_collect_logs: @@ -212,9 +190,10 @@ Example output: Generally run by OS init systems to execute ``cloud-init``'s stages: *init* and *init-local*. See :ref:`boot_stages` for more info. -Can be run on the command line, but is generally gated to run only once -due to semaphores in :file:`/var/lib/cloud/instance/sem/` and -:file:`/var/lib/cloud/sem`. +Can be run on the command line, but is deprecated, because incomplete +configuration can be applied when run later in boot. The boot stages are +generally gated to run only once due to semaphores in +:file:`/var/lib/cloud/instance/sem/` and :file:`/var/lib/cloud/sem`. * :command:`--local`: Run *init-local* stage instead of *init*. * :command:`--file` : Use additional yaml configuration files. @@ -226,16 +205,19 @@ due to semaphores in :file:`/var/lib/cloud/instance/sem/` and Generally run by OS init systems to execute ``modules:config`` and ``modules:final`` boot stages. This executes cloud config :ref:`modules` -configured to run in the Init, Config and Final stages. The modules are -declared to run in various boot stages in the file +configured to run in the Init, Config and Final stages. Can be run on the +command line, but this is not recommended and will generate a warning because +incomplete configuration can be applied when run later in boot. 
+The modules are declared to run in various boot stages in the file :file:`/etc/cloud/cloud.cfg` under keys: * ``cloud_init_modules`` * ``cloud_config_modules`` * ``cloud_final_modules`` -Can be run on the command line, but each module is gated to run only once due -to semaphores in :file:`/var/lib/cloud/`. +Can be run on the command line, but is deprecated, because incomplete +configuration can be applied when run later in boot. Each module is gated to +run only once due to semaphores in :file:`/var/lib/cloud/`. * :command:`--mode [init|config|final]`: Run ``modules:init``, ``modules:config`` or ``modules:final`` ``cloud-init`` stages. diff --git a/doc/rtd/reference/custom_modules.rst b/doc/rtd/reference/custom_modules.rst new file mode 100644 index 00000000000..4ce423dd52b --- /dev/null +++ b/doc/rtd/reference/custom_modules.rst @@ -0,0 +1,18 @@ +Custom Modules +************** + +This includes reference documentation on how to extend cloud-init with +custom / out-of-tree functionality. + +.. _custom_formats: + +----- + +.. toctree:: + :maxdepth: 1 + + custom_modules/custom_clean_scripts.rst + custom_modules/custom_configuration_module.rst + custom_modules/custom_datasource.rst + custom_modules/custom_mergers.rst + custom_modules/custom_part_handlers.rst diff --git a/doc/rtd/reference/custom_modules/custom_clean_scripts.rst b/doc/rtd/reference/custom_modules/custom_clean_scripts.rst new file mode 100644 index 00000000000..955668fb266 --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_clean_scripts.rst @@ -0,0 +1,25 @@ +.. _custom_clean_scripts: + +Custom Clean Scripts +******************** + +Cloud-init provides the directory :file:`/etc/cloud/clean.d/` for third party +applications which need additional configuration artifact cleanup from +the filesystem when the :ref:`cloud-init clean` command is invoked. + +The :command:`clean` operation is typically performed by image creators +when preparing a golden image for clone and redeployment. The clean command +removes any cloud-init internal state, allowing cloud-init to treat the next +boot of this image as the "first boot". +Any executable scripts in this subdirectory will be invoked in lexicographical +order when running the :command:`clean` command. + +Example +======= + +.. code-block:: bash + + $ cat /etc/cloud/clean.d/99-live-installer + #!/bin/sh + sudo rm -rf /var/lib/installer_imgs/ + sudo rm -rf /var/log/installer/ diff --git a/doc/rtd/reference/custom_modules/custom_configuration_module.rst b/doc/rtd/reference/custom_modules/custom_configuration_module.rst new file mode 100644 index 00000000000..a26adf26eb4 --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_configuration_module.rst @@ -0,0 +1,23 @@ +.. _custom_configuration_module: + +Custom Configuration Module +*************************** + +Custom 3rd-party out-of-tree configuration modules can be added to cloud-init +by: + +#. :ref:`Implement a config module` in a Python file with its + name starting with ``cc_``. + +#. Place the file where the rest of config modules are located. + On Ubuntu this path is typically: + `/usr/lib/python3/dist-packages/cloudinit/config/`. + +#. Extend the :ref:`base-configuration's ` + ``cloud_init_modules``, ``cloud_config_modules`` or ``cloud_final_modules`` + to let the config module run on one of those stages. + +.. 
warning:: + Cloud-init's JSON schema validation will warn about unknown config keys + introduced by custom modules, as there is currently no easy way for + custom modules to define their keys in the schema. diff --git a/doc/rtd/reference/custom_modules/custom_datasource.rst b/doc/rtd/reference/custom_modules/custom_datasource.rst new file mode 100644 index 00000000000..2d5aa6c8463 --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_datasource.rst @@ -0,0 +1,19 @@ +.. _custom_datasource: + +Custom DataSource +***************** + +Custom 3rd-party out-of-tree DataSources can be added to cloud-init by: + +#. :ref:`Implement a DataSource` in a Python file. + +#. Place that file as a single Python module or package in a folder included + in ``$PYTHONPATH``. + +#. Extend the base configuration's + :ref:`datasource_pkg_list` to include the + Python package where the DataSource is located. + +#. Extend the :ref:`base-configuration`'s + :ref:`datasource_list` to include the name of + the custom DataSource. diff --git a/doc/rtd/reference/custom_modules/custom_mergers.rst b/doc/rtd/reference/custom_modules/custom_mergers.rst new file mode 100644 index 00000000000..b1af2c1d9f6 --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_mergers.rst @@ -0,0 +1,60 @@ +.. _custom_mergers: + +Custom Mergers +************** + +It is possible for users to inject their own :ref:`merging` +classes to handle specific types of merging as they choose (the +basic ones included will handle lists, dicts, and strings). + +A `merge class` is a class definition providing functions that can be used +to merge a given type with another given type. + +An example of one of these `merging classes` is the following: + +.. code-block:: python + + class Merger: + def __init__(self, merger, opts): + self._merger = merger + self._overwrite = 'overwrite' in opts + + # This merging algorithm will attempt to merge with + # another dictionary, on encountering any other type of object + # it will not merge with said object, but will instead return + # the original value + # + # On encountering a dictionary, it will create a new dictionary + # composed of the original and the one to merge with, if 'overwrite' + # is enabled then keys that exist in the original will be overwritten + # by keys in the one to merge with (and associated values). Otherwise + # if not in overwrite mode the 2 conflicting keys themselves will + # be merged. + def _on_dict(self, value, merge_with): + if not isinstance(merge_with, dict): + return value + merged = dict(value) + for (k, v) in merge_with.items(): + if k in merged: + if not self._overwrite: + merged[k] = self._merger.merge(merged[k], v) + else: + merged[k] = v + else: + merged[k] = v + return merged + +There is an ``_on_dict`` method here that will be given a +source value, and a value to merge with. The result will be the merged object. + +This code itself is called by another merging class which "directs" the +merging to happen by analysing the object types to merge, and attempting to +find a known object that will merge that type. An example of this can be found +in the :file:`mergers/__init__.py` file (see ``LookupMerger`` and +``UnknownMerger``). + +Note how each +merge can have options associated with it, which affect how the merging is +performed. For example, a dictionary merger can be told to overwrite instead +of attempting to merge, or a string merger can be told to append strings +instead of discarding other strings to merge with.
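+
+As an illustrative sketch (not part of cloud-init's API), the ``Merger``
+class above can be exercised directly. The ``FakeLookup`` stub below stands
+in for the "directing" lookup merger described earlier:
+
+.. code-block:: python
+
+    # Stub standing in for the directing merger; cloud-init's real
+    # LookupMerger dispatches to type-specific mergers instead.
+    class FakeLookup:
+        def merge(self, value, merge_with):
+            return merge_with  # naive fallback: take the new value
+
+    # Without 'overwrite', conflicting keys are handed back to the
+    # directing merger; new keys are simply added.
+    merger = Merger(FakeLookup(), opts=[])
+    print(merger._on_dict({"a": 1}, {"a": 2, "b": 3}))  # {'a': 2, 'b': 3}
+
+    # With 'overwrite', conflicting keys are replaced outright.
+    overwriting = Merger(FakeLookup(), opts=["overwrite"])
+    print(overwriting._on_dict({"a": 1}, {"a": 2}))  # {'a': 2}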
diff --git a/doc/rtd/reference/custom_modules/custom_part_handlers.rst b/doc/rtd/reference/custom_modules/custom_part_handlers.rst new file mode 100644 index 00000000000..501dc7af7be --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_part_handlers.rst @@ -0,0 +1,32 @@ +.. _custom_part_handler: + +Custom Part Handler +******************* + +A custom part handler must be Python code that contains a ``list_types`` +function and a ``handle_part`` function. + +The ``list_types`` function takes no arguments and must return a list +of :ref:`content types` that this +part handler handles. These can include custom content types or built-in +content types that this handler will override. + +The ``handle_part`` function takes four arguments and returns nothing. See the +example below for exactly how each argument is used. + +To use this part handler, it must be included in a MIME multipart file as +part of the :ref:`user data`. +Since MIME parts are processed in order, a part handler part must precede +any parts with mime-types that it is expected to handle in the same user data. + +``Cloud-init`` will then call the ``handle_part`` function once before it +handles any parts, once per part received, and once after all parts have been +handled. These additional calls allow for initialisation or teardown before +or after receiving any parts. + +Example +======= + +.. literalinclude:: ../../../examples/part-handler.txt + :language: python + :linenos: diff --git a/doc/rtd/reference/datasources/nocloud.rst b/doc/rtd/reference/datasources/nocloud.rst index 3033869f682..bf32ad3458b 100644 --- a/doc/rtd/reference/datasources/nocloud.rst +++ b/doc/rtd/reference/datasources/nocloud.rst @@ -4,99 +4,241 @@ NoCloud ******* The data source ``NoCloud`` is a flexible datasource that can be used in -multiple different ways. With NoCloud, one can provide configurations to -the instance without running a network service (or even without having a -network at all). Alternatively, one can use HTTP/HTTPS or FTP/FTPS to provide -a configuration. +multiple different ways. -Configuration Methods: +With NoCloud, one can provide configuration to the instance locally (without +network access) or alternatively NoCloud can fetch the configuration from a +remote server. + +Much of the following documentation describes how to tell cloud-init where +to get its configuration. + +Runtime configurations ====================== -.. warning:: - User data placed under ``/etc/cloud/`` will **not** be recognized as a - source of configuration data by the NoCloud datasource. While it may - be acted upon by cloud-init, using - :ref:`DataSourceNone` should be preferred. +Cloud-init discovers four types of configuration at runtime. The source of +these configuration types is configurable with a discovery configuration. This +discovery configuration can be delivered to cloud-init in different ways, but +is different from the configurations that cloud-init uses to configure the +instance at runtime. + +user data +--------- + +User data is a :ref:`configuration format` that allows a +user to configure an instance. + +metadata +-------- + +The ``meta-data`` file is a YAML-formatted file. + +vendor data +----------- + +Vendor data may be used to provide default cloud-specific configurations which +may be overridden by user data. This may be useful, for example, to configure an +instance with a cloud provider's repository mirror for faster package +installation.
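+
+As a minimal sketch of that use case, vendor data could carry a cloud-config
+such as the following (the mirror URL is a placeholder, not a real endpoint):
+
+.. code-block:: yaml
+
+    #cloud-config
+    apt:
+      primary:
+        - arches: [default]
+          uri: http://mirror.example.com/ubuntu/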
+ +network config +-------------- + +Network configuration typically comes from the cloud provider to set +cloud-specific network configurations, or a reasonable default is set by +cloud-init (typically cloud-init brings up an interface using DHCP). -Method 1: Labeled filesystem +Since NoCloud is a generic datasource, network configuration may be set the ---------------------------- +same way as user data, metadata, and vendor data. + +See the :ref:`network configuration` documentation for +information on network configuration formats. -A labeled `vfat`_ or `iso9660` filesystem may be used. The filesystem volume -must be labelled ``CIDATA``. +Discovery configuration +======================= + +The purpose of the discovery configuration is to tell cloud-init where it can +find the runtime configurations described above. + +There are two methods for cloud-init to receive a discovery configuration. -Method 2: Custom webserver -------------------------- +Method 1: Line configuration ---------------------------- -Configuration files can be provided to cloud-init over HTTP(s). To tell -cloud-init the URI to use, arguments must be passed to the instance via the -kernel command line or SMBIOS serial number. This argument might look like: :: +The "line configuration" is a single string of text which is passed to an +instance at boot time via either the kernel command line or in the serial +number exposed via DMI (sometimes called SMBIOS). +Example: :: - ds=nocloud;s=https://10.42.42.42/cloud-init/configs/ + ds=nocloud;s=https://10.42.42.42/configs/ +In the above line configuration, ``ds=nocloud`` tells cloud-init to use the +NoCloud datasource, and ``s=https://10.42.42.42/configs/`` tells cloud-init to +fetch configurations using ``https`` from the URI +``https://10.42.42.42/configs/``. .. note:: + +We will describe the possible values in a line configuration in the following +sections. See :ref:`this section` for more details on line +configuration. .. note:: + If using kernel command line arguments with GRUB, note that an unescaped semicolon is interpreted as the end of a statement. - Consider using single-quotes to avoid this pitfall. See: `GRUB quoting`_ - ds=nocloud;s=http://10.42.42.42/cloud-init/configs/ + See: `GRUB quoting`_ + +Method 2: System configuration +------------------------------ + +System configurations are YAML-formatted files and have names that end in +``.cfg``. These are located under :file:`/etc/cloud/cloud.cfg.d/`. + +Example: + +.. code-block:: yaml + + datasource: + NoCloud: + seedfrom: https://10.42.42.42/configs/ + +The above system configuration tells cloud-init that it is using NoCloud and +that it can find configurations at ``https://10.42.42.42/configs/``. + +The scope of this section is limited to selecting the source of cloud-init's +configuration; however, it is worth mentioning that the system configuration +provides more than just the discovery configuration. + +In addition to defining where cloud-init can find runtime configurations, the +system configuration also controls many of cloud-init's default behaviors. +Most users shouldn't need to modify these defaults; however, it is worth noting +that downstream distributions often use them to set reasonable default +behaviors for cloud-init. This includes things such as which distro to behave +as and which networking backend to use. + +The default values in :file:`/etc/cloud/cloud.cfg` may be overridden by drop-in +files which are stored in :file:`/etc/cloud/cloud.cfg.d`.
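+
+For example, a hypothetical drop-in file
+:file:`/etc/cloud/cloud.cfg.d/90-nocloud.cfg` (the name is illustrative)
+could both select the NoCloud datasource and provide its discovery
+configuration:
+
+.. code-block:: yaml
+
+    datasource_list: [ NoCloud, None ]
+    datasource:
+      NoCloud:
+        seedfrom: https://10.42.42.42/configs/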
+ +Configuration sources +===================== -User-data, metadata, network config, and vendor data may be sourced from one +of several possible locations, either locally or remotely. + +Source 1: Local filesystem +-------------------------- + +System configuration may provide cloud-init runtime configuration directly: + +.. code-block:: yaml + + datasource: + NoCloud: + meta-data: | + instance-id: l-eadfbe + user-data: | + #cloud-config + runcmd: [ echo "it worked!" > /tmp/example.txt ] + +Local filesystem: custom location +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Cloud-init makes it possible to read runtime configuration from a custom +filesystem path for those that require more flexibility. This may be +done with a line configuration: :: + + ds=nocloud;s=file://path/to/directory/ + +Or a system configuration: + +.. code-block:: yaml + + datasource: + NoCloud: + seedfrom: file://path/to/directory + +Source 2: Drive with labeled filesystem +--------------------------------------- + +A labeled `vfat`_ or `iso9660` filesystem may be used. The filesystem volume +must be labelled ``CIDATA``. The :ref:`configuration files` must +be in the root directory of the filesystem. + +Source 3: Custom webserver +-------------------------- + +Configuration files can be provided to cloud-init over HTTP(S) using a +line configuration: :: + + ds=nocloud;s=https://10.42.42.42/cloud-init/configs/ + +or using a system configuration: + +.. code-block:: yaml datasource: NoCloud: seedfrom: https://10.42.42.42/cloud-init/configs/ -Method 3: FTP Server +Source 4: FTP Server -------------------- Configuration files can be provided to cloud-init over unsecured FTP -or alternatively with FTP over TLS. To tell cloud-init the URL to use, -arguments must be passed to the instance via the kernel command line or SMBIOS -serial number. This argument might look like: :: +or alternatively with FTP over TLS using a line configuration: :: ds=nocloud;s=ftps://10.42.42.42/cloud-init/configs/ -Alternatively, this URI may be defined in a configuration in a file -:file:`/etc/cloud/cloud.cfg.d/*.cfg` like this: :: +or using a system configuration: + +.. code-block:: yaml datasource: NoCloud: seedfrom: ftps://10.42.42.42/cloud-init/configs/ -Method 4: Local filesystem --------------------------- +.. _source_files: -Configuration files can be provided on the local filesystem at specific -filesystem paths using kernel command line arguments or SMBIOS serial number to -tell cloud-init where on the filesystem to look. +Source files +------------ -.. note:: - Unless arbitrary filesystem paths are required, one might prefer to use - :ref:`DataSourceNone`, since it does not require - modifying the kernel command line or SMBIOS. +The base path pointed to by the URI in the above sources provides content +using the following final path components: -This argument might look like: :: +* ``user-data`` +* ``meta-data`` +* ``vendor-data`` +* ``network-config`` - ds=nocloud;s=file://path/to/directory/ +For example, if the value of ``seedfrom`` is +``https://10.42.42.42/``, then the following files will be fetched from the +webserver at first boot: -Alternatively, this URI may be defined in a configuration in a file -:file:`/etc/cloud/cloud.cfg.d/*.cfg` like this: :: +.. 
code-block:: sh - datasource: - NoCloud: - seedfrom: file://10.42.42.42/cloud-init/configs/ + https://10.42.42.42/user-data + https://10.42.42.42/vendor-data + https://10.42.42.42/meta-data + https://10.42.42.42/network-config + +If the required files don't exist, this datasource will be skipped. + +.. _line_config_detail: + +Line configuration in detail +============================ -Permitted keys -============== +The line configuration has several options. -Currently three keys (and their aliases) are permitted for configuring -cloud-init. +Permitted keys (DMI and kernel command line) +-------------------------------------------- -The only required key is: +Currently three keys (and their aliases) are permitted in cloud-init's kernel +command line and DMI (sometimes called SMBIOS) serial number. -* ``seedfrom`` alias: ``s`` +There is only one required key in a line configuration: + +* ``seedfrom`` (alternatively ``s``) A valid ``seedfrom`` value consists of a URI which must contain a trailing ``/``. @@ -104,15 +246,11 @@ A valid ``seedfrom`` value consists of a URI which must contain a trailing Some optional keys may be used, but their use is discouraged and may be removed in the future. -* ``local-hostname`` alias: ``h`` (:ref:`cloud-config` - preferred) -* ``instance-id`` alias: ``i`` (set instance id in :file:`meta-data` instead) -.. note:: +* ``local-hostname`` (alternatively ``h``) +* ``instance-id`` (alternatively ``i``) - The aliases ``s`` , ``h`` and ``i`` are only supported by kernel - command line or SMBIOS. When configured in a ``*.cfg`` file, the long key - name is required. +Both of these can be set in :file:`meta-data` instead. Seedfrom: HTTP and HTTPS ------------------------ @@ -138,26 +276,37 @@ Where ``scheme`` can be ``ftp`` or ``ftps``, ``userinfo`` will be ``host`` can be an IP address or DNS name, and ``port`` is which network port to use (default is ``21``). -Seedfrom: Files ---------------- +Discovery configuration considerations +====================================== + +Above, we describe the two methods of providing discovery configuration (system +configuration and line configuration). Two methods exist because there are +advantages and disadvantages to each option; neither is clearly a better +choice, so it is left to the user to decide. + +Line configuration +------------------ + +**Advantages** -The path pointed to by the URI can contain the following -files: +* it may be possible to set kernel command line and DMI variables at boot time + without modifying the base image -``user-data`` (required) -``meta-data`` (required) -``vendor-data`` (optional) -``network-config`` (optional) +**Disadvantages** -If the seedfrom URI doesn't contain the required files, this datasource -will be skipped. +* requires control and modification of the hypervisor or the bootloader +* DMI / SMBIOS is architecture-specific -The ``user-data`` file uses :ref:`user data format`. The -``meta-data`` file is a YAML-formatted file. +System configuration +-------------------- + +**Advantages** + +* simple: requires only modifying a file -The ``vendor-data`` file adheres to -:ref:`user data formats`. The ``network-config`` file -follows cloud-init's :ref:`Network Configuration Formats`. +**Disadvantages** + +* requires modifying the filesystem prior to booting an instance DMI-specific kernel command line ================================ @@ -189,7 +338,7 @@ wanted. 
- ``dmi.system-uuid`` - ``dmi.system-version`` -For example, you can pass this option to QEMU: :: +For example, you can pass this line configuration to QEMU: :: -smbios type=1,serial=ds=nocloud;s=http://10.10.0.1:8000/__dmi.chassis-serial-number__/ @@ -268,14 +417,10 @@ sufficient disk by following the following example. user data you will also have to change the ``instance-id``, or start the disk fresh. -Also, you can inject an :file:`/etc/network/interfaces` file by providing the -content for that file in the ``network-interfaces`` field of -:file:`meta-data`. Example ``meta-data`` --------------------- -:: +.. code-block:: yaml instance-id: iid-abcdefg network-interfaces: | @@ -288,17 +433,14 @@ Example ``meta-data`` hostname: myhost +``network-config`` +------------------ + Network configuration can also be provided to ``cloud-init`` in either :ref:`network_config_v1` or :ref:`network_config_v2` by providing that -YAML formatted data in a file named :file:`network-config`. If found, -this file will override a :file:`network-interfaces` file. +YAML formatted data in a file named :file:`network-config`. -See an example below. Note specifically that this file does not -have a top level ``network`` key as it is already assumed to -be network configuration based on the filename. - -Example config -------------- +Example network v1: .. code-block:: yaml @@ -314,6 +456,8 @@ Example config gateway: 192.168.1.254 +Example network v2: + .. code-block:: yaml version: 2 diff --git a/doc/rtd/reference/datasources/ovf.rst b/doc/rtd/reference/datasources/ovf.rst index a233df13a78..0ee33d0b821 100644 --- a/doc/rtd/reference/datasources/ovf.rst +++ b/doc/rtd/reference/datasources/ovf.rst @@ -3,9 +3,35 @@ OVF *** -The OVF datasource provides a datasource for reading data from an +The OVF datasource provides a generic datasource for reading data from an `Open Virtualization Format`_ ISO transport. +What platforms support OVF +-------------------------- + +OVF is an open standard which is supported by various virtualization +platforms, including (but not limited to): + +* GCP +* OpenShift +* Proxmox +* vSphere +* VirtualBox +* Xen + +While these (and many more) platforms support OVF, in some cases cloud-init +has alternative datasources which provide better platform integration. +Make sure to check whether another datasource exists which is specific to +your platform of choice before trying to use OVF. + +Configuration +------------- + +Cloud-init gets configurations from an OVF XML file. User-data and network +configuration are provided by properties in the XML which contain key / value +pairs. The user-data is provided by a key named ``user-data``, and network +configuration is provided by a key named ``network-config``. + Graceful rpctool fallback ------------------------- diff --git a/doc/rtd/reference/datasources/wsl.rst b/doc/rtd/reference/datasources/wsl.rst index ab96f9490c4..c6970448b5c 100644 --- a/doc/rtd/reference/datasources/wsl.rst +++ b/doc/rtd/reference/datasources/wsl.rst @@ -66,7 +66,10 @@ following paths: the Ubuntu Pro for WSL agent. If this file is present, its modules will be merged with (1), overriding any conflicting modules. If (1) is not provided, then this file will be merged with any valid user-provided configuration - instead. + instead. An exception is made for Landscape client computer tags. If + user-provided data contains a value for ``landscape.client.tags``, it will + be used instead of the one provided by ``agent.yaml``, which is treated as + a default. 
Then, if a file from (1) is not found, a user-provided configuration will be looked for instead in the following order: diff --git a/doc/rtd/reference/distros.rst b/doc/rtd/reference/distros.rst index 59309ece211..d54cb889153 100644 --- a/doc/rtd/reference/distros.rst +++ b/doc/rtd/reference/distros.rst @@ -7,6 +7,7 @@ Unix family of operating systems. See the complete list below. * AlmaLinux * Alpine Linux +* AOSC OS * Arch Linux * CentOS * CloudLinux diff --git a/doc/rtd/reference/examples.rst b/doc/rtd/reference/examples.rst index c9829e49cd2..fe2703031ac 100644 --- a/doc/rtd/reference/examples.rst +++ b/doc/rtd/reference/examples.rst @@ -77,6 +77,13 @@ Run commands on first boot :language: yaml :linenos: +Run commands very early at every boot +======================================== + +.. literalinclude:: ../../examples/boothook.txt + :language: bash + :linenos: + Install arbitrary packages ========================== diff --git a/doc/rtd/reference/faq.rst b/doc/rtd/reference/faq.rst index 45ec431d910..146dc66774d 100644 --- a/doc/rtd/reference/faq.rst +++ b/doc/rtd/reference/faq.rst @@ -15,21 +15,15 @@ Having trouble? We would like to help! - Find a bug? Check out the :ref:`reporting_bugs` topic to find out how to report one -``autoinstall``, ``preruncmd``, ``postruncmd`` -============================================== - -Since ``cloud-init`` ignores top level user data ``cloud-config`` keys, other -projects such as `Juju`_ and `Subiquity autoinstaller`_ use a YAML-formatted -config that combines ``cloud-init``'s user data cloud-config YAML format with -their custom YAML keys. Since ``cloud-init`` ignores unused top level keys, -these combined YAML configurations may be valid ``cloud-config`` files, -however keys such as ``autoinstall``, ``preruncmd``, and ``postruncmd`` are -not used by ``cloud-init`` to configure anything. - -Please direct bugs and questions about other projects that use ``cloud-init`` -to their respective support channels. For Subiquity autoinstaller that is via -IRC (``#ubuntu-server`` on Libera) or Discourse. For Juju support see their -`discourse page`_. +``autoinstall`` +=============== + +Other projects, such as `Subiquity autoinstaller`_, use cloud-init to implement +a subset of their features and have a YAML configuration format which combines +``cloud-init``'s cloud-config with additional keys. + +If you are an autoinstall user, please direct questions to their IRC channel +(``#ubuntu-server`` on Libera). Can I use cloud-init as a library? ================================== @@ -83,8 +77,6 @@ Whitepapers: .. _mailing list: https://launchpad.net/~cloud-init .. _IRC channel on Libera: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init -.. _Juju: https://ubuntu.com/blog/topics/juju -.. _discourse page: https://discourse.charmhub.io .. _do: https://github.com/canonical/ubuntu-pro-client/blob/9b46480b9e4b88e918bac5ced0d4b8edb3cbbeab/lib/auto_attach.py#L35 .. _cloud-init - The Good Parts: https://www.youtube.com/watch?v=2_m6EUo6VOI @@ -106,5 +98,3 @@ Whitepapers: .. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/ .. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/ .. _Subiquity autoinstaller: https://ubuntu.com/server/docs/install/autoinstall -.. _juju_project: https://discourse.charmhub.io/t/model-config-cloudinit-userdata/512 -.. 
_discourse page: https://discourse.charmhub.io diff --git a/doc/rtd/reference/index.rst b/doc/rtd/reference/index.rst index 14e754b295f..d1791fa9631 100644 --- a/doc/rtd/reference/index.rst +++ b/doc/rtd/reference/index.rst @@ -25,3 +25,4 @@ matrices and so on. ubuntu_stable_release_updates.rst breaking_changes.rst user_files.rst + custom_modules.rst diff --git a/doc/rtd/reference/merging.rst b/doc/rtd/reference/merging.rst index 7f1fc022f17..097892e2536 100644 --- a/doc/rtd/reference/merging.rst +++ b/doc/rtd/reference/merging.rst @@ -94,64 +94,8 @@ merging is done on other types. Customisation ============= -Because the above merging algorithm may not always be desired (just as the -previous merging algorithm was not always the preferred one), the concept of -customised merging was introduced through `merge classes`. - -A `merge class` is a class definition providing functions that can be used -to merge a given type with another given type. - -An example of one of these `merging classes` is the following: - -.. code-block:: python - - class Merger: - def __init__(self, merger, opts): - self._merger = merger - self._overwrite = 'overwrite' in opts - - # This merging algorithm will attempt to merge with - # another dictionary, on encountering any other type of object - # it will not merge with said object, but will instead return - # the original value - # - # On encountering a dictionary, it will create a new dictionary - # composed of the original and the one to merge with, if 'overwrite' - # is enabled then keys that exist in the original will be overwritten - # by keys in the one to merge with (and associated values). Otherwise - # if not in overwrite mode the 2 conflicting keys themselves will - # be merged. - def _on_dict(self, value, merge_with): - if not isinstance(merge_with, (dict)): - return value - merged = dict(value) - for (k, v) in merge_with.items(): - if k in merged: - if not self._overwrite: - merged[k] = self._merger.merge(merged[k], v) - else: - merged[k] = v - else: - merged[k] = v - return merged - -As you can see, there is an ``_on_dict`` method here that will be given a -source value, and a value to merge with. The result will be the merged object. - -This code itself is called by another merging class which "directs" the -merging to happen by analysing the object types to merge, and attempting to -find a known object that will merge that type. An example of this can be found -in the :file:`mergers/__init__.py` file (see ``LookupMerger`` and -``UnknownMerger``). - -So, following the typical ``cloud-init`` approach of allowing source code to -be downloaded and used dynamically, it is possible for users to inject their -own merging files to handle specific types of merging as they choose (the -basic ones included will handle lists, dicts, and strings). Note how each -merge can have options associated with it, which affect how the merging is -performed. For example, a dictionary merger can be told to overwrite instead -of attempting to merge, or a string merger can be told to append strings -instead of discarding other strings to merge with. +Custom third-party mergers can be defined; for more information, see +:ref:`custom_mergers`. How to activate =============== diff --git a/doc/rtd/reference/modules.rst b/doc/rtd/reference/modules.rst index 2a7d26d3068..f56e3ffa8e1 100644 --- a/doc/rtd/reference/modules.rst +++ b/doc/rtd/reference/modules.rst @@ -17,6 +17,9 @@ version ``22.1`` (the first release in 2022) it is scheduled to be removed in the logs. 
If a key's expected value changes, the key will be marked ``changed`` with a date. A 5 year timeline also applies to changed keys. +Modules +======= + .. datatemplate:yaml:: ../../module-docs/cc_ansible/data.yaml :template: modules.tmpl .. datatemplate:yaml:: ../../module-docs/cc_apk_configure/data.yaml diff --git a/doc/rtd/reference/network-config-format-eni.rst b/doc/rtd/reference/network-config-format-eni.rst deleted file mode 100644 index be7bbeb29ec..00000000000 --- a/doc/rtd/reference/network-config-format-eni.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _network_config_eni: - -Network configuration ENI (legacy) -********************************** - -``Cloud-init`` supports reading and writing network config in the ``ENI`` -format which is consumed by the ``ifupdown`` tool to parse and apply network -configuration. - -As an input format this is **legacy**. In cases where ENI format is available -and another format is also available, ``cloud-init`` will prefer to use the -other, newer format. - -This can happen in either :ref:`datasource_nocloud` or -:ref:`datasource_openstack` datasources. - -Please reference existing `documentation`_ for the -:file:`/etc/network/interfaces(5)` format. - -.. _documentation: http://manpages.ubuntu.com/manpages/trusty/en/man5/interfaces.5.html diff --git a/doc/rtd/reference/network-config.rst b/doc/rtd/reference/network-config.rst index 2e95550e61a..61a12167d74 100644 --- a/doc/rtd/reference/network-config.rst +++ b/doc/rtd/reference/network-config.rst @@ -126,7 +126,6 @@ The following datasources optionally provide network configuration: - :ref:`datasource_config_drive` - `OpenStack Metadata Service Network`_ - - :ref:`network_config_eni` - :ref:`datasource_digital_ocean` @@ -140,15 +139,9 @@ The following datasources optionally provide network configuration: - :ref:`network_config_v1` - :ref:`network_config_v2` - - :ref:`network_config_eni` - -- :ref:`datasource_opennebula` - - - :ref:`network_config_eni` - :ref:`datasource_openstack` - - :ref:`network_config_eni` - `OpenStack Metadata Service Network`_ - :ref:`datasource_smartos` @@ -168,7 +161,6 @@ For more information on network configuration formats: .. toctree:: :maxdepth: 1 - network-config-format-eni.rst network-config-format-v1.rst network-config-format-v2.rst diff --git a/doc/rtd/spelling_word_list.txt b/doc/rtd/spelling_word_list.txt index 239b3b49475..5f4783af65b 100644 --- a/doc/rtd/spelling_word_list.txt +++ b/doc/rtd/spelling_word_list.txt @@ -24,6 +24,7 @@ bigstep boolean bootcmd boothook +boothooks btrfs busybox byobu @@ -211,6 +212,7 @@ scaleway seedurl serverurl setup-keymap +shellscript shortid sigonly sk diff --git a/doc/rtd/tutorial/qemu.rst b/doc/rtd/tutorial/qemu.rst index 4c1afedd8a1..caa79cd39dd 100644 --- a/doc/rtd/tutorial/qemu.rst +++ b/doc/rtd/tutorial/qemu.rst @@ -80,6 +80,12 @@ server image using :command:`wget`: $ wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img +.. note:: + This example uses emulated CPU instructions on non-x86 hosts, so it may be + slow. To make it faster on non-x86 architectures, one can change the image + type and :spelling:ignore:`qemu-system-` command name to match the + architecture of your host machine. + Define our user data ==================== @@ -203,7 +209,6 @@ take a few moments to complete. 
-net nic \ -net user \ -machine accel=kvm:tcg \ - -cpu host \ -m 512 \ -nographic \ -hda jammy-server-cloudimg-amd64.img \ diff --git a/packages/debian/cloud-init.postinst b/packages/debian/cloud-init.postinst deleted file mode 100644 index cdd0466d6da..00000000000 --- a/packages/debian/cloud-init.postinst +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh -cleanup_lp1552999() { - local oldver="$1" last_bad_ver="0.7.7~bzr1178" - dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0 - local edir="/etc/systemd/system/multi-user.target.wants" - rm -f "$edir/cloud-config.service" "$edir/cloud-final.service" \ - "$edir/cloud-init-local.service" "$edir/cloud-init.service" -} - - -#DEBHELPER# - -if [ "$1" = "configure" ]; then - oldver="$2" - cleanup_lp1552999 "$oldver" -fi diff --git a/packages/debian/cloud-init.preinst b/packages/debian/cloud-init.preinst deleted file mode 100644 index 3c2af06d38d..00000000000 --- a/packages/debian/cloud-init.preinst +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -# vi: ts=4 expandtab - -cleanup_lp1552999() { - local oldver="$1" last_bad_ver="0.7.7~bzr1178" - dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0 - local hdir="/var/lib/systemd/deb-systemd-helper-enabled" - hdir="$hdir/multi-user.target.wants" - local edir="/etc/systemd/system/multi-user.target.wants" - rm -f "$hdir/cloud-config.service" "$hdir/cloud-final.service" \ - "$hdir/cloud-init-local.service" "$hdir/cloud-init.service" -} - - -if [ "$1" = "upgrade" ]; then - oldver="$2" - cleanup_lp1552999 "$oldver" -fi - -#DEBHELPER# diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index bc57fe9aac9..672cd426673 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -124,7 +124,7 @@ if [ $1 -eq 1 ] then /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || : /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || : - /bin/systemctl enable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init-network.service >/dev/null 2>&1 || : /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || : fi %else @@ -141,7 +141,7 @@ if [ $1 -eq 0 ] then /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || : /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || : - /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init-network.service >/dev/null 2>&1 || : /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || : fi %else diff --git a/pyproject.toml b/pyproject.toml index 7408488f975..df969290451 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ module = [ ignore_missing_imports = true no_implicit_optional = true +# See GH-5445 [[tool.mypy.overrides]] module = [ "cloudinit.analyze", @@ -94,7 +95,6 @@ module = [ "cloudinit.sources.DataSourceExoscale", "cloudinit.sources.DataSourceGCE", "cloudinit.sources.DataSourceHetzner", - "cloudinit.sources.DataSourceIBMCloud", "cloudinit.sources.DataSourceMAAS", "cloudinit.sources.DataSourceNoCloud", "cloudinit.sources.DataSourceOVF", @@ -118,7 +118,6 @@ module = [ "cloudinit.temp_utils", "cloudinit.templater", "cloudinit.user_data", - "cloudinit.util", "tests.integration_tests.instances", "tests.unittests.analyze.test_show", "tests.unittests.cmd.devel.test_hotplug_hook", diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl index 79c75c71ae6..9067d6e4bc0 100644 --- 
a/systemd/cloud-config.service.tmpl +++ b/systemd/cloud-config.service.tmpl @@ -10,7 +10,14 @@ ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot -ExecStart=/usr/bin/cloud-init modules --mode=config +# This service is a shim which preserves systemd ordering while allowing a +# single Python process to run cloud-init's logic. This works by communicating +# with the cloud-init process over a unix socket to tell the process that this +# stage can start, and then wait on a return socket until the cloud-init +# process has completed this stage. The output from the return socket is piped +# into a shell so that the process can send a completion message (defaults to +# "done", otherwise includes an error message) and an exit code to systemd. +ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/config.sock -s /run/cloud-init/share/config-return.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/systemd/cloud-config.target b/systemd/cloud-config.target index 2d65e3433ce..be754bbd19d 100644 --- a/systemd/cloud-config.target +++ b/systemd/cloud-config.target @@ -14,5 +14,5 @@ [Unit] Description=Cloud-config availability -Wants=cloud-init-local.service cloud-init.service -After=cloud-init-local.service cloud-init.service +Wants=cloud-init-local.service cloud-init-network.service +After=cloud-init-local.service cloud-init-network.service diff --git a/systemd/cloud-final.service.tmpl b/systemd/cloud-final.service.tmpl index b66533643d3..9fb2f681f73 100644 --- a/systemd/cloud-final.service.tmpl +++ b/systemd/cloud-final.service.tmpl @@ -15,10 +15,16 @@ ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot -ExecStart=/usr/bin/cloud-init modules --mode=final +# This service is a shim which preserves systemd ordering while allowing a +# single Python process to run cloud-init's logic. This works by communicating +# with the cloud-init process over a unix socket to tell the process that this +# stage can start, and then wait on a return socket until the cloud-init +# process has completed this stage. The output from the return socket is piped +# into a shell so that the process can send a completion message (defaults to +# "done", otherwise includes an error message) and an exit code to systemd. +ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/final.sock -s /run/cloud-init/share/final-return.sock | sh' RemainAfterExit=yes TimeoutSec=0 -KillMode=process {% if variant in ["almalinux", "cloudlinux", "rhel"] %} # Restart NetworkManager if it is present and running. ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \ diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl index 0da2d8337e9..b0a534b8f9a 100644 --- a/systemd/cloud-init-local.service.tmpl +++ b/systemd/cloud-init-local.service.tmpl @@ -7,7 +7,6 @@ DefaultDependencies=no {% endif %} Wants=network-pre.target After=hv_kvp_daemon.service -After=systemd-remount-fs.service {% if variant in ["almalinux", "cloudlinux", "rhel"] %} Requires=dbus.socket After=dbus.socket @@ -38,7 +37,14 @@ ExecStartPre=/bin/mkdir -p /run/cloud-init ExecStartPre=/sbin/restorecon /run/cloud-init ExecStartPre=/usr/bin/touch /run/cloud-init/enabled {% endif %} -ExecStart=/usr/bin/cloud-init init --local +# This service is a shim which preserves systemd ordering while allowing a +# single Python process to run cloud-init's logic. 
This works by communicating +# with the cloud-init process over a unix socket to tell the process that this +# stage can start, and then wait on a return socket until the cloud-init +# process has completed this stage. The output from the return socket is piped +# into a shell so that the process can send a completion message (defaults to +# "done", otherwise includes an error message) and an exit code to systemd. +ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/local.sock -s /run/cloud-init/share/local-return.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/systemd/cloud-init-main.service.tmpl b/systemd/cloud-init-main.service.tmpl new file mode 100644 index 00000000000..1ddfd62073e --- /dev/null +++ b/systemd/cloud-init-main.service.tmpl @@ -0,0 +1,52 @@ +## template:jinja +# systemd ordering resources +# ========================== +# https://systemd.io/NETWORK_ONLINE/ +# https://docs.cloud-init.io/en/latest/explanation/boot.html +# https://www.freedesktop.org/wiki/Software/systemd/NetworkTarget/ +# https://www.freedesktop.org/software/systemd/man/latest/systemd.special.html +# https://www.freedesktop.org/software/systemd/man/latest/systemd-remount-fs.service.html +[Unit] +Description=Cloud-init: Single Process +Wants=network-pre.target +{% if variant in ["almalinux", "cloudlinux", "ubuntu", "unknown", "debian", "rhel"] %} +DefaultDependencies=no +{% endif %} +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} +Requires=dbus.socket +After=dbus.socket +Before=network.service +Before=firewalld.target +Conflicts=shutdown.target +{% endif %} +{% if variant in ["ubuntu", "unknown", "debian"] %} +Before=sysinit.target +Conflicts=shutdown.target +{% endif %} + +After=systemd-remount-fs.service +Before=sysinit.target +Before=cloud-init-local.service +Conflicts=shutdown.target +RequiresMountsFor=/var/lib/cloud +ConditionPathExists=!/etc/cloud/cloud-init.disabled +ConditionKernelCommandLine=!cloud-init=disabled +ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled + +[Service] +Type=notify +ExecStart=/usr/bin/cloud-init --all-stages +KillMode=process +TasksMax=infinity +TimeoutStartSec=infinity +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} +ExecStartPre=/bin/mkdir -p /run/cloud-init +ExecStartPre=/sbin/restorecon /run/cloud-init +ExecStartPre=/usr/bin/touch /run/cloud-init/enabled +{% endif %} + +# Output needs to appear in instance console output +StandardOutput=journal+console + +[Install] +WantedBy=cloud-init.target diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init-network.service.tmpl similarity index 71% rename from systemd/cloud-init.service.tmpl rename to systemd/cloud-init-network.service.tmpl index 58031cc4331..6957b39f1ee 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init-network.service.tmpl @@ -46,7 +46,14 @@ ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot -ExecStart=/usr/bin/cloud-init init +# This service is a shim which preserves systemd ordering while allowing a +# single Python process to run cloud-init's logic. This works by communicating +# with the cloud-init process over a unix socket to tell the process that this +# stage can start, and then wait on a return socket until the cloud-init +# process has completed this stage. The output from the return socket is piped +# into a shell so that the process can send a completion message (defaults to +# "done", otherwise includes an error message) and an exit code to systemd. 
+ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/network.sock -s /run/cloud-init/share/network-return.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/templates/hosts.aosc.tmpl b/templates/hosts.aosc.tmpl new file mode 100644 index 00000000000..897cebcc115 --- /dev/null +++ b/templates/hosts.aosc.tmpl @@ -0,0 +1,23 @@ +## template:jinja +{# +This file (/etc/cloud/templates/hosts.aosc.tmpl) is only utilized +if enabled in cloud-config. Specifically, in order to enable it +you need to add the following to config: + manage_etc_hosts: True +-#} +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.aosc.tmpl +# b.) change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +# +{# The value '{{hostname}}' will be replaced with the local-hostname -#} +127.0.0.1 {{fqdn}} {{hostname}} +127.0.0.1 localhost + +# The following lines are desirable for IPv6 capable hosts +::1 localhost ip6-localhost ip6-loopback +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters diff --git a/tests/integration_tests/assets/DataSourceNoCacheNetworkOnly.py b/tests/integration_tests/assets/DataSourceNoCacheNetworkOnly.py new file mode 100644 index 00000000000..54a7bab3437 --- /dev/null +++ b/tests/integration_tests/assets/DataSourceNoCacheNetworkOnly.py @@ -0,0 +1,23 @@ +import logging + +from cloudinit import sources + +LOG = logging.getLogger(__name__) + + +class DataSourceNoCacheNetworkOnly(sources.DataSource): + def _get_data(self): + LOG.debug("TEST _get_data called") + return True + + +datasources = [ + ( + DataSourceNoCacheNetworkOnly, + (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), + ), +] + + +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/tests/integration_tests/assets/DataSourceNoCacheWithFallback.py b/tests/integration_tests/assets/DataSourceNoCacheWithFallback.py new file mode 100644 index 00000000000..fdfc473f8a5 --- /dev/null +++ b/tests/integration_tests/assets/DataSourceNoCacheWithFallback.py @@ -0,0 +1,29 @@ +import logging +import os + +from cloudinit import sources + +LOG = logging.getLogger(__name__) + + +class DataSourceNoCacheWithFallback(sources.DataSource): + def _get_data(self): + if os.path.exists("/ci-test-firstboot"): + LOG.debug("TEST _get_data called") + return True + return False + + def check_if_fallback_is_allowed(self): + return True + + +datasources = [ + ( + DataSourceNoCacheWithFallback, + (sources.DEP_FILESYSTEM,), + ), +] + + +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/tests/integration_tests/assets/enable_coverage.py b/tests/integration_tests/assets/enable_coverage.py index ed71ceef8f5..1d18fcbef04 100644 --- a/tests/integration_tests/assets/enable_coverage.py +++ b/tests/integration_tests/assets/enable_coverage.py @@ -2,7 +2,7 @@ services = [ "cloud-init-local.service", - "cloud-init.service", + "cloud-init-network.service", "cloud-config.service", "cloud-final.service", ] diff --git a/tests/integration_tests/assets/enable_profile.py b/tests/integration_tests/assets/enable_profile.py index a6a0070c3c5..9b68e42ce05 100644 --- a/tests/integration_tests/assets/enable_profile.py +++ b/tests/integration_tests/assets/enable_profile.py @@ -2,7 +2,7 @@ services = [ "cloud-init-local.service", - "cloud-init.service", + 
"cloud-init-network.service", "cloud-config.service", "cloud-final.service", ] diff --git a/tests/integration_tests/cmd/test_schema.py b/tests/integration_tests/cmd/test_schema.py index 3155a07919b..c954484012a 100644 --- a/tests/integration_tests/cmd/test_schema.py +++ b/tests/integration_tests/cmd/test_schema.py @@ -3,7 +3,7 @@ import pytest -from cloudinit.util import should_log_deprecation +from cloudinit import lifecycle from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.releases import CURRENT_RELEASE, MANTIC from tests.integration_tests.util import ( @@ -71,7 +71,7 @@ def test_clean_log(self, class_client: IntegrationInstance): ) # the deprecation_version is 22.2 in schema for apt_* keys in # user-data. Pass 22.2 in against the client's version_boundary. - if should_log_deprecation("22.2", version_boundary): + if lifecycle.should_log_deprecation("22.2", version_boundary): log_level = "DEPRECATED" else: log_level = "INFO" diff --git a/tests/integration_tests/cmd/test_status.py b/tests/integration_tests/cmd/test_status.py index 23509c57cef..50396be709c 100644 --- a/tests/integration_tests/cmd/test_status.py +++ b/tests/integration_tests/cmd/test_status.py @@ -117,7 +117,7 @@ def test_status_json_errors(client): fi cloud-init status --wait --long > $1 date +%s.%N > $MARKER_FILE -""" # noqa: E501 +""" BEFORE_CLOUD_INIT_LOCAL = """\ @@ -162,7 +162,7 @@ def test_status_block_through_all_boot_status(client): # Assert that before-cloud-init-local.service started before # cloud-init-local.service could create status.json - client.execute("test -f /before-local.start-hasstatusjson").failed + assert client.execute("test -f /before-local.start-hasstatusjson").failed early_unit_timestamp = retry_read_from_file( client, "/before-local.start-nostatusjson" diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index c3b8531ae92..8ba5a81b2b5 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -191,7 +191,7 @@ def _collect_profile(instance: IntegrationInstance, log_dir: Path): log_dir / "profile" / "local.stats", ) instance.pull_file( - "/var/log/cloud-init.service.stats", + "/var/log/cloud-init-network.service.stats", log_dir / "profile" / "network.stats", ) instance.pull_file( diff --git a/tests/integration_tests/datasources/test_caching.py b/tests/integration_tests/datasources/test_caching.py new file mode 100644 index 00000000000..467585fa20c --- /dev/null +++ b/tests/integration_tests/datasources/test_caching.py @@ -0,0 +1,113 @@ +import pytest + +from tests.integration_tests import releases, util +from tests.integration_tests.instances import IntegrationInstance + + +def setup_custom_datasource(client: IntegrationInstance, datasource_name: str): + client.write_to_file( + "/etc/cloud/cloud.cfg.d/99-imds.cfg", + f"datasource_list: [ {datasource_name}, None ]\n" + "datasource_pkg_list: [ cisources ]", + ) + assert client.execute("mkdir -p /usr/lib/python3/dist-packages/cisources") + client.push_file( + util.ASSETS_DIR / f"DataSource{datasource_name}.py", + "/usr/lib/python3/dist-packages/cisources/" + f"DataSource{datasource_name}.py", + ) + + +def verify_no_cache_boot(client: IntegrationInstance): + log = client.read_from_file("/var/log/cloud-init.log") + util.verify_ordered_items_in_text( + [ + "No local datasource found", + "running 'init'", + "no cache found", + "Detected DataSource", + "TEST _get_data called", + ], + text=log, + ) + util.verify_clean_boot(client) + + 
+@pytest.mark.skipif( + not releases.IS_UBUNTU, + reason="hardcoded dist-packages directory", +) +def test_no_cache_network_only(client: IntegrationInstance): + """Test cache removal per boot. GH-5486 + + This tests the CloudStack password reset use case. The expectation is: + - Metadata is fetched in network timeframe only + - Because `check_instance_id` is not defined, no cached datasource + is found in the init-local phase, but the cache is used in the + remaining phases due to the existence of /run/cloud-init/.instance-id + - Because `check_if_fallback_is_allowed` is not defined, cloud-init + does NOT fall back to the pickled datasource, and will + instead delete the cache during the init-local phase + - Metadata is therefore fetched every boot in the network phase + """ + setup_custom_datasource(client, "NoCacheNetworkOnly") + + # Run cloud-init as if first boot + assert client.execute("cloud-init clean --logs") + client.restart() + + verify_no_cache_boot(client) + + # Clear the log without clean and run cloud-init for subsequent boot + assert client.execute("echo '' > /var/log/cloud-init.log") + client.restart() + + verify_no_cache_boot(client) + + +@pytest.mark.skipif( + not releases.IS_UBUNTU, + reason="hardcoded dist-packages directory", +) +def test_no_cache_with_fallback(client: IntegrationInstance): + """Test we use fallback when defined and no cache available.""" + setup_custom_datasource(client, "NoCacheWithFallback") + + # Run cloud-init as if first boot + assert client.execute("cloud-init clean --logs") + # Used by custom datasource + client.execute("touch /ci-test-firstboot") + client.restart() + + log = client.read_from_file("/var/log/cloud-init.log") + util.verify_ordered_items_in_text( + [ + "no cache found", + "Detected DataSource", + "TEST _get_data called", + "running 'init'", + "restored from cache with run check", + "running 'modules:config'", + ], + text=log, + ) + util.verify_clean_boot(client) + + # Clear the log without clean and run cloud-init for subsequent boot + assert client.execute("echo '' > /var/log/cloud-init.log") + client.execute("rm /ci-test-firstboot") + client.restart() + + log = client.read_from_file("/var/log/cloud-init.log") + util.verify_ordered_items_in_text( + [ + "cache invalid in datasource", + "Detected DataSource", + "Restored fallback datasource from checked cache", + "running 'init'", + "restored from cache with run check", + "running 'modules:config'", + ], + text=log, + ) + util.verify_clean_boot(client) diff --git a/tests/integration_tests/datasources/test_nocloud.py b/tests/integration_tests/datasources/test_nocloud.py index c6c440840a3..c3462c433a3 100644 --- a/tests/integration_tests/datasources/test_nocloud.py +++ b/tests/integration_tests/datasources/test_nocloud.py @@ -5,8 +5,8 @@ import pytest from pycloudlib.lxd.instance import LXDInstance +from cloudinit import lifecycle from cloudinit.subp import subp -from cloudinit.util import should_log_deprecation from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM from tests.integration_tests.releases import CURRENT_RELEASE, FOCAL @@ -162,7 +162,7 @@ def test_smbios_seed_network(self, client: IntegrationInstance): """\ [Unit] Description=Serve a local webserver - Before=cloud-init.service + Before=cloud-init-network.service Wants=cloud-init-local.service DefaultDependencies=no After=systemd-networkd-wait-online.service @@ -199,7 +199,7 @@ def test_smbios_seed_network(self, client: IntegrationInstance): client, 
"DEPRECATION_INFO_BOUNDARY" ) # nocloud-net deprecated in version 24.1 - if should_log_deprecation("24.1", version_boundary): + if lifecycle.should_log_deprecation("24.1", version_boundary): log_level = "DEPRECATED" else: log_level = "INFO" @@ -326,7 +326,8 @@ def _boot_with_cmdline( 'wget "https://github.com/FiloSottile/mkcert/releases/' "download/${latest_ver}/mkcert-" '${latest_ver}-linux-amd64"' - " -O mkcert" + " -O mkcert && " + "chmod 755 mkcert" ).ok # giddyup @@ -353,7 +354,7 @@ def _boot_with_cmdline( # and NoCloud operates in network timeframe After=systemd-networkd-wait-online.service After=networking.service - Before=cloud-init.service + Before=cloud-init-network.service [Service] Type=exec @@ -427,6 +428,10 @@ def test_nocloud_ftps_unencrypted_server_fails( " a scheme of ftps://, which is not allowed. Use ftp:// " "to allow connecting to insecure ftp servers.", ], + ignore_tracebacks=[ + 'ftplib.error_perm: 500 Command "AUTH" not understood.', + "UrlError: Attempted to connect to an insecure ftp server", + ], ) def test_nocloud_ftps_encrypted_server_succeeds( diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 32281756cd1..1c8344ab916 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -106,7 +106,9 @@ def push_file( # First push to a temporary directory because of permissions issues tmp_path = _get_tmp_path() self.instance.push_file(str(local_path), tmp_path) - assert self.execute("mv {} {}".format(tmp_path, str(remote_path))).ok + assert self.execute( + "mv {} {}".format(tmp_path, str(remote_path)) + ), f"Failed to push {tmp_path} to {remote_path}" def read_from_file(self, remote_path) -> str: result = self.execute("cat {}".format(remote_path)) @@ -294,6 +296,8 @@ def ip(self) -> str: and self.instance.execute_via_ssh ): self._ip = self.instance.ip + elif not isinstance(self.instance, LXDInstance): + self._ip = self.instance.ip except NotImplementedError: self._ip = "Unknown" return self._ip diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 0bf1b3d49e8..2d8b51ee362 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -17,7 +17,8 @@ from pycloudlib.gce.instance import GceInstance import cloudinit.config -from cloudinit.util import is_true, should_log_deprecation +from cloudinit import lifecycle +from cloudinit.util import is_true from tests.integration_tests.decorators import retry from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM @@ -138,7 +139,7 @@ def test_deprecated_message(self, class_client: IntegrationInstance): ) # the changed_version is 22.2 in schema for user.sudo key in # user-data. Pass 22.2 in against the client's version_boundary. 
- if should_log_deprecation("22.2", version_boundary): + if lifecycle.should_log_deprecation("22.2", version_boundary): log_level = "DEPRECATED" deprecation_count = 2 else: diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py index 8c7bc7839d0..c088240de1a 100644 --- a/tests/integration_tests/modules/test_hotplug.py +++ b/tests/integration_tests/modules/test_hotplug.py @@ -301,6 +301,7 @@ def test_multi_nic_hotplug(setup_image, session_cloud: IntegrationCloud): @pytest.mark.skipif(CURRENT_RELEASE <= FOCAL, reason="See LP: #2055397") @pytest.mark.skipif(PLATFORM != "ec2", reason="test is ec2 specific") +@pytest.mark.skip(reason="IMDS race, see GH-5373. Unskip when fixed.") def test_multi_nic_hotplug_vpc(setup_image, session_cloud: IntegrationCloud): """Tests that additional secondary NICs are routable from local networks after the hotplug hook is executed when network updates diff --git a/tests/integration_tests/modules/test_ubuntu_pro.py b/tests/integration_tests/modules/test_ubuntu_pro.py index f4438163425..0f0cb944aec 100644 --- a/tests/integration_tests/modules/test_ubuntu_pro.py +++ b/tests/integration_tests/modules/test_ubuntu_pro.py @@ -5,7 +5,7 @@ import pytest from pycloudlib.cloud import ImageType -from cloudinit.util import should_log_deprecation +from cloudinit import lifecycle from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.conftest import get_validated_source from tests.integration_tests.instances import ( @@ -143,7 +143,7 @@ def test_valid_token(self, client: IntegrationInstance): client, "DEPRECATION_INFO_BOUNDARY" ) # ubuntu_advantage key is deprecated in version 24.1 - if should_log_deprecation("24.1", version_boundary): + if lifecycle.should_log_deprecation("24.1", version_boundary): log_level = "DEPRECATED" else: log_level = "INFO" diff --git a/tests/integration_tests/test_instance_id.py b/tests/integration_tests/test_instance_id.py new file mode 100644 index 00000000000..dc2fbb0f00a --- /dev/null +++ b/tests/integration_tests/test_instance_id.py @@ -0,0 +1,98 @@ +from typing import cast + +import pytest +from pycloudlib.lxd.instance import LXDInstance + +from cloudinit import subp +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.integration_settings import PLATFORM +from tests.integration_tests.releases import CURRENT_RELEASE, FOCAL + +_INSTANCE_ID = 0 + + +def setup_meta_data(instance: LXDInstance): + """Increment the instance id and apply it to the instance.""" + global _INSTANCE_ID + _INSTANCE_ID += 1 + command = [ + "lxc", + "config", + "set", + instance.name, + f"user.meta-data=instance-id: test_{_INSTANCE_ID}", + ] + subp.subp(command) + + +# class TestInstanceID: +@pytest.mark.skipif( + PLATFORM not in ["lxd_container", "lxd_vm"] or CURRENT_RELEASE == FOCAL, + reason="Uses lxd-specific behavior.", +) +@pytest.mark.lxd_setup.with_args(setup_meta_data) +@pytest.mark.lxd_use_exec +def test_instance_id_changes(client: IntegrationInstance): + """Verify instance id change behavior + + If the id from the datasource changes, cloud-init should update the + instance id link. 
+ """ + client.execute("cloud-init status --wait") + # check that instance id is the one we set + assert ( + "test_1" + == client.execute("cloud-init query instance-id").stdout.rstrip() + ) + assert ( + "/var/lib/cloud/instances/test_1" + == client.execute( + "readlink -f /var/lib/cloud/instance" + ).stdout.rstrip() + ) + + instance = cast(LXDInstance, client.instance) + setup_meta_data(instance) + client.restart() + client.execute("cloud-init status --wait") + # check that instance id is the one we reset + assert ( + "test_2" + == client.execute("cloud-init query instance-id").stdout.rstrip() + ) + assert ( + "/var/lib/cloud/instances/test_2" + == client.execute( + "readlink -f /var/lib/cloud/instance" + ).stdout.rstrip() + ) + + +@pytest.mark.lxd_use_exec +def test_instance_id_no_changes(client: IntegrationInstance): + """Verify instance id no change behavior + + If the id from the datasource does not change, cloud-init should not + update the instance id link. + """ + instance_id = client.execute( + "cloud-init query instance-id" + ).stdout.rstrip() + assert ( + f"/var/lib/cloud/instances/{instance_id}" + == client.execute( + "readlink -f /var/lib/cloud/instance" + ).stdout.rstrip() + ) + client.restart() + client.execute("cloud-init status --wait") + assert ( + instance_id + == client.execute("cloud-init query instance-id").stdout.rstrip() + ) + assert ( + f"/var/lib/cloud/instances/{instance_id}" + == client.execute( + "readlink -f /var/lib/cloud/instance" + ).stdout.rstrip() + ) diff --git a/tests/integration_tests/test_kernel_command_line_match.py b/tests/integration_tests/test_kernel_command_line_match.py index 60bda90726c..57abf513ecc 100644 --- a/tests/integration_tests/test_kernel_command_line_match.py +++ b/tests/integration_tests/test_kernel_command_line_match.py @@ -22,7 +22,7 @@ ( ( "ds=nocloud;s=http://my-url/;h=hostname", - "DataSourceNoCloud [seed=None][dsmode=net]", + "DataSourceNoCloud", True, ), ("ci.ds=openstack", "DataSourceOpenStack", True), @@ -49,17 +49,14 @@ def test_lxd_datasource_kernel_override( override_kernel_command_line(ds_str, client) if cmdline_configured: assert ( - "Machine is configured by the kernel command line to run on single" + "Kernel command line set to use a single" f" datasource {configured}" ) in client.execute("cat /var/log/cloud-init.log") else: # verify that no plat log = client.execute("cat /var/log/cloud-init.log") - assert (f"Detected platform: {configured}") in log - assert ( - "Machine is configured by the kernel " - "command line to run on single " - ) not in log + assert f"Detected {configured}" in log + assert "Kernel command line set to use a single" not in log GH_REPO_PATH = "https://raw.githubusercontent.com/canonical/cloud-init/main/" @@ -107,8 +104,8 @@ def test_lxd_datasource_kernel_override_nocloud_net( ) assert url_val in client.execute("cloud-init query subplatform").stdout assert ( - "Detected platform: DataSourceNoCloudNet [seed=None]" - "[dsmode=net]. Checking for active instance data" + "Detected platform: DataSourceNoCloudNet. 
Checking for active" + "instance data" ) in logs diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 970a2406d8a..0a53eabb50e 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -14,7 +14,7 @@ IS_UBUNTU, MANTIC, ) -from tests.integration_tests.util import verify_clean_log +from tests.integration_tests.util import verify_clean_boot, verify_clean_log LOG = logging.getLogger("integration_testing.test_upgrade") @@ -81,11 +81,8 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): pre_cloud_blame = instance.execute("cloud-init analyze blame") # Ensure no issues pre-upgrade - log = instance.read_from_file("/var/log/cloud-init.log") - assert not json.loads(pre_result)["v1"]["errors"] - try: - verify_clean_log(log) + verify_clean_boot(instance) except AssertionError: LOG.warning( "There were errors/warnings/tracebacks pre-upgrade. " @@ -122,10 +119,7 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): post_cloud_blame = instance.execute("cloud-init analyze blame") # Ensure no issues post-upgrade - assert not json.loads(pre_result)["v1"]["errors"] - - log = instance.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log) + verify_clean_boot(instance) # Ensure important things stayed the same assert pre_hostname == post_hostname diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index d218861f549..4830cf958de 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -14,6 +14,7 @@ import pytest from cloudinit.subp import subp +from tests.integration_tests.integration_settings import PLATFORM LOG = logging.getLogger("integration_testing.util") @@ -65,12 +66,14 @@ def verify_clean_boot( instance: "IntegrationInstance", ignore_warnings: Optional[Union[List[str], bool]] = None, ignore_errors: Optional[Union[List[str], bool]] = None, + ignore_tracebacks: Optional[Union[List[str], bool]] = None, require_warnings: Optional[list] = None, require_errors: Optional[list] = None, ): """raise assertions if the client experienced unexpected warnings or errors - fail when an required error isn't found + Fail when a required error isn't found. + Expected warnings and errors are defined in this function. This function is similar to verify_clean_log, hence the similar name. @@ -89,6 +92,80 @@ def verify_clean_boot( require_errors: Optional[list] = None, fail_when_expected_not_found: optional list of expected errors """ + + def append_or_create_list( + maybe_list: Optional[Union[List[str], bool]], value: str + ) -> Optional[Union[List[str], bool]]: + """handle multiple types""" + if isinstance(maybe_list, list): + maybe_list.append(value) + elif maybe_list is True: + return True # Ignoring all texts, so no need to append. 
+ elif maybe_list in (None, False): + maybe_list = [value] + return maybe_list + + traceback_texts = [] + # Define exceptions by matrix of platform and Ubuntu release + if "azure" == PLATFORM: + # Consistently on all Azure launches: + ignore_warnings = append_or_create_list( + ignore_warnings, "No lease found; using default endpoint" + ) + elif "lxd_vm" == PLATFORM: + # Ubuntu lxd storage + ignore_warnings = append_or_create_list( + ignore_warnings, "thinpool by default on Ubuntu due to LP #1982780" + ) + ignore_warnings = append_or_create_list( + ignore_warnings, + "Could not match supplied host pattern, ignoring:", + ) + elif "oracle" == PLATFORM: + # LP: #1842752 + ignore_errors = append_or_create_list( + ignore_errors, "Stderr: RTNETLINK answers: File exists" + ) + traceback_texts.append("Stderr: RTNETLINK answers: File exists") + # LP: #1833446 + ignore_warnings = append_or_create_list( + ignore_warnings, + "UrlError: 404 Client Error: Not Found for url: " + "http://169.254.169.254/latest/meta-data/", + ) + traceback_texts.append( + "UrlError: 404 Client Error: Not Found for url: " + "http://169.254.169.254/latest/meta-data/" + ) + # Oracle has a file in /etc/cloud/cloud.cfg.d that contains + # users: + # - default + # - name: opc + # ssh_redirect_user: true + # This can trigger a warning about opc having no public key + ignore_warnings = append_or_create_list( + ignore_warnings, + "Unable to disable SSH logins for opc given ssh_redirect_user", + ) + + # Feed the collected traceback texts into the traceback allow-list + for traceback_text in traceback_texts: + ignore_tracebacks = append_or_create_list( + ignore_tracebacks, traceback_text + ) + + _verify_clean_boot( + instance, + ignore_warnings=ignore_warnings, + ignore_errors=ignore_errors, + ignore_tracebacks=ignore_tracebacks, + require_warnings=require_warnings, + require_errors=require_errors, + ) + + +def _verify_clean_boot( + instance: "IntegrationInstance", + ignore_warnings: Optional[Union[List[str], bool]] = None, + ignore_errors: Optional[Union[List[str], bool]] = None, + ignore_tracebacks: Optional[Union[List[str], bool]] = None, + require_warnings: Optional[list] = None, + require_errors: Optional[list] = None, +): ignore_errors = ignore_errors or [] ignore_warnings = ignore_warnings or [] require_errors = require_errors or [] @@ -108,9 +185,9 @@ def verify_clean_boot( if expected in current_error: required_errors_found.add(expected) - # check for unexpected errors if ignore_errors is True: continue + # check for unexpected errors for expected in [*ignore_errors, *require_errors]: if expected in current_error: break @@ -125,9 +202,9 @@ def verify_clean_boot( if expected in current_warning: required_warnings_found.add(expected) - # check for unexpected warnings if ignore_warnings is True: continue + # check for unexpected warnings for expected in [*ignore_warnings, *require_warnings]: if expected in current_warning: break @@ -168,6 +245,28 @@ def verify_clean_boot( ) assert not errors, message + if ignore_tracebacks is True: + return + # assert no unexpected tracebacks + expected_traceback_count = 0 + traceback_count = int( + instance.execute( + "grep --count Traceback /var/log/cloud-init.log" + ).stdout.strip() + ) + if ignore_tracebacks: + for expected_traceback in ignore_tracebacks: + expected_traceback_count += int( + instance.execute( + f"grep --count '{expected_traceback}'" + " /var/log/cloud-init.log" + ).stdout.strip() + ) + assert expected_traceback_count == traceback_count, ( + f"{traceback_count - expected_traceback_count} unexpected traceback(s)" + " found in /var/log/cloud-init.log" + ) + def verify_clean_log(log: str, ignore_deprecations: bool = True): """Assert no unexpected tracebacks or warnings 
in logs""" diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py index 7dfdfac6edc..b1d9f585d30 100644 --- a/tests/unittests/cmd/devel/test_logs.py +++ b/tests/unittests/cmd/devel/test_logs.py @@ -4,7 +4,6 @@ import os import re from datetime import datetime -from io import StringIO import pytest @@ -21,22 +20,19 @@ @mock.patch("cloudinit.cmd.devel.logs.os.getuid") class TestCollectLogs: def test_collect_logs_with_userdata_requires_root_user( - self, m_getuid, tmpdir + self, m_getuid, tmpdir, caplog ): """collect-logs errors when non-root user collects userdata .""" m_getuid.return_value = 100 # non-root output_tarfile = tmpdir.join("logs.tgz") - with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr: - assert 1 == logs.collect_logs( - output_tarfile, include_userdata=True - ) + assert 1 == logs.collect_logs(output_tarfile, include_userdata=True) assert ( "To include userdata, root user is required." - " Try sudo cloud-init collect-logs\n" == m_stderr.getvalue() + " Try sudo cloud-init collect-logs" in caplog.text ) def test_collect_logs_creates_tarfile( - self, m_getuid, m_log_paths, mocker, tmpdir + self, m_getuid, m_log_paths, mocker, tmpdir, caplog ): """collect-logs creates a tarfile with all related cloud-init info.""" m_getuid.return_value = 100 @@ -101,13 +97,10 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): ) stdout.write(expected_subp[cmd_tuple]) - fake_stderr = mock.MagicMock() - mocker.patch(M_PATH + "subp", side_effect=fake_subp) mocker.patch( M_PATH + "subprocess.call", side_effect=fake_subprocess_call ) - mocker.patch(M_PATH + "sys.stderr", fake_stderr) mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", []) mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", []) logs.collect_logs(output_tarfile, include_userdata=False) @@ -151,10 +144,10 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): assert "results" == load_text_file( os.path.join(out_logdir, "run", "cloud-init", "results.json") ) - fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) + assert f"Wrote {output_tarfile}" in caplog.text def test_collect_logs_includes_optional_userdata( - self, m_getuid, mocker, tmpdir, m_log_paths + self, m_getuid, mocker, tmpdir, m_log_paths, caplog ): """collect-logs include userdata when --include-userdata is set.""" m_getuid.return_value = 0 @@ -215,13 +208,10 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): ) stdout.write(expected_subp[cmd_tuple]) - fake_stderr = mock.MagicMock() - mocker.patch(M_PATH + "subp", side_effect=fake_subp) mocker.patch( M_PATH + "subprocess.call", side_effect=fake_subprocess_call ) - mocker.patch(M_PATH + "sys.stderr", fake_stderr) mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", []) mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", []) logs.collect_logs(output_tarfile, include_userdata=True) @@ -239,7 +229,7 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): m_log_paths.instance_data_sensitive.name, ) ) - fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) + assert f"Wrote {output_tarfile}" in caplog.text @pytest.mark.parametrize( "cmd, expected_file_contents, expected_return_value", @@ -266,19 +256,18 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): def test_write_command_output_to_file( self, m_getuid, - tmpdir, + tmp_path, cmd, expected_file_contents, expected_return_value, ): m_getuid.return_value = 100 - output_file = tmpdir.join("test-output-file.txt") + output_file = tmp_path / "test-output-file.txt" return_output 
= logs._write_command_output_to_file( - filename=output_file, cmd=cmd, + file_path=output_file, msg="", - verbosity=1, ) assert expected_return_value == return_output @@ -292,16 +281,15 @@ def test_write_command_output_to_file( ], ) def test_stream_command_output_to_file( - self, m_getuid, tmpdir, cmd, expected_file_contents + self, m_getuid, tmp_path, cmd, expected_file_contents ): m_getuid.return_value = 100 - output_file = tmpdir.join("test-output-file.txt") + output_file = tmp_path / "test-output-file.txt" logs._stream_command_output_to_file( - filename=output_file, cmd=cmd, + file_path=output_file, msg="", - verbosity=1, ) assert expected_file_contents == load_text_file(output_file) @@ -382,7 +370,6 @@ def test_include_installer_logs_when_present( logs._collect_installer_logs( log_dir=tmpdir.strpath, include_userdata=include_userdata, - verbosity=0, ) expect_userdata = bool(include_userdata and apport_sensitive_files) # when subiquity artifacts exist, and userdata set true, expect logs diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py index f9b3faab130..bad728f2a72 100644 --- a/tests/unittests/cmd/test_main.py +++ b/tests/unittests/cmd/test_main.py @@ -13,7 +13,9 @@ from cloudinit.util import ensure_dir, load_text_file, write_file from tests.unittests.helpers import FilesystemMockingTestCase, wrap_and_call -MyArgs = namedtuple("MyArgs", "debug files force local reporter subcommand") +MyArgs = namedtuple( + "MyArgs", "debug files force local reporter subcommand skip_log_setup" +) class TestMain(FilesystemMockingTestCase): @@ -76,6 +78,7 @@ def test_main_init_run_net_runs_modules(self): local=False, reporter=None, subcommand="init", + skip_log_setup=False, ) (_item1, item2) = wrap_and_call( "cloudinit.cmd.main", @@ -122,6 +125,7 @@ def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): local=False, reporter=None, subcommand="init", + skip_log_setup=False, ) def set_hostname(name, cfg, cloud, args): diff --git a/tests/unittests/config/test_cc_ansible.py b/tests/unittests/config/test_cc_ansible.py index 271d9d037ec..b5b25a64286 100644 --- a/tests/unittests/config/test_cc_ansible.py +++ b/tests/unittests/config/test_cc_ansible.py @@ -7,7 +7,7 @@ from pytest import mark, param, raises -from cloudinit import util +from cloudinit import lifecycle from cloudinit.config import cc_ansible from cloudinit.config.schema import ( SchemaValidationError, @@ -292,7 +292,7 @@ def test_required_keys(self, cfg, exception, mocker): mocker.patch(M_PATH + "AnsiblePull.check_deps") mocker.patch( M_PATH + "AnsiblePull.get_version", - return_value=cc_ansible.Version(2, 7, 1), + return_value=cc_ansible.lifecycle.Version(2, 7, 1), ) mocker.patch( M_PATH + "AnsiblePullDistro.is_installed", @@ -415,7 +415,7 @@ def test_parse_version_distro(self, m_subp): """Verify that the expected version is returned""" assert cc_ansible.AnsiblePullDistro( get_cloud().distro - ).get_version() == util.Version(2, 10, 8) + ).get_version() == lifecycle.Version(2, 10, 8) @mock.patch("cloudinit.subp.subp", side_effect=[(pip_version, "")]) def test_parse_version_pip(self, m_subp): @@ -424,7 +424,7 @@ def test_parse_version_pip(self, m_subp): distro.do_as = MagicMock(return_value=(pip_version, "")) pip = cc_ansible.AnsiblePullPip(distro, "root") received = pip.get_version() - expected = util.Version(2, 13, 2) + expected = lifecycle.Version(2, 13, 2) assert received == expected @mock.patch(M_PATH + "subp.subp", return_value=("stdout", "stderr")) diff --git 
a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py index 4795357c039..7e85987b744 100644 --- a/tests/unittests/config/test_cc_mounts.py +++ b/tests/unittests/config/test_cc_mounts.py @@ -1,8 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import math import os.path import re +import textwrap from collections import namedtuple from unittest import mock @@ -27,193 +29,176 @@ M_PATH = "cloudinit.config.cc_mounts." -class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): - def setUp(self): - super(TestSanitizeDevname, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - - def _touch(self, path): - path = os.path.join(self.new_root, path.lstrip("/")) +class TestSanitizeDevname: + def _touch(self, path, new_root): + path = os.path.join(new_root, path.lstrip("/")) basedir = os.path.dirname(path) if not os.path.exists(basedir): os.makedirs(basedir) open(path, "a").close() - def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip("/")) + def _makedirs(self, directory, new_root): + directory = os.path.join(new_root, directory.lstrip("/")) if not os.path.exists(directory): os.makedirs(directory) - def mock_existence_of_disk(self, disk_path): - self._touch(disk_path) - self._makedirs(os.path.join("/sys/block", disk_path.split("/")[-1])) + def mock_existence_of_disk(self, disk_path, new_root): + self._touch(disk_path, new_root) + self._makedirs( + os.path.join("/sys/block", disk_path.split("/")[-1]), new_root + ) - def mock_existence_of_partition(self, disk_path, partition_number): - self.mock_existence_of_disk(disk_path) - self._touch(disk_path + str(partition_number)) + def mock_existence_of_partition( + self, disk_path, partition_number, new_root + ): + self.mock_existence_of_disk(disk_path, new_root) + self._touch(disk_path + str(partition_number), new_root) disk_name = disk_path.split("/")[-1] self._makedirs( os.path.join( "/sys/block", disk_name, disk_name + str(partition_number) - ) + ), + new_root, ) - def test_existent_full_disk_path_is_returned(self): + def test_existent_full_disk_path_is_returned(self, fake_filesystem): disk_path = "/dev/sda" - self.mock_existence_of_disk(disk_path) - self.assertEqual( - disk_path, - cc_mounts.sanitize_devname(disk_path, lambda x: None), + self.mock_existence_of_disk(disk_path, fake_filesystem) + assert disk_path == cc_mounts.sanitize_devname( + disk_path, lambda x: None ) - def test_existent_disk_name_returns_full_path(self): + def test_existent_disk_name_returns_full_path(self, fake_filesystem): disk_name = "sda" disk_path = "/dev/" + disk_name - self.mock_existence_of_disk(disk_path) - self.assertEqual( - disk_path, - cc_mounts.sanitize_devname(disk_name, lambda x: None), + self.mock_existence_of_disk(disk_path, fake_filesystem) + assert disk_path == cc_mounts.sanitize_devname( + disk_name, lambda x: None ) - def test_existent_meta_disk_is_returned(self): + def test_existent_meta_disk_is_returned(self, fake_filesystem): actual_disk_path = "/dev/sda" - self.mock_existence_of_disk(actual_disk_path) - self.assertEqual( - actual_disk_path, - cc_mounts.sanitize_devname( - "ephemeral0", - lambda x: actual_disk_path, - ), + self.mock_existence_of_disk(actual_disk_path, fake_filesystem) + assert actual_disk_path == cc_mounts.sanitize_devname( + "ephemeral0", + lambda x: actual_disk_path, ) - def test_existent_meta_partition_is_returned(self): + def 
test_existent_meta_partition_is_returned(self, fake_filesystem): disk_name, partition_part = "/dev/sda", "1" actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname( - "ephemeral0.1", - lambda x: disk_name, - ), + self.mock_existence_of_partition( + disk_name, partition_part, fake_filesystem + ) + assert actual_partition_path == cc_mounts.sanitize_devname( + "ephemeral0.1", + lambda x: disk_name, ) - def test_existent_meta_partition_with_p_is_returned(self): + def test_existent_meta_partition_with_p_is_returned(self, fake_filesystem): disk_name, partition_part = "/dev/sda", "p1" actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname( - "ephemeral0.1", - lambda x: disk_name, - ), + self.mock_existence_of_partition( + disk_name, partition_part, fake_filesystem + ) + assert actual_partition_path == cc_mounts.sanitize_devname( + "ephemeral0.1", + lambda x: disk_name, ) - def test_first_partition_returned_if_existent_disk_is_partitioned(self): + def test_first_partition_returned_if_existent_disk_is_partitioned( + self, fake_filesystem + ): disk_name, partition_part = "/dev/sda", "1" actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname( - "ephemeral0", - lambda x: disk_name, - ), + self.mock_existence_of_partition( + disk_name, partition_part, fake_filesystem + ) + assert actual_partition_path == cc_mounts.sanitize_devname( + "ephemeral0", + lambda x: disk_name, ) - def test_nth_partition_returned_if_requested(self): + def test_nth_partition_returned_if_requested(self, fake_filesystem): disk_name, partition_part = "/dev/sda", "3" actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname( - "ephemeral0.3", - lambda x: disk_name, - ), + self.mock_existence_of_partition( + disk_name, partition_part, fake_filesystem + ) + assert actual_partition_path == cc_mounts.sanitize_devname( + "ephemeral0.3", + lambda x: disk_name, ) - def test_transformer_returning_none_returns_none(self): - self.assertIsNone( + def test_transformer_returning_none_returns_none(self, fake_filesystem): + assert ( cc_mounts.sanitize_devname( "ephemeral0", lambda x: None, ) + is None ) - def test_missing_device_returns_none(self): - self.assertIsNone( + def test_missing_device_returns_none(self, fake_filesystem): + assert ( cc_mounts.sanitize_devname( "/dev/sda", None, ) + is None ) - def test_missing_sys_returns_none(self): + def test_missing_sys_returns_none(self, fake_filesystem): disk_path = "/dev/sda" - self._makedirs(disk_path) - self.assertIsNone( + self._makedirs(disk_path, fake_filesystem) + assert ( cc_mounts.sanitize_devname( disk_path, None, ) + is None ) - def test_existent_disk_but_missing_partition_returns_none(self): + def test_existent_disk_but_missing_partition_returns_none( + self, fake_filesystem + ): disk_path = "/dev/sda" - self.mock_existence_of_disk(disk_path) - self.assertIsNone( + self.mock_existence_of_disk(disk_path, fake_filesystem) + assert ( cc_mounts.sanitize_devname( "ephemeral0.1", lambda x: disk_path, ) + is None ) - def test_network_device_returns_network_device(self): + def 
test_network_device_returns_network_device(self, fake_filesystem): disk_path = "netdevice:/path" - self.assertEqual( + assert disk_path == cc_mounts.sanitize_devname( disk_path, - cc_mounts.sanitize_devname( - disk_path, - None, - ), + None, ) - def test_device_aliases_remapping(self): + def test_device_aliases_remapping(self, fake_filesystem): disk_path = "/dev/sda" - self.mock_existence_of_disk(disk_path) - self.assertEqual( - disk_path, - cc_mounts.sanitize_devname( - "mydata", lambda x: None, {"mydata": disk_path} - ), + self.mock_existence_of_disk(disk_path, fake_filesystem) + assert disk_path == cc_mounts.sanitize_devname( + "mydata", lambda x: None, {"mydata": disk_path} ) -class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase): - def setUp(self): - super(TestSwapFileCreation, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - - self.fstab_path = os.path.join(self.new_root, "etc/fstab") - self.swap_path = os.path.join(self.new_root, "swap.img") +class TestSwapFileCreation: + @pytest.fixture(autouse=True) + def setup(self, mocker, fake_filesystem: str): + self.new_root = fake_filesystem + self.swap_path = os.path.join(fake_filesystem, "swap.img") + fstab_path = os.path.join(fake_filesystem, "etc/fstab") self._makedirs("/etc") - self.add_patch( - "cloudinit.config.cc_mounts.FSTAB_PATH", - "mock_fstab_path", - self.fstab_path, - autospec=False, - ) - - self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp") - - self.add_patch( - "cloudinit.config.cc_mounts.util.mounts", - "mock_util_mounts", + self.m_fstab = mocker.patch(f"{M_PATH}FSTAB_PATH", fstab_path) + self.m_subp = mocker.patch(f"{M_PATH}subp.subp") + self.m_mounts = mocker.patch( + f"{M_PATH}util.mounts", return_value={ "/dev/sda1": { "fstype": "ext4", @@ -257,7 +242,7 @@ def test_swap_creation_method_fallocate_on_xfs( m_get_mount_info.return_value = ["", "xfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + self.m_subp.assert_has_calls( [ mock.call( ["fallocate", "-l", "0M", self.swap_path], capture=True @@ -276,7 +261,7 @@ def test_swap_creation_method_xfs( m_get_mount_info.return_value = ["", "xfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + self.m_subp.assert_has_calls( [ mock.call( [ @@ -302,7 +287,7 @@ def test_swap_creation_method_btrfs( m_get_mount_info.return_value = ["", "btrfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + self.m_subp.assert_has_calls( [ mock.call(["truncate", "-s", "0", self.swap_path]), mock.call(["chattr", "+C", self.swap_path]), @@ -324,7 +309,7 @@ def test_swap_creation_method_ext4( m_get_mount_info.return_value = ["", "ext4"] cc_mounts.handle(None, self.cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + self.m_subp.assert_has_calls( [ mock.call( ["fallocate", "-l", "0M", self.swap_path], capture=True @@ -335,35 +320,20 @@ def test_swap_creation_method_ext4( ) -class TestFstabHandling(test_helpers.FilesystemMockingTestCase): +class TestFstabHandling: swap_path = "/dev/sdb1" - def setUp(self): - super(TestFstabHandling, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) + @pytest.fixture(autouse=True) + def setup(self, mocker, fake_filesystem: str): + self.new_root = fake_filesystem self.fstab_path = os.path.join(self.new_root, "etc/fstab") self._makedirs("/etc") - self.add_patch( - "cloudinit.config.cc_mounts.FSTAB_PATH", - "mock_fstab_path", - self.fstab_path, - 
autospec=False, - ) - - self.add_patch( - "cloudinit.config.cc_mounts._is_block_device", - "mock_is_block_device", - return_value=True, - ) - - self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp") - - self.add_patch( - "cloudinit.config.cc_mounts.util.mounts", - "mock_util_mounts", + self.m_fstab = mocker.patch(f"{M_PATH}FSTAB_PATH", self.fstab_path) + self.m_subp = mocker.patch(f"{M_PATH}subp.subp") + self.m_mounts = mocker.patch( + f"{M_PATH}util.mounts", return_value={ "/dev/sda1": { "fstype": "ext4", @@ -373,6 +343,10 @@ def setUp(self): }, ) + self.m_is_block_device = mocker.patch( + f"{M_PATH}_is_block_device", return_value=True + ) + self.mock_cloud = mock.Mock() self.mock_log = mock.Mock() self.mock_cloud.device_name_to_device = self.device_name_to_device @@ -392,7 +366,7 @@ def device_name_to_device(self, path): def test_no_fstab(self): """Handle images which do not include an fstab.""" - self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH)) + assert not os.path.exists(cc_mounts.FSTAB_PATH) fstab_expected_content = ( "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n" % (self.swap_path,) @@ -400,19 +374,70 @@ def test_no_fstab(self): cc_mounts.handle(None, {}, self.mock_cloud, []) with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) + assert fstab_expected_content == fstab_new_content - def test_swap_integrity(self): - """Ensure that the swap file is correctly created and can - swapon successfully. Fixing the corner case of: - kernel: swapon: swapfile has holes""" + @pytest.mark.parametrize( + "fstype, expected", + [ + ( + "btrfs", + [ + mock.call(["truncate", "-s", "0", "/swap.img"]), + mock.call(["chattr", "+C", "/swap.img"]), + mock.call( + ["fallocate", "-l", "0M", "/swap.img"], capture=True + ), + ], + ), + ( + "xfs", + [ + mock.call( + [ + "dd", + "if=/dev/zero", + "of=/swap.img", + "bs=1M", + "count=0", + ], + capture=True, + ) + ], + ), + ( + "ext4", + [ + mock.call( + ["fallocate", "-l", "0M", "/swap.img"], capture=True + ) + ], + ), + ], + ) + def test_swap_creation_command(self, fstype, expected, mocker): + """Ensure that the swap file is correctly created. + + Different filesystems require different methods. 
+ """ + mocker.patch( + "cloudinit.util.get_mount_info", return_value=["", fstype] + ) + mocker.patch("cloudinit.util.kernel_version", return_value=(4, 17)) fstab = "/swap.img swap swap defaults 0 0\n" with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab) - cc = {"swap": ["filename: /swap.img", "size: 512", "maxsize: 512"]} + cc = { + "swap": {"filename": "/swap.img", "size": "512", "maxsize": "512"} + } cc_mounts.handle(None, cc, self.mock_cloud, []) + assert self.m_subp.call_args_list == expected + [ + mock.call(["mkswap", "/swap.img"]), + mock.call(["swapon", "-a"]), + mock.call(["mount", "-a"]), + mock.call(["systemctl", "daemon-reload"]), + ] def test_fstab_no_swap_device(self): """Ensure that cloud-init adds a discovered swap partition @@ -431,7 +456,7 @@ def test_fstab_no_swap_device(self): with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) + assert fstab_expected_content == fstab_new_content def test_fstab_same_swap_device_already_configured(self): """Ensure that cloud-init will not add a swap device if the same @@ -449,7 +474,7 @@ def test_fstab_same_swap_device_already_configured(self): with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) + assert fstab_expected_content == fstab_new_content def test_fstab_alternate_swap_device_already_configured(self): """Ensure that cloud-init will add a discovered swap device to @@ -470,30 +495,84 @@ def test_fstab_alternate_swap_device_already_configured(self): with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) + assert fstab_expected_content == fstab_new_content def test_no_change_fstab_sets_needs_mount_all(self): """verify unchanged fstab entries are mounted if not call mount -a""" - fstab_original_content = ( - "LABEL=cloudimg-rootfs / ext4 defaults 0 0\n" - "LABEL=UEFI /boot/efi vfat defaults 0 0\n" - "/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n" - ) - fstab_expected_content = fstab_original_content + fstab_original_content = textwrap.dedent( + f""" + LABEL=cloudimg-rootfs / ext4 defaults 0 0 + LABEL=UEFI /boot/efi vfat defaults 0 0 + /dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2 + {self.swap_path} none swap sw,comment=cloudconfig 0 0 + """ # noqa: E501 + ).strip() cc = {"mounts": [["/dev/vdb", "/mnt", "auto", "defaults,noexec"]]} with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab_original_content) + cc_mounts.handle(None, cc, self.mock_cloud, []) with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) - cc_mounts.handle(None, cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + assert fstab_original_content == fstab_new_content.strip() + self.m_subp.assert_has_calls( [ mock.call(["mount", "-a"]), mock.call(["systemctl", "daemon-reload"]), ] ) + def test_fstab_mounts_combinations(self): + """Verify various combinations of mount entries in /etc/fstab.""" + # First and third lines show that even with errors we keep fstab lines + # unedited unless they contain the cloudconfig comment. + # 2nd line shows we remove a line with a cloudconfig comment that + # can be added back in with the mounts config. + # 4th line shows we remove a line with a cloudconfig comment + # indiscriminately. 
+ fstab_original_content = ( + "LABEL=keepme none ext4 defaults 0 0\n" + "/dev/sda1 /a auto defaults,comment=cloudconfig 0 2\n" + "LABEL=UEFI\n" + "/dev/sda2 /b auto defaults,comment=cloudconfig 0 2\n" + ) + with open(cc_mounts.FSTAB_PATH, "w") as fd: + fd.write(fstab_original_content) + cfg = { + "mounts": [ + # Line that will be overridden due to later None value + ["/dev/sda3", "dontcare", "auto", "defaults", "0", "0"], + # Add the one missing default field to the end + ["/dev/sda4", "/mnt2", "auto", "nofail", "1"], + # Remove all "/dev/sda3"'s here and earlier + ["/dev/sda3", None], + # As long as we have two fields we get the rest of the defaults + ["/dev/sda5", "/mnt3"], + # Takes the place of the line that was removed from fstab + # with the cloudconfig comment + ["/dev/sda1", "/mnt", "xfs", "auto", None, "2"], + # The line that survives after the previous Nones + ["/dev/sda3", "/mnt4", "btrfs"], + ] + } + cc_mounts.handle(None, cfg, self.mock_cloud, []) + with open(cc_mounts.FSTAB_PATH, "r") as fd: + fstab_new_content = fd.read() + + assert ( + fstab_new_content.strip() + == textwrap.dedent( + """ + LABEL=keepme none ext4 defaults 0 0 + LABEL=UEFI + /dev/sda4 /mnt2 auto nofail,comment=cloudconfig 1 2 + /dev/sda5 /mnt3 auto defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev,comment=cloudconfig 0 2 + /dev/sda1 /mnt xfs auto,comment=cloudconfig 0 2 + /dev/sda3 /mnt4 btrfs defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev,comment=cloudconfig 0 2 + /dev/sdb1 none swap sw,comment=cloudconfig 0 0 + """ # noqa: E501 + ).strip() + ) + class TestCreateSwapfile: @pytest.mark.parametrize("fstype", ("xfs", "btrfs", "ext4", "other")) diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py index 49327bb67e5..a49fbf01baf 100644 --- a/tests/unittests/config/test_cc_ssh.py +++ b/tests/unittests/config/test_cc_ssh.py @@ -7,7 +7,7 @@ import pytest -from cloudinit import ssh_util, util +from cloudinit import lifecycle, ssh_util from cloudinit.config import cc_ssh from cloudinit.config.schema import ( SchemaValidationError, @@ -334,7 +334,7 @@ def test_ssh_hostkey_permissions( Otherwise, 600. """ m_gid.return_value = 10 if ssh_keys_group_exists else -1 - m_sshd_version.return_value = util.Version(sshd_version, 0) + m_sshd_version.return_value = lifecycle.Version(sshd_version, 0) key_path = cc_ssh.KEY_FILE_TPL % "rsa" cloud = get_cloud(distro="centos") cc_ssh.handle("name", {"ssh_genkeytypes": ["rsa"]}, cloud, []) diff --git a/tests/unittests/config/test_cc_ubuntu_pro.py b/tests/unittests/config/test_cc_ubuntu_pro.py index df47e7ae41e..40f8035b30d 100644 --- a/tests/unittests/config/test_cc_ubuntu_pro.py +++ b/tests/unittests/config/test_cc_ubuntu_pro.py @@ -7,7 +7,7 @@ import pytest -from cloudinit import subp +from cloudinit import lifecycle, subp from cloudinit.config.cc_ubuntu_pro import ( _attach, _auto_attach, @@ -23,7 +23,6 @@ get_schema, validate_cloudconfig_schema, ) -from cloudinit.util import Version from tests.unittests.helpers import does_not_raise, mock, skipUnlessJsonSchema from tests.unittests.util import get_cloud @@ -452,8 +451,10 @@ class TestUbuntuProSchema: # we're using a high enough version of jsonschema to not need # to skip this test. 
JSONSCHEMA_SKIP_REASON - if Version.from_str(getattr(jsonschema, "__version__", "999")) - < Version(4) + if lifecycle.Version.from_str( + getattr(jsonschema, "__version__", "999") + ) + < lifecycle.Version(4) else "", id="deprecation_of_ubuntu_advantage_skip_old_json", ), diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py index 742f9e8cf53..ec0024971ad 100644 --- a/tests/unittests/config/test_cc_write_files.py +++ b/tests/unittests/config/test_cc_write_files.py @@ -9,6 +9,7 @@ import tempfile import pytest +import responses from cloudinit import util from cloudinit.config.cc_write_files import decode_perms, handle, write_files @@ -84,6 +85,16 @@ def test_simple(self): ) self.assertEqual(util.load_text_file(filename), expected) + def test_empty(self): + self.patchUtils(self.tmp) + filename = "/tmp/my.file" + write_files( + "test_empty", + [{"path": filename}], + self.owner, + ) + self.assertEqual(util.load_text_file(filename), "") + def test_append(self): self.patchUtils(self.tmp) existing = "hello " @@ -167,6 +178,71 @@ def test_handle_plain_text(self): "Unknown encoding type text/plain", self.logs.getvalue() ) + def test_file_uri(self): + self.patchUtils(self.tmp) + src_path = "/tmp/file-uri" + dst_path = "/tmp/file-uri-target" + content = "asdf" + util.write_file(src_path, content) + cfg = { + "write_files": [ + { + "source": {"uri": "file://" + src_path}, + "path": dst_path, + } + ] + } + cc = self.tmp_cloud("ubuntu") + handle("ignored", cfg, cc, []) + self.assertEqual( + util.load_text_file(src_path), util.load_text_file(dst_path) + ) + + @responses.activate + def test_http_uri(self): + self.patchUtils(self.tmp) + path = "/tmp/http-uri-target" + url = "http://hostname/path" + content = "more asdf" + responses.add(responses.GET, url, content) + cfg = { + "write_files": [ + { + "source": { + "uri": url, + "headers": { + "foo": "bar", + "blah": "blah", + }, + }, + "path": path, + } + ] + } + cc = self.tmp_cloud("ubuntu") + handle("ignored", cfg, cc, []) + self.assertEqual(content, util.load_text_file(path)) + + def test_uri_fallback(self): + self.patchUtils(self.tmp) + src_path = "/tmp/INVALID" + dst_path = "/tmp/uri-fallback-target" + content = "asdf" + util.del_file(src_path) + cfg = { + "write_files": [ + { + "source": {"uri": "file://" + src_path}, + "content": content, + "encoding": "text/plain", + "path": dst_path, + } + ] + } + cc = self.tmp_cloud("ubuntu") + handle("ignored", cfg, cc, []) + self.assertEqual(content, util.load_text_file(dst_path)) + def test_deferred(self): self.patchUtils(self.tmp) file_path = "/tmp/deferred.file" @@ -249,6 +325,12 @@ class TestWriteFilesSchema: "write_files": [ { "append": False, + "source": { + "uri": "http://a.com/a", + "headers": { + "Authorization": "Bearer SOME_TOKEN" + }, + }, "content": "a", "encoding": "text/plain", "owner": "jeff", diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py index e6a9109ee19..c77262f508f 100644 --- a/tests/unittests/config/test_cc_yum_add_repo.py +++ b/tests/unittests/config/test_cc_yum_add_repo.py @@ -31,7 +31,8 @@ def test_bad_config(self): "yum_repos": { "epel-testing": { "name": "Extra Packages for Enterprise Linux 5 - Testing", - # At least one of baseurl or metalink must be present. + # At least one of baseurl or metalink or mirrorlist + # must be present. 
# Missing this should cause the repo not to be written # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch', "enabled": False, @@ -84,6 +85,43 @@ def test_metalink_config(self): for k, v in expected[section].items(): self.assertEqual(parser.get(section, k), v) + def test_mirrorlist_config(self): + cfg = { + "yum_repos": { + "epel-testing": { + "name": "Extra Packages for Enterprise Linux 5 - Testing", + "mirrorlist": "http://mirrors.blah.org/metalink?repo=rhel-$releasever", + "enabled": False, + "gpgcheck": True, + "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL", + "failovermethod": "priority", + }, + }, + } + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + cc_yum_add_repo.handle("yum_add_repo", cfg, None, []) + contents = util.load_text_file("/etc/yum.repos.d/epel-testing.repo") + parser = configparser.ConfigParser() + parser.read_string(contents) + expected = { + "epel-testing": { + "name": "Extra Packages for Enterprise Linux 5 - Testing", + "failovermethod": "priority", + "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL", + "enabled": "0", + "mirrorlist": "http://mirrors.blah.org/metalink?repo=rhel-$releasever", + "gpgcheck": "1", + } + } + for section in expected: + self.assertTrue( + parser.has_section(section), + "Contains section {0}".format(section), + ) + for k, v in expected[section].items(): + self.assertEqual(parser.get(section, k), v) + def test_write_config(self): cfg = { "yum_repos": { diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index e0baa63b99b..9401f2235ef 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -8,7 +8,7 @@ import pytest -from cloudinit import atomic_helper, log, util +from cloudinit import atomic_helper, lifecycle, log, util from cloudinit.cmd.devel import logs from cloudinit.gpg import GPG from tests.hypothesis import HAS_HYPOTHESIS @@ -152,7 +152,7 @@ def clear_deprecation_log(): # Since deprecations are de-duped, the existence (or non-existence) of # a deprecation warning in a previous test can cause the next test to # fail. - setattr(util.deprecate, "log", set()) + setattr(lifecycle.deprecate, "log", set()) PYTEST_VERSION_TUPLE = tuple(map(int, pytest.__version__.split("."))) diff --git a/tests/unittests/distros/test_aosc.py b/tests/unittests/distros/test_aosc.py new file mode 100644 index 00000000000..e8a66b7aef2 --- /dev/null +++ b/tests/unittests/distros/test_aosc.py @@ -0,0 +1,10 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from tests.unittests.distros import _get_distro +from tests.unittests.helpers import CiTestCase + + +class TestAOSC(CiTestCase): + def test_get_distro(self): + distro = _get_distro("aosc") + self.assertEqual(distro.osfamily, "aosc") diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py index 8fa7f0cc092..ebbbb418e8a 100644 --- a/tests/unittests/distros/test_create_users.py +++ b/tests/unittests/distros/test_create_users.py @@ -4,8 +4,7 @@ import pytest -from cloudinit import distros, features, ssh_util -from cloudinit.util import should_log_deprecation +from cloudinit import distros, features, lifecycle, ssh_util from tests.unittests.helpers import mock from tests.unittests.util import abstract_to_concrete @@ -145,7 +144,7 @@ def test_create_groups_with_dict_deprecated( expected_levels = ( ["WARNING", "DEPRECATED"] - if should_log_deprecation( + if lifecycle.should_log_deprecation( "23.1", features.DEPRECATION_INFO_BOUNDARY ) else ["INFO"] @@ -180,7 +179,7 @@ def test_explicit_sudo_false(self, m_subp, dist, caplog): expected_levels = ( ["WARNING", "DEPRECATED"] - if should_log_deprecation( + if lifecycle.should_log_deprecation( "22.2", features.DEPRECATION_INFO_BOUNDARY ) else ["INFO"] diff --git a/tests/unittests/net/network_configs.py b/tests/unittests/net/network_configs.py index b68319cc806..2b55bbf421a 100644 --- a/tests/unittests/net/network_configs.py +++ b/tests/unittests/net/network_configs.py @@ -3385,6 +3385,9 @@ route1=2001:67c::/32,2001:67c:1562::1 route2=3001:67c::/32,3001:67c:15::1 + [ethernet] + mtu=9000 + """ ), }, diff --git a/tests/unittests/net/test_network_manager.py b/tests/unittests/net/test_network_manager.py new file mode 100644 index 00000000000..2aa476d7d15 --- /dev/null +++ b/tests/unittests/net/test_network_manager.py @@ -0,0 +1,323 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import textwrap +from unittest import mock + +import yaml + +from cloudinit.net import network_manager, network_state +from tests.unittests.helpers import dir2dict + + +def assert_equal_dict(expected_d, found_d): + for p, c in expected_d.items(): + if p not in found_d: + continue + assert c == found_d[p] + + +class TestNetworkManagerRenderNetworkState: + def _parse_network_state_from_config(self, config): + with mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"): + config = yaml.safe_load(config) + return network_state.parse_net_config_data(config) + + def test_bond_dns_baseline(self, tmpdir): + + config = textwrap.dedent( + """\ + version: 1 + config: + - mac_address: 'xx:xx:xx:xx:xx:00' + mtu: 9000 + name: ens1f0np0 + subnets: [] + type: physical + - mac_address: 'xx:xx:xx:xx:xx:01' + mtu: 9000 + name: ens1f1np1 + subnets: [] + type: physical + - bond_interfaces: + - ens1f0np0 + - ens1f1np1 + mac_address: 'xx:xx:xx:xx:xx:00' + mtu: 9000 + name: bond0 + params: + bond-miimon: 100 + bond-mode: 802.3ad + bond-xmit_hash_policy: layer3+4 + subnets: [] + type: bond + - name: bond0.123 + subnets: + - address: 0.0.0.0 + ipv4: true + netmask: 255.255.255.0 + prefix: 24 + routes: + - gateway: 0.0.0.1 + netmask: 0.0.0.0 + network: 0.0.0.0 + type: static + type: vlan + vlan_id: 123 + vlan_link: bond0 + - address: 1.1.1.1 + search: hostname1 + type: nameserver + """ + ) + + expected_config = { + "/etc/NetworkManager/system-connections/cloud-init-ens1f0np0.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init ens1f0np0 + uuid=99c4bf6c-1691-53c4-bfe8-abdcb90b278a + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + mac-address=XX:XX:XX:XX:XX:00 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-ens1f1np1.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init ens1f1np1 + uuid=2685ec2b-1c26-583d-a660-0ab24201fef3 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + mac-address=XX:XX:XX:XX:XX:01 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-bond0.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=802.3ad + + [ethernet] + mtu=9000 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-bond0.123.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0.123 + uuid=7541e7a5-450b-570b-b3e8-a7f9eced114a + autoconnect-priority=120 + type=vlan + interface-name=bond0.123 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=123 + parent=54317911-f840-516b-a10d-82cb4c1f075c + + [ipv4] + method=manual + may-fail=false + address1=0.0.0.0/24 + route1=0.0.0.0/0,0.0.0.1 + dns=1.1.1.1; + dns-search=hostname1; + + """ + ), + } + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config(config) + target = str(tmpdir) + network_manager.Renderer().render_network_state(ns, target=target) + rendered_content = dir2dict(target) + assert_equal_dict(expected_config, rendered_content) + + def test_bond_dns_redacted_with_method_disabled(self, tmpdir): + + config = textwrap.dedent( + """\ + version: 1 + config: + - mac_address: 'xx:xx:xx:xx:xx:00' + mtu: 9000 + name: ens1f0np0 + subnets: [] + type: physical + - mac_address: 'xx:xx:xx:xx:xx:01' + mtu: 9000 + name: ens1f1np1 + subnets: [] + type: physical + - bond_interfaces: + - ens1f0np0 + - ens1f1np1 + mac_address: 'xx:xx:xx:xx:xx:00' + mtu: 9000 + name: bond0 + params: + bond-miimon: 100 + bond-mode: 802.3ad + bond-xmit_hash_policy: layer3+4 + subnets: [] + type: bond + - name: bond0.123 + subnets: + - address: 0.0.0.0 + ipv4: true + netmask: 255.255.255.0 + prefix: 24 + routes: + - gateway: 0.0.0.1 + netmask: 0.0.0.0 + network: 0.0.0.0 + type: ipv6_slaac # !! to force ipvx.method to be disabled + type: vlan + vlan_id: 123 + vlan_link: bond0 + - address: 1.1.1.1 + search: hostname1 + type: nameserver + """ + ) + + expected_config = { + "/etc/NetworkManager/system-connections/cloud-init-ens1f0np0.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init ens1f0np0 + uuid=99c4bf6c-1691-53c4-bfe8-abdcb90b278a + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + mac-address=XX:XX:XX:XX:XX:00 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-ens1f1np1.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init ens1f1np1 + uuid=2685ec2b-1c26-583d-a660-0ab24201fef3 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + mac-address=XX:XX:XX:XX:XX:01 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-bond0.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=802.3ad + + [ethernet] + mtu=9000 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-bond0.123.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0.123 + uuid=7541e7a5-450b-570b-b3e8-a7f9eced114a + autoconnect-priority=120 + type=vlan + interface-name=bond0.123 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=123 + parent=54317911-f840-516b-a10d-82cb4c1f075c + + [ipv6] + method=auto + may-fail=false + address1=0.0.0.0/24 + dns-search=hostname1; + + [ipv4] + method=disabled + route1=0.0.0.0/0,0.0.0.1 + + """ + ), + } + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config(config) + target = str(tmpdir) + network_manager.Renderer().render_network_state(ns, target=target) + rendered_content = dir2dict(target) + assert_equal_dict(expected_config, rendered_content) diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py index eaad90dc8e1..a03f60f86f8 100644 --- a/tests/unittests/net/test_network_state.py +++ b/tests/unittests/net/test_network_state.py @@ -5,7 +5,7 @@ import pytest import yaml -from cloudinit import util +from cloudinit import lifecycle from cloudinit.net import network_state from cloudinit.net.netplan import Renderer as NetplanRenderer from cloudinit.net.renderers import NAME_TO_RENDERER @@ -215,7 +215,7 @@ def test_v2_warns_deprecated_gateways( In netplan targets we perform a passthrough and the warning is not needed. 
""" - util.deprecate.__dict__["log"] = set() + lifecycle.deprecate.__dict__["log"] = set() ncfg = yaml.safe_load( cfg.format( gateway4="gateway4: 10.54.0.1", diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py index 7ae164140a0..6ec0bd75b0d 100644 --- a/tests/unittests/sources/helpers/test_openstack.py +++ b/tests/unittests/sources/helpers/test_openstack.py @@ -231,3 +231,129 @@ def test_bond_mac(self): assert expected == openstack.convert_net_json( network_json=network_json, known_macs=macs ) + + def test_dns_servers(self): + """ + Verify additional properties under subnet.routes are not rendered + """ + network_json = { + "links": [ + { + "id": "ens1f0np0", + "name": "ens1f0np0", + "type": "phy", + "ethernet_mac_address": "xx:xx:xx:xx:xx:00", + "mtu": 9000, + }, + { + "id": "ens1f1np1", + "name": "ens1f1np1", + "type": "phy", + "ethernet_mac_address": "xx:xx:xx:xx:xx:01", + "mtu": 9000, + }, + { + "id": "bond0", + "name": "bond0", + "type": "bond", + "bond_links": ["ens1f0np0", "ens1f1np1"], + "mtu": 9000, + "ethernet_mac_address": "xx:xx:xx:xx:xx:00", + "bond_mode": "802.3ad", + "bond_xmit_hash_policy": "layer3+4", + "bond_miimon": 100, + }, + { + "id": "bond0.123", + "name": "bond0.123", + "type": "vlan", + "vlan_link": "bond0", + "vlan_id": 123, + "vlan_mac_address": "xx:xx:xx:xx:xx:00", + }, + ], + "networks": [ + { + "id": "publicnet-ipv4", + "type": "ipv4", + "link": "bond0.123", + "ip_address": "x.x.x.x", + "netmask": "255.255.255.0", + "routes": [ + { + "network": "0.0.0.0", + "netmask": "0.0.0.0", + "gateway": "x.x.x.1", + "services": [ + {"type": "dns", "address": "1.1.1.1"}, + {"type": "dns", "address": "8.8.8.8"}, + ], + } + ], + "network_id": "00000000-0000-0000-0000-000000000000", + } + ], + "services": [], + } + expected = { + "version": 1, + "config": [ + { + "name": "ens1f0np0", + "type": "physical", + "mtu": 9000, + "subnets": [], + "mac_address": "xx:xx:xx:xx:xx:00", + }, + { + "name": "ens1f1np1", + "type": "physical", + "mtu": 9000, + "subnets": [], + "mac_address": "xx:xx:xx:xx:xx:01", + }, + { + "name": "bond0", + "type": "bond", + "mtu": 9000, + "subnets": [], + "mac_address": "xx:xx:xx:xx:xx:00", + "params": { + "bond-mode": "802.3ad", + "bond-xmit_hash_policy": "layer3+4", + "bond-miimon": 100, + }, + "bond_interfaces": ["ens1f0np0", "ens1f1np1"], + }, + { + "name": "bond0.123", + "type": "vlan", + "subnets": [ + { + "type": "static", + "netmask": "255.255.255.0", + "routes": [ + { + "network": "0.0.0.0", + "netmask": "0.0.0.0", + "gateway": "x.x.x.1", + } + ], + "address": "x.x.x.x", + "dns_nameservers": ["1.1.1.1", "8.8.8.8"], + "ipv4": True, + } + ], + "vlan_id": 123, + "vlan_link": "bond0", + }, + ], + } + macs = { + "xx:xx:xx:xx:xx:00": "ens1f0np0", + "xx:xx:xx:xx:xx:01": "ens1f1np1", + } + netcfg = openstack.convert_net_json( + network_json=network_json, known_macs=macs + ) + assert expected == netcfg diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index b96f5c718da..40c04016d67 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -54,6 +54,16 @@ def mock_wrapping_setup_ephemeral_networking(azure_ds): yield m +@pytest.fixture +def mock_wrapping_report_failure(azure_ds): + with mock.patch.object( + azure_ds, + "_report_failure", + wraps=azure_ds._report_failure, + ) as m: + yield m + + @pytest.fixture def mock_azure_helper_readurl(): with mock.patch( @@ -3764,6 +3774,91 @@ def provisioning_setup( } def 
test_no_pps(self): + ovf = construct_ovf_env(provision_guest_proxy_agent=False) + md, ud, cfg = dsaz.read_azure_ovf(ovf) + self.mock_util_mount_cb.return_value = (md, ud, cfg, {}) + self.mock_readurl.side_effect = [ + mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + ] + self.mock_azure_get_metadata_from_fabric.return_value = [] + + self.azure_ds._check_and_get_data() + + assert self.mock_subp_subp.mock_calls == [] + + assert self.mock_readurl.mock_calls == [ + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + timeout=30, + headers_cb=imds.headers_cb, + exception_cb=mock.ANY, + infinite=True, + log_req_resp=True, + ), + ] + + # Verify DHCP is setup once. + assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [ + mock.call(timeout_minutes=20) + ] + assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [ + mock.call( + self.azure_ds.distro, + None, + dsaz.dhcp_log_cb, + ) + ] + assert self.azure_ds._wireserver_endpoint == "10.11.12.13" + assert self.azure_ds._is_ephemeral_networking_up() is False + + # Verify DMI usage. + assert self.mock_dmi_read_dmi_data.mock_calls == [ + mock.call("chassis-asset-tag"), + mock.call("system-uuid"), + ] + assert ( + self.azure_ds.metadata["instance-id"] + == "50109936-ef07-47fe-ac82-890c853f60d5" + ) + + # Verify IMDS metadata. + assert self.azure_ds.metadata["imds"] == self.imds_md + + # Verify reporting ready once. + assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ + mock.call( + endpoint="10.11.12.13", + distro=self.azure_ds.distro, + iso_dev="/dev/sr0", + pubkey_info=None, + ) + ] + + # Verify netlink. + assert self.mock_netlink.mock_calls == [] + + # Verify no reported_ready marker written. + assert self.wrapped_util_write_file.mock_calls == [] + assert self.patched_reported_ready_marker_path.exists() is False + + # Verify reports via KVP. + assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 + assert len(self.mock_azure_report_failure_to_fabric.mock_calls) == 0 + assert len(self.mock_kvp_report_success_to_host.mock_calls) == 1 + + # Verify dmesg reported via KVP. + assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 1 + + def test_no_pps_gpa(self): + """test full provisioning scope when azure-proxy-agent + is enabled and running.""" + self.mock_subp_subp.side_effect = [ + subp.SubpResult("Guest Proxy Agent running", ""), + ] + ovf = construct_ovf_env(provision_guest_proxy_agent=True) + md, ud, cfg = dsaz.read_azure_ovf(ovf) + self.mock_util_mount_cb.return_value = (md, ud, cfg, {}) self.mock_readurl.side_effect = [ mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] @@ -3771,6 +3866,11 @@ def test_no_pps(self): self.azure_ds._check_and_get_data() + assert self.mock_subp_subp.mock_calls == [ + mock.call( + ["azure-proxy-agent", "--status", "--wait", "120"], + ), + ] assert self.mock_readurl.mock_calls == [ mock.call( "http://169.254.169.254/metadata/instance?" @@ -3829,11 +3929,96 @@ def test_no_pps(self): # Verify reports via KVP. assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 + assert len(self.mock_azure_report_failure_to_fabric.mock_calls) == 0 assert len(self.mock_kvp_report_success_to_host.mock_calls) == 1 # Verify dmesg reported via KVP. 
assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 1 + def test_no_pps_gpa_fail(self): + """Test full provisioning scope when azure-proxy-agent is enabled and + throws an exception during provisioning.""" + self.mock_subp_subp.side_effect = [ + subp.ProcessExecutionError( + cmd=["failed", "azure-proxy-agent"], + stdout="test_stdout", + stderr="test_stderr", + exit_code=4, + ), + ] + ovf = construct_ovf_env(provision_guest_proxy_agent=True) + md, ud, cfg = dsaz.read_azure_ovf(ovf) + self.mock_util_mount_cb.return_value = (md, ud, cfg, {}) + self.mock_readurl.side_effect = [ + mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + ] + self.mock_azure_get_metadata_from_fabric.return_value = [] + + self.azure_ds._check_and_get_data() + + assert self.mock_subp_subp.mock_calls == [ + mock.call( + ["azure-proxy-agent", "--status", "--wait", "120"], + ), + ] + assert self.mock_readurl.mock_calls == [ + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + timeout=30, + headers_cb=imds.headers_cb, + exception_cb=mock.ANY, + infinite=True, + log_req_resp=True, + ), + ] + + # Verify DHCP is set up once. + assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [ + mock.call(timeout_minutes=20) + ] + assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [ + mock.call( + self.azure_ds.distro, + None, + dsaz.dhcp_log_cb, + ) + ] + assert self.azure_ds._wireserver_endpoint == "10.11.12.13" + assert self.azure_ds._is_ephemeral_networking_up() is False + + # Verify DMI usage. + assert self.mock_dmi_read_dmi_data.mock_calls == [ + mock.call("chassis-asset-tag"), + mock.call("system-uuid"), + mock.call("system-uuid"), + ] + assert ( + self.azure_ds.metadata["instance-id"] + == "50109936-ef07-47fe-ac82-890c853f60d5" + ) + + # Verify IMDS metadata. + assert self.azure_ds.metadata["imds"] == self.imds_md + + # Verify ready was not reported to the fabric on failure. + assert self.mock_azure_get_metadata_from_fabric.mock_calls == [] + + # Verify netlink. + assert self.mock_netlink.mock_calls == [] + + # Verify no reported_ready marker written. + assert self.wrapped_util_write_file.mock_calls == [] + assert self.patched_reported_ready_marker_path.exists() is False + + # Verify reports via KVP. + assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 1 + assert len(self.mock_azure_report_failure_to_fabric.mock_calls) == 1 + assert len(self.mock_kvp_report_success_to_host.mock_calls) == 0 + + # Verify dmesg reported via KVP.
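+ # (A single call is expected even on this failure path: dmesg is + # still captured to KVP to aid debugging of failed provisioning.)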
+ assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 1 + @pytest.mark.parametrize("pps_type", ["Savable", "Running"]) def test_stale_pps(self, pps_type): imds_md_source = copy.deepcopy(self.imds_md) @@ -4522,6 +4707,64 @@ def test_imds_failure_results_in_provisioning_failure(self): assert len(self.mock_kvp_report_success_to_host.mock_calls) == 0 +class TestCheckAzureProxyAgent: + @pytest.fixture(autouse=True) + def proxy_setup( + self, + azure_ds, + mock_subp_subp, + caplog, + mock_wrapping_report_failure, + mock_timestamp, + ): + self.azure_ds = azure_ds + self.mock_subp_subp = mock_subp_subp + self.caplog = caplog + self.mock_wrapping_report_failure = mock_wrapping_report_failure + self.mock_timestamp = mock_timestamp + + def test_check_azure_proxy_agent_status(self): + self.mock_subp_subp.side_effect = [ + subp.SubpResult("Guest Proxy Agent running", ""), + ] + self.azure_ds._check_azure_proxy_agent_status() + assert "Running azure-proxy-agent" in self.caplog.text + assert self.mock_wrapping_report_failure.mock_calls == [] + + def test_check_azure_proxy_agent_status_notfound(self): + exception = subp.ProcessExecutionError(reason=FileNotFoundError()) + self.mock_subp_subp.side_effect = [ + exception, + ] + self.azure_ds._check_azure_proxy_agent_status() + assert "azure-proxy-agent not found" in self.caplog.text + assert self.mock_wrapping_report_failure.mock_calls == [ + mock.call( + errors.ReportableErrorProxyAgentNotFound(), + ), + ] + + def test_check_azure_proxy_agent_status_failure(self): + exception = subp.ProcessExecutionError( + cmd=["failed", "azure-proxy-agent"], + stdout="test_stdout", + stderr="test_stderr", + exit_code=4, + ) + self.mock_subp_subp.side_effect = [ + exception, + ] + self.azure_ds._check_azure_proxy_agent_status() + assert "azure-proxy-agent status failure" in self.caplog.text + assert self.mock_wrapping_report_failure.mock_calls == [ + mock.call( + errors.ReportableErrorProxyAgentStatusFailure( + exception=exception + ), + ), + ] + + class TestGetMetadataFromImds: @pytest.mark.parametrize("route_configured_for_imds", [False, True]) @pytest.mark.parametrize("report_failure", [False, True]) diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py index 34a92453335..c111e710ffc 100644 --- a/tests/unittests/sources/test_digitalocean.py +++ b/tests/unittests/sources/test_digitalocean.py @@ -165,7 +165,7 @@ def test_returns_false_not_on_docean(self, m_read_sysinfo): self.assertTrue(m_read_sysinfo.called) @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata") - @mock.patch("cloudinit.sources.util.deprecate") + @mock.patch("cloudinit.sources.lifecycle.deprecate") def test_deprecation_log_on_init(self, mock_deprecate, _mock_readmd): ds = self.get_ds() self.assertTrue(ds.get_data()) @@ -176,7 +176,7 @@ def test_deprecation_log_on_init(self, mock_deprecate, _mock_readmd): ) @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata") - @mock.patch("cloudinit.sources.util.deprecate") + @mock.patch("cloudinit.sources.lifecycle.deprecate") def test_deprecation_log_on_unpick(self, mock_deprecate, _mock_readmd): ds = self.get_ds() self.assertTrue(ds.get_data()) diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py index bee486f4dd5..37c2594dce3 100644 --- a/tests/unittests/sources/test_ibmcloud.py +++ b/tests/unittests/sources/test_ibmcloud.py @@ -272,6 +272,8 @@ def test_template_live(self, m_platform, m_sysuuid): ) ret = ibm.read_md() + if ret is None: # this 
is needed for mypy - ensures ret is not None + self.fail("read_md returned None unexpectedly") self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, ret["platform"]) self.assertEqual(tmpdir, ret["source"]) self.assertEqual(self.userdata, ret["userdata"]) @@ -298,6 +300,8 @@ def test_os_code_live(self, m_platform, m_sysuuid): ) ret = ibm.read_md() + if ret is None: # this is needed for mypy - ensures ret is not None + self.fail("read_md returned None unexpectedly") self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"]) self.assertEqual(tmpdir, ret["source"]) self.assertEqual(self.userdata, ret["userdata"]) @@ -320,6 +324,8 @@ def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): ) ret = ibm.read_md() + if ret is None: # this is needed for mypy - ensures ret is not None + self.fail("read_md returned None unexpectedly") self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"]) self.assertEqual(tmpdir, ret["source"]) self.assertIsNone(ret["userdata"]) diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py index 15b25196db7..b98ff73c9ac 100644 --- a/tests/unittests/sources/test_nocloud.py +++ b/tests/unittests/sources/test_nocloud.py @@ -98,6 +98,29 @@ def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd): self.assertEqual(dsrc.platform_type, "nocloud") self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir) + def test_nocloud_seedfrom(self, m_is_lxd): + """Check that a seedfrom value triggers detection""" + assert dsNoCloud( + sys_cfg={"datasource": {"NoCloud": {"seedfrom": "somevalue"}}}, + distro=None, + paths=self.paths, + ).ds_detect() + + def test_nocloud_user_data_meta_data(self, m_is_lxd): + """Check that meta-data and user-data trigger detection""" + assert dsNoCloud( + sys_cfg={ + "datasource": { + "NoCloud": { + "meta-data": "", + "user-data": "#cloud-config\nsome-config", + } + } + }, + distro=None, + paths=self.paths, + ).ds_detect() + def test_fs_label(self, m_is_lxd): # find_devs_with should not be called ff fs_label is None class PsuedoException(Exception): diff --git a/tests/unittests/sources/test_wsl.py b/tests/unittests/sources/test_wsl.py index 31c5c897ed5..2f26d7fd565 100644 --- a/tests/unittests/sources/test_wsl.py +++ b/tests/unittests/sources/test_wsl.py @@ -355,6 +355,8 @@ def test_get_data_sh(self, m_lsb_release, tmpdir, paths): @mock.patch("cloudinit.util.get_linux_distro") def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): + """Validates the precedence of user-data files.""" + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO # Set up basic user data: @@ -400,9 +402,17 @@ def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): assert "" == shell_script - # Additionally set up some UP4W agent data: + @mock.patch("cloudinit.util.get_linux_distro") + def test_interaction_with_pro(self, m_get_linux_dist, tmpdir, paths): + """Validates the interaction of user-data and Pro For WSL agent data""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + user_file = tmpdir.join(".cloud-init", "ubuntu-24.04.user-data") + user_file.dirpath().mkdir() + user_file.write("#cloud-config\nwrite_files:\n- path: /etc/wsl.conf") - # Now the winner should be the merge of the agent and Landscape data. + # The winner should be the merge of the agent and user-provided data.
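+ # (Per the assertions below, agent data is expected to win conflicting + # keys such as the Landscape account name, while unrelated user keys + # like the wsl.conf write_files entry survive the merge.)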
ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") os.makedirs(ubuntu_pro_tmp, exist_ok=True) @@ -410,9 +420,14 @@ def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): agent_file.write( """#cloud-config landscape: + host: + url: landscape.canonical.com:6554 client: - account_name: agenttest -ubuntu_advantage: + account_name: agenttest + url: https://landscape.canonical.com/message-system + ping_url: https://landscape.canonical.com/ping + tags: wsl +ubuntu_pro: token: testtoken""" ) @@ -436,17 +451,93 @@ def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): ) assert "wsl.conf" in userdata assert "packages" not in userdata - assert "ubuntu_advantage" in userdata + assert "ubuntu_pro" in userdata assert "landscape" in userdata assert "agenttest" in userdata - # Additionally set up some Landscape provided user data + @mock.patch("cloudinit.util.get_linux_distro") + def test_landscape_vs_local_user(self, m_get_linux_dist, tmpdir, paths): + """Validates the precedence of Landscape-provided data over local data""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + user_file = tmpdir.join(".cloud-init", "ubuntu-24.04.user-data") + user_file.dirpath().mkdir() + user_file.write( + """#cloud-config +ubuntu_pro: + token: usertoken +package_update: true""" + ) + + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) landscape_file.write( """#cloud-config landscape: client: account_name: landscapetest + tags: tag_aiml,tag_dev +locale: en_GB.UTF-8""" + ) + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + + assert ( + "locale" in userdata + and "landscapetest" in userdata + and "ubuntu_pro" not in userdata + and "package_update" not in userdata + ), "Landscape data should have overridden user-provided data" + + @mock.patch("cloudinit.util.get_linux_distro") + def test_landscape_provided_data(self, m_get_linux_dist, tmpdir, paths): + """Validates the interaction of Pro For WSL agent and Landscape data""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + agent_file.write( + """#cloud-config +landscape: + host: + url: hosted.com:6554 + client: + account_name: agenttest + url: https://hosted.com/message-system + ping_url: https://hosted.com/ping + ssl_public_key: C:\\Users\\User\\server.pem + tags: wsl +ubuntu_pro: + token: testtoken""" + ) + + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write( + """#cloud-config +landscape: + client: + account_name: landscapetest + tags: tag_aiml,tag_dev package_update: true""" ) @@ -469,14 +560,176 @@ def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): ), ) - assert "wsl.conf" not in userdata - assert "packages" not in userdata - assert "ubuntu_advantage" in userdata + assert "ubuntu_pro" in userdata, "Agent data should be present" assert "package_update" in userdata, ( "package_update entry should not be overriden by agent data" " nor ignored" ) - assert "landscape" in userdata assert ( "landscapetest" not in userdata and "agenttest" in
userdata ), "Landscape account name should have been overriden by agent data" + # Make sure we have tags from Landscape data, not agent's + assert ( + "tag_aiml" in userdata and "tag_dev" in userdata + ), "User-data should override agent data's Landscape computer tags" + assert "wsl" not in userdata + + @mock.patch("cloudinit.util.get_linux_distro") + def test_with_landscape_no_tags(self, m_get_linux_dist, tmpdir, paths): + """Validates the Pro For WSL default Landscape tags are applied""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + agent_file.write( + """#cloud-config +landscape: + host: + url: landscape.canonical.com:6554 + client: + account_name: agenttest + url: https://landscape.canonical.com/message-system + ping_url: https://landscape.canonical.com/ping + tags: wsl +ubuntu_pro: + token: testtoken""" + ) + # Set up some Landscape provided user data without tags + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write( + """#cloud-config +landscape: + client: + account_name: landscapetest +package_update: true""" + ) + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + + assert ( + "tags: wsl" in userdata + ), "Landscape computer tags should match UP4W agent's data defaults" + + @mock.patch("cloudinit.util.get_linux_distro") + def test_with_no_tags_at_all(self, m_get_linux_dist, tmpdir, paths): + """Asserts the DS still works if there are no Landscape tags at all""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + user_file = tmpdir.join(".cloud-init", "ubuntu-24.04.user-data") + user_file.dirpath().mkdir() + user_file.write("#cloud-config\nwrite_files:\n- path: /etc/wsl.conf") + + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + # Make sure we don't crash if there are no tags anywhere. + agent_file.write( + """#cloud-config +ubuntu_pro: + token: up4w_token""" + ) + # Set up some Landscape provided user data without tags + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write( + """#cloud-config +landscape: + client: + account_name: landscapetest +package_update: true""" + ) + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + assert "landscapetest" in userdata + assert "up4w_token" in userdata + assert "tags" not in userdata + + @mock.patch("cloudinit.util.get_linux_distro") + def test_with_no_client_subkey(self, m_get_linux_dist, tmpdir, paths): + """Validates the DS works without the landscape.client subkey""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + # Make sure we don't crash if there is no client subkey. 
+ # (That would be a bug in the agent as there is no other config + # value for landscape outside of landscape.client, so I'm making up + # some nonsense keys just to make sure we won't crash) + agent_file.write( + """#cloud-config +landscape: + server: + port: 6554 +ubuntu_pro: + token: up4w_token""" + ) + + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write( + """#cloud-config +landscape: + client: + account_name: landscapetest +package_update: true""" + ) + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + assert "landscapetest" not in userdata + assert ( + "port: 6554" in userdata + ), "Agent data should override the entire landscape config." + + assert "up4w_token" in userdata diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py index fd4bb481e46..c1415934141 100644 --- a/tests/unittests/sources/vmware/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -241,27 +241,45 @@ def test_get_nics_list_dhcp(self): elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) + # Test NIC1 self.assertEqual("physical", nic1.get("type"), "type of NIC1") self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") self.assertEqual( "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1" ) subnets = nic1.get("subnets") - self.assertEqual(1, len(subnets), "number of subnets for NIC1") - subnet = subnets[0] - self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1") - self.assertEqual("auto", subnet.get("control"), "NIC1 Control type") + self.assertEqual(2, len(subnets), "number of subnets for NIC1") + subnet_ipv4 = subnets[0] + self.assertEqual( + "dhcp", subnet_ipv4.get("type"), "IPv4 DHCP type for NIC1" + ) + self.assertEqual( + "auto", subnet_ipv4.get("control"), "NIC1 Control type" + ) + subnet_ipv6 = subnets[1] + self.assertEqual( + "dhcp6", subnet_ipv6.get("type"), "IPv6 DHCP type for NIC1" + ) + # Test NIC2 self.assertEqual("physical", nic2.get("type"), "type of NIC2") self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") self.assertEqual( "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2" ) subnets = nic2.get("subnets") - self.assertEqual(1, len(subnets), "number of subnets for NIC2") - subnet = subnets[0] - self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2") - self.assertEqual("auto", subnet.get("control"), "NIC2 Control type") + self.assertEqual(2, len(subnets), "number of subnets for NIC2") + subnet_ipv4 = subnets[0] + self.assertEqual( + "dhcp", subnet_ipv4.get("type"), "IPv4 DHCP type for NIC2" + ) + self.assertEqual( + "auto", subnet_ipv4.get("control"), "NIC2 Control type" + ) + subnet_ipv6 = subnets[1] + self.assertEqual( + "dhcp6", subnet_ipv6.get("type"), "IPv6 DHCP type for NIC2" + ) def test_get_nics_list_static(self): """Tests if NicConfigurator properly calculates network subnets @@ -286,6 +304,7 @@ def test_get_nics_list_static(self): elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) + # Test NIC1 self.assertEqual("physical", nic1.get("type"), "type of NIC1") self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") self.assertEqual( @@ -345,6 +364,7 @@ def
test_get_nics_list_static(self): else: self.assertEqual(True, False, "invalid gateway %s" % (gateway)) + # Test NIC2 self.assertEqual("physical", nic2.get("type"), "type of NIC2") self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") self.assertEqual( @@ -352,16 +372,18 @@ def test_get_nics_list_static(self): ) subnets = nic2.get("subnets") - self.assertEqual(1, len(subnets), "Number of subnets for NIC2") + self.assertEqual(2, len(subnets), "Number of subnets for NIC2") - subnet = subnets[0] - self.assertEqual("static", subnet.get("type"), "Subnet type") + subnet_ipv4 = subnets[0] + self.assertEqual("static", subnet_ipv4.get("type"), "Subnet type") self.assertEqual( - "192.168.6.102", subnet.get("address"), "Subnet address" + "192.168.6.102", subnet_ipv4.get("address"), "Subnet address" ) self.assertEqual( - "255.255.0.0", subnet.get("netmask"), "Subnet netmask" + "255.255.0.0", subnet_ipv4.get("netmask"), "Subnet netmask" ) + subnet_ipv6 = subnets[1] + self.assertEqual("dhcp6", subnet_ipv6.get("type"), "Subnet type") def test_custom_script(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") @@ -448,7 +470,10 @@ def test_non_primary_nic_without_gateway(self): "type": "static", "address": "10.20.87.154", "netmask": "255.255.252.0", - } + }, + { + "type": "dhcp6", + }, ], } ], @@ -499,7 +524,10 @@ def test_non_primary_nic_with_gateway(self): "metric": 10000, } ], - } + }, + { + "type": "dhcp6", + }, ], } ], @@ -559,7 +587,10 @@ def test_cust_non_primary_nic_with_gateway_(self): "metric": 10000, } ], - } + }, + { + "type": "dhcp6", + }, ], } ], @@ -604,7 +635,10 @@ def test_a_primary_nic_with_gateway(self): "address": "10.20.87.154", "netmask": "255.255.252.0", "gateway": "10.20.87.253", - } + }, + { + "type": "dhcp6", + }, ], } ], diff --git a/tests/unittests/test_all_stages.py b/tests/unittests/test_all_stages.py new file mode 100644 index 00000000000..90bde5e1add --- /dev/null +++ b/tests/unittests/test_all_stages.py @@ -0,0 +1,208 @@ +import random +import signal +import socket +import time +from threading import Thread +from unittest import mock + +from cloudinit import socket as ci_socket + + +class Sync: + """A device to send and receive synchronization messages + + Creating an instance of the device sends a b"start" message. + """ + + def __init__(self, name: str, path: str): + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + self.sock.connect(f"{path}/share/{name}.sock") + self.sock.bind(f"{path}/share/{name}-return.sock") + self.sock.sendall(b"start") + + def receive(self): + """receive a message of up to 4096 bytes from the socket""" + received = self.sock.recv(4096) + self.sock.close() + return received + + +class Timeout: + """A utility which may be used to verify that a timeout occurs + + TimeoutError is raised on successful timeout. + + Create a signal handler and use signal.alarm to verify that the + timeout occurred.
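+ + A minimal usage sketch (do_work() is a hypothetical blocking call): + + with Timeout(): + do_work() # TimeoutError is raised after ~1 second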
+ """ + + def handle_timeout(self, *_): + raise TimeoutError() + + def __enter__(self): + signal.signal(signal.SIGALRM, self.handle_timeout) + # 1 second is, unfortunately, the minimum + signal.alarm(1) + + def __exit__(self, *_): + signal.alarm(0) + + +def test_all_stages_times_out(tmp_path): + """Verify that no "start" makes the protocol block""" + with mock.patch.object( + ci_socket, "DEFAULT_RUN_DIR", tmp_path + ), mock.patch.object(ci_socket, "sd_notify"), mock.patch.object( + ci_socket.os, "isatty", return_value=False + ), mock.patch.object( + ci_socket.sys.stdin, "fileno" + ): + sync = ci_socket.SocketSync("first") + + try: + with Timeout(): + # this should block for 1 second + with sync("first"): + pass + except TimeoutError: + # success is a timeout + pass + else: + raise AssertionError("Expected the thing to timeout!") + + +def test_all_stages(tmp_path): + """Verify that a socket can store "start" messages + + After a socket has been been bound but before it has started listening + """ + expected = "echo 'Completed socket interaction for boot stage {}'; exit 0;" + with mock.patch.object( + ci_socket, "DEFAULT_RUN_DIR", tmp_path + ), mock.patch.object(ci_socket, "sd_notify"), mock.patch.object( + ci_socket.os, "isatty", return_value=False + ), mock.patch.object( + ci_socket.sys.stdin, "fileno" + ): + sync = ci_socket.SocketSync("first", "second", "third") + + # send all three syncs to the sockets + first = Sync("first", tmp_path) + second = Sync("second", tmp_path) + third = Sync("third", tmp_path) + + # "wait" on the first sync event + with sync("first"): + pass + + # check that the first sync returned + assert expected.format("first").encode() == first.receive() + # "wait" on the second sync event + with sync("second"): + pass + # check that the second sync returned + assert expected.format("second").encode() == second.receive() + # "wait" on the third sync event + with sync("third"): + pass + # check that the third sync returned + assert expected.format("third").encode() == third.receive() + + +def test_all_stages_threaded(tmp_path): + """Verify that arbitrary "start" order works""" + + # in milliseconds + max_sleep = 100 + # initialize random number generator + random.seed(time.time()) + expected = "echo 'Completed socket interaction for boot stage {}'; exit 0;" + sync_storage = {} + + def syncer(index: int, name: str): + """sleep for 0-100ms then send a sync notification + + this allows sync order to be arbitrary + """ + time.sleep(0.001 * random.randint(0, max_sleep)) + sync_storage[index] = Sync(name, tmp_path) + + with mock.patch.object( + ci_socket, "DEFAULT_RUN_DIR", tmp_path + ), mock.patch.object(ci_socket, "sd_notify"), mock.patch.object( + ci_socket.os, "isatty", return_value=False + ), mock.patch.object( + ci_socket.sys.stdin, "fileno" + ): + + sync = ci_socket.SocketSync( + "first", "second", "third", "fourth", "fifth" + ) + + for i, name in { + 1: "first", + 2: "second", + 3: "third", + 4: "fourth", + 5: "fifth", + }.items(): + t = Thread(target=syncer, args=(i, name)) + t.run() + + # wait on the first sync event + with sync("first"): + pass + + # check that the first sync returned + assert expected.format("first").encode() == sync_storage[1].receive() + + # wait on the second sync event + with sync("second"): + pass + + # check that the second sync returned + assert expected.format("second").encode() == sync_storage[2].receive() + + # wait on the third sync event + with sync("third"): + pass + + # check that the third sync returned + assert 
expected.format("third").encode() == sync_storage[3].receive() + with sync("fourth"): + pass + + # check that the fourth sync returned + assert expected.format("fourth").encode() == sync_storage[4].receive() + + with sync("fifth"): + pass + + # check that the fifth sync returned + assert expected.format("fifth").encode() == sync_storage[5].receive() + + +def test_all_stages_exception(tmp_path): + """Verify that exceptions log messages produce a valid warning message""" + with mock.patch.object( + ci_socket, "DEFAULT_RUN_DIR", tmp_path + ), mock.patch.object(ci_socket, "sd_notify"), mock.patch.object( + ci_socket.os, "isatty", return_value=False + ), mock.patch.object( + ci_socket.sys.stdin, "fileno" + ): + sync = ci_socket.SocketSync("first", "second", "third") + + # send all three syncs to the sockets + first = Sync("first", tmp_path) + + # "wait" on the first sync event + with sync("first"): + # verify that an exception in context doesn't raise + 1 / 0 # pylint: disable=W0104 + + assert ( + b"echo 'fatal error, run \"systemctl status cloud-init-main." + b'service" and "cloud-init status --long" for ' + b"more details'; exit 1;" == first.receive() + ) diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 3a92d29e261..a7c3b1ba38b 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -160,9 +160,7 @@ def test_no_arguments_shows_usage(self, capsys): def test_no_arguments_shows_error_message(self, capsys): exit_code = self._call_main() - missing_subcommand_message = ( - "the following arguments are required: subcommand" - ) + missing_subcommand_message = "a subcommand is required" _out, err = capsys.readouterr() assert ( missing_subcommand_message in err @@ -319,7 +317,8 @@ def test_wb_schema_subcommand_parser(self, m_read_cfg, capsys): ["all"], [ "**Supported distros:** all", - "**Supported distros:** almalinux, alpine, azurelinux, " + "**Supported distros:** " + "almalinux, alpine, aosc, azurelinux, " "centos, cloudlinux, cos, debian, eurolinux, fedora, " "freebsd, mariner, miraclelinux, openbsd, openeuler, " "OpenCloudOS, openmandriva, opensuse, opensuse-microos, " diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index e71e853f314..d8f10c1ab8f 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -867,6 +867,32 @@ def test_configured_list_with_none(self): mydata["files"][cfgpath] = 'datasource_list: ["Ec2", "None"]\n' self._check_via_dict(mydata, rc=RC_FOUND, dslist=["Ec2", DS_NONE]) + def test_nocloud_seedfrom(self): + """Check seedfrom system config detects nocloud. + + Verify that a cloud.cfg.d/ that contains more than two datasources in + its datasource_list will positively identify nocloud when a + datasource.NoCloud.seedfrom value exists + """ + self._check_via_dict( + copy.deepcopy(VALID_CFG["NoCloud-seedfrom"]), + rc=RC_FOUND, + dslist=["NoCloud", DS_NONE], + ) + + def test_nocloud_userdata_and_metadata(self): + """Check seedfrom system config detects nocloud. 
+ + Verify that a cloud.cfg.d/ that contains more than two datasources in + its datasource_list will positively identify nocloud when both + datasource.NoCloud.{user-data,meta-data} values exist + """ + self._check_via_dict( + copy.deepcopy(VALID_CFG["NoCloud-user-data-meta-data"]), + rc=RC_FOUND, + dslist=["NoCloud", DS_NONE], + ) + def test_aliyun_identified(self): """Test that Aliyun cloud is identified by product id.""" self._test_ds_found("AliYun") @@ -1964,6 +1990,41 @@ def _print_run_output(rc, out, err, cfg, files): os.path.join(P_SEED_DIR, "nocloud", "meta-data"): "md\n", }, }, + "NoCloud-seedfrom": { + "ds": "NoCloud", + "files": { + # Also include a datasource list of more than just + # [NoCloud, None], because that would automatically select + # NoCloud without checking + "etc/cloud/cloud.cfg.d/test.cfg": dedent( + """\ + datasource_list: [ Azure, OpenStack, NoCloud, None ] + datasource: + NoCloud: + seedfrom: http://0.0.0.0/test + """ + ) + }, + }, + "NoCloud-user-data-meta-data": { + "ds": "NoCloud", + "files": { + # Also include a datasource list of more than just + # [NoCloud, None], because that would automatically select + # NoCloud without checking + "etc/cloud/cloud.cfg.d/test.cfg": dedent( + """\ + datasource_list: [ Azure, OpenStack, NoCloud, None ] + datasource: + NoCloud: + meta-data: "" + user-data: | + #cloud-config + + """ + ) + }, + }, "NoCloud-seed-ubuntu-core": { "ds": "NoCloud", "files": { diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index 87996310349..d67c3552157 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -6,10 +6,11 @@ import io import logging import time +from typing import cast import pytest -from cloudinit import log, util +from cloudinit import lifecycle, log, util from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT from tests.unittests.helpers import CiTestCase @@ -63,10 +64,18 @@ def test_logger_uses_gmtime(self): class TestDeprecatedLogs: def test_deprecated_log_level(self, caplog): - logging.getLogger().deprecated("deprecated message") + logger = cast(log.CustomLoggerType, logging.getLogger()) + logger.deprecated("deprecated message") assert "DEPRECATED" == caplog.records[0].levelname assert "deprecated message" in caplog.text + def test_trace_log_level(self, caplog): + logger = cast(log.CustomLoggerType, logging.getLogger()) + logger.setLevel(logging.NOTSET) + logger.trace("trace message") + assert "TRACE" == caplog.records[0].levelname + assert "trace message" in caplog.text + @pytest.mark.parametrize( "expected_log_level, deprecation_info_boundary", ( @@ -103,7 +112,7 @@ def test_deprecate_log_level_based_on_features( "DEPRECATION_INFO_BOUNDARY", deprecation_info_boundary, ) - util.deprecate( + lifecycle.deprecate( deprecated="some key", deprecated_version="19.2", extra_message="dont use it", @@ -115,17 +124,18 @@ ) def test_log_deduplication(self, caplog): - util.deprecate( + log.define_extra_loggers() + lifecycle.deprecate( deprecated="stuff", deprecated_version="19.1", extra_message=":)", ) - util.deprecate( + lifecycle.deprecate( deprecated="stuff", deprecated_version="19.1", extra_message=":)", ) - util.deprecate( + lifecycle.deprecate( deprecated="stuff", deprecated_version="19.1", extra_message=":)", diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index d790bf4f1ca..c856f97564f 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -22,7 +22,15 @@ import pytest import yaml
-from cloudinit import atomic_helper, features, importer, subp, url_helper, util +from cloudinit import ( + atomic_helper, + features, + importer, + lifecycle, + subp, + url_helper, + util, +) from cloudinit.distros import Distro from cloudinit.helpers import Paths from cloudinit.sources import DataSourceHostname @@ -2452,16 +2460,23 @@ class TestReadSeeded: def test_unicode_not_messed_up(self, tmpdir): ud = b"userdatablob" vd = b"vendordatablob" + network = b"test: 'true'" helpers.populate_dir( tmpdir.strpath, - {"meta-data": "key1: val1", "user-data": ud, "vendor-data": vd}, + { + "meta-data": "key1: val1", + "user-data": ud, + "vendor-data": vd, + "network-config": network, + }, ) - (found_md, found_ud, found_vd) = util.read_seeded( + found_md, found_ud, found_vd, found_network = util.read_seeded( tmpdir.strpath + os.path.sep ) assert found_md == {"key1": "val1"} assert found_ud == ud assert found_vd == vd + assert found_network == {"test": "true"} @pytest.mark.parametrize( "base, feature_flag, req_urls", @@ -2470,6 +2485,7 @@ def test_unicode_not_messed_up(self, tmpdir): "http://10.0.0.1/%s?qs=1", True, [ + "http://10.0.0.1/network-config?qs=1", "http://10.0.0.1/meta-data?qs=1", "http://10.0.0.1/user-data?qs=1", "http://10.0.0.1/vendor-data?qs=1", @@ -2480,6 +2496,7 @@ def test_unicode_not_messed_up(self, tmpdir): "https://10.0.0.1:8008/", True, [ + "https://10.0.0.1:8008/network-config", "https://10.0.0.1:8008/meta-data", "https://10.0.0.1:8008/user-data", "https://10.0.0.1:8008/vendor-data", @@ -2490,6 +2507,7 @@ def test_unicode_not_messed_up(self, tmpdir): "https://10.0.0.1:8008", True, [ + "https://10.0.0.1:8008/network-config", "https://10.0.0.1:8008/meta-data", "https://10.0.0.1:8008/user-data", "https://10.0.0.1:8008/vendor-data", @@ -2500,6 +2518,7 @@ def test_unicode_not_messed_up(self, tmpdir): "https://10.0.0.1:8008", False, [ + "https://10.0.0.1:8008network-config", "https://10.0.0.1:8008meta-data", "https://10.0.0.1:8008user-data", "https://10.0.0.1:8008vendor-data", @@ -2510,6 +2529,7 @@ def test_unicode_not_messed_up(self, tmpdir): "https://10.0.0.1:8008?qs=", True, [ + "https://10.0.0.1:8008?qs=network-config", "https://10.0.0.1:8008?qs=meta-data", "https://10.0.0.1:8008?qs=user-data", "https://10.0.0.1:8008?qs=vendor-data", @@ -2540,12 +2560,15 @@ def fake_response(url, timeout, retries): "NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH", feature_flag, ): - (found_md, found_ud, found_vd) = util.read_seeded(base) + found_md, found_ud, found_vd, found_network = util.read_seeded( + base + ) # Meta-data treated as YAML assert found_md == {"/meta-data": 1} # user-data, vendor-data read raw. 
It could be scripts or other format assert found_ud == "/user-data: 1" assert found_vd == "/vendor-data: 1" + assert found_network == {"/network-config": 1} assert [ mock.call(req_url, timeout=5, retries=10) for req_url in req_urls ] == m_read.call_args_list @@ -2560,15 +2583,22 @@ def setUp(self): def test_unicode_not_messed_up(self): ud = b"userdatablob" vd = None + network = b"test: 'true'" helpers.populate_dir( - self.tmp, {"meta-data": "key1: val1", "user-data": ud} + self.tmp, + { + "meta-data": "key1: val1", + "user-data": ud, + "network-config": network, + }, ) sdir = self.tmp + os.path.sep - (found_md, found_ud, found_vd) = util.read_seeded(sdir) + found_md, found_ud, found_vd, found_network = util.read_seeded(sdir) self.assertEqual(found_md, {"key1": "val1"}) self.assertEqual(found_ud, ud) self.assertEqual(found_vd, vd) + self.assertEqual(found_network, {"test": "true"}) class TestEncode(helpers.TestCase): @@ -2802,19 +2832,6 @@ def test_non_utf8_in_environment(self, m_load_file): ) self.assertEqual(1, m_load_file.call_count) - @mock.patch(M_PATH + "load_binary_file") - def test_encoding_none_returns_bytes(self, m_load_file): - """encoding none returns bytes.""" - lines = (self.bootflag, self.simple1, self.simple2, self.mixed) - content = self.null.join(lines) - m_load_file.return_value = content - - self.assertEqual( - dict([t.split(b"=") for t in lines]), - util.get_proc_env(1, encoding=None), - ) - self.assertEqual(1, m_load_file.call_count) - @mock.patch(M_PATH + "load_binary_file") def test_all_utf8_encoded(self, m_load_file): """common path where only utf-8 decodable content.""" @@ -3086,9 +3103,13 @@ class TestVersion: ) def test_eq(self, v1, v2, eq): if eq: - assert util.Version.from_str(v1) == util.Version.from_str(v2) + assert lifecycle.Version.from_str( + v1 + ) == lifecycle.Version.from_str(v2) if not eq: - assert util.Version.from_str(v1) != util.Version.from_str(v2) + assert lifecycle.Version.from_str( + v1 + ) != lifecycle.Version.from_str(v2) @pytest.mark.parametrize( ("v1", "v2", "gt"), @@ -3102,11 +3123,15 @@ def test_eq(self, v1, v2, eq): ) def test_gt(self, v1, v2, gt): if gt: - assert util.Version.from_str(v1) > util.Version.from_str(v2) + assert lifecycle.Version.from_str(v1) > lifecycle.Version.from_str( + v2 + ) if not gt: - assert util.Version.from_str(v1) < util.Version.from_str( + assert lifecycle.Version.from_str(v1) < lifecycle.Version.from_str( v2 - ) or util.Version.from_str(v1) == util.Version.from_str(v2) + ) or lifecycle.Version.from_str(v1) == lifecycle.Version.from_str( + v2 + ) @pytest.mark.parametrize( ("version"), @@ -3120,31 +3145,31 @@ def test_gt(self, v1, v2, gt): ) def test_to_version_and_back_to_str(self, version): """Verify __str__, __iter__, and Version.from_str()""" - assert version == str(util.Version.from_str(version)) + assert version == str(lifecycle.Version.from_str(version)) @pytest.mark.parametrize( ("str_ver", "cls_ver"), ( ( "0.0.0.0", - util.Version(0, 0, 0, 0), + lifecycle.Version(0, 0, 0, 0), ), ( "1.0.0.0", - util.Version(1, 0, 0, 0), + lifecycle.Version(1, 0, 0, 0), ), ( "1.0.2.0", - util.Version(1, 0, 2, 0), + lifecycle.Version(1, 0, 2, 0), ), ( "9.8.2.0", - util.Version(9, 8, 2, 0), + lifecycle.Version(9, 8, 2, 0), ), ), ) def test_from_str(self, str_ver, cls_ver): - assert util.Version.from_str(str_ver) == cls_ver + assert lifecycle.Version.from_str(str_ver) == cls_ver @pytest.mark.allow_dns_lookup diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index d9accd11460..80872dcb508 100644 --- 
a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -42,6 +42,7 @@ citrus-it cjp256 CodeBleu Conan-Kudo +cpaelzer cvstealth dankenigsberg dankm @@ -105,10 +106,12 @@ klausenbusk KsenijaS landon912 ld9379435 +leavelet licebmi linitio LKHN lkundrak +LRitzdorf lucasmoura lucendio lungj @@ -178,6 +181,7 @@ TheRealFalcon thetoolsmith timothegenzmer tnt-dev +tobias-urdin tomponline tsanghan tSU-RooT diff --git a/tools/ds-identify b/tools/ds-identify index 31a15fed9e1..606be9c4da5 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -989,8 +989,12 @@ dscheck_NoCloud() { fi # This is a bit hacky, but a NoCloud false positive isn't the end of the world - if check_config "NoCloud" && check_config "user-data" && check_config "meta-data"; then - return ${DS_FOUND} + if check_config "NoCloud"; then + if check_config "user-data" && check_config "meta-data"; then + return ${DS_FOUND} + elif check_config "seedfrom"; then + return ${DS_FOUND} + fi fi return ${DS_NOT_FOUND} @@ -1692,7 +1696,7 @@ dscheck_VMware() { WSL_path() { local params="$1" path="$2" val="" - val="$(wslpath "$params" "$1")" + val="$(wslpath "$params" "$path")" _RET="$val" } @@ -1772,7 +1776,7 @@ dscheck_WSL() { # Then we can check for any .cloud-init folders for the user if [ ! -d "$profile_dir/.cloud-init/" ] && [ ! -d "$profile_dir/.ubuntupro/.cloud-init/" ]; then - debug 1 "No .cloud-init directories found" + debug 1 "No .cloud-init directories found in $profile_dir" return "${DS_NOT_FOUND}" fi diff --git a/tools/render-template b/tools/render-template index c3af642a08f..78beeecb2cf 100755 --- a/tools/render-template +++ b/tools/render-template @@ -14,6 +14,7 @@ def main(): "almalinux", "alpine", "amazon", + "aosc", "arch", "azurelinux", "benchmark", diff --git a/tox.ini b/tox.ini index a43ef53f3c2..d6982cbe382 100644 --- a/tox.ini +++ b/tox.ini @@ -226,9 +226,6 @@ commands = {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html} doc8 doc/rtd -[doc-lint] -ignore-path-errors=doc/rtd/topics/faq.rst;D001 - [testenv:doc-spelling] deps = -r{toxinidir}/doc-requirements.txt