diff --git a/.coveragerc b/.coveragerc
index d3924a87..ceb3b120 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -2,3 +2,4 @@
 omit =
     */lib/python*
     */migrations/*
+    */tests/*
diff --git a/Makefile b/Makefile
index 046cf0f0..ade31f92 100644
--- a/Makefile
+++ b/Makefile
@@ -13,11 +13,11 @@ test:
 	flake8 webui
 	flake8 ocw
 	flake8 manage.py
-	pytest --cov=./
+	pytest --cov

 .PHONY: codecov
 codecov:
-	pytest -v --cov=./ --cov-report=html && xdg-open htmlcov/index.html
+	pytest -v --cov --cov-report=html && xdg-open htmlcov/index.html

 # Build containers
 docker-container:
diff --git a/ocw/lib/EC2.py b/ocw/lib/EC2.py
index 7b5dcea7..0c3fde93 100644
--- a/ocw/lib/EC2.py
+++ b/ocw/lib/EC2.py
@@ -1,9 +1,8 @@
-from .provider import Provider, Image
+from .provider import Provider
 from webui.settings import PCWConfig, ConfigFile
 from dateutil.parser import parse
 import boto3
 from botocore.exceptions import ClientError
-import re
 from datetime import date, datetime, timedelta, timezone
 from ocw.lib.emailnotify import send_mail
 import traceback
@@ -89,32 +88,21 @@ def all_clusters(self):
         return clusters

     @staticmethod
-    def needs_to_delete_snapshot(snapshot, cleanup_ec2_max_snapshot_age_days) -> bool:
-        delete_older_than = date.today() - timedelta(days=cleanup_ec2_max_snapshot_age_days)
-        if datetime.date(snapshot['StartTime']) < delete_older_than:
-            regexes = [
-                re.compile(r'''^OpenQA upload image$'''),
-                re.compile(r'''^Created by CreateImage\([\w-]+\) for ami-\w+ from vol-\w+$''')
-            ]
-            for regex in regexes:
-                m = re.match(regex, snapshot['Description'].strip())
-                if m:
-                    return True
-        return False
+    def needs_to_delete_by_age(creation_time, cleanup_ec2_max_age_days) -> bool:
+        return datetime.date(creation_time) < (date.today() - timedelta(days=cleanup_ec2_max_age_days))

     def cleanup_snapshots(self, cleanup_ec2_max_snapshot_age_days):
         for region in self.all_regions:
             response = self.ec2_client(region).describe_snapshots(OwnerIds=['self'])
-            response['Snapshots'].sort(key=lambda snapshot: snapshot['StartTime'].timestamp())
             for snapshot in response['Snapshots']:
-                if EC2.needs_to_delete_snapshot(snapshot, cleanup_ec2_max_snapshot_age_days):
-                    self.log_info("Deleting snapshot {} in region {} with StartTime={}", snapshot['SnapshotId'],
-                                  region, snapshot['StartTime'])
+                if EC2.needs_to_delete_by_age(snapshot['StartTime'], cleanup_ec2_max_snapshot_age_days):
                     try:
                         if self.dry_run:
                             self.log_info("Snapshot deletion of {} skipped due to dry run mode",
                                           snapshot['SnapshotId'])
                         else:
+                            self.log_info("Deleting snapshot {} in region {} with StartTime={}",
+                                          snapshot['SnapshotId'], region, snapshot['StartTime'])
                             self.ec2_client(region).delete_snapshot(SnapshotId=snapshot['SnapshotId'])
                     except ClientError as ex:
                         if ex.response['Error']['Code'] == 'InvalidSnapshot.InUse':
@@ -122,12 +110,11 @@ def cleanup_snapshots(self, cleanup_ec2_max_snapshot_age_days):
                         else:
                             raise ex

-    def cleanup_volumes(self, cleanup_ec2_max_volumes_age_days):
-        delete_older_than = date.today() - timedelta(days=cleanup_ec2_max_volumes_age_days)
+    def cleanup_volumes(self, cleanup_ec2_max_age_days):
         for region in self.all_regions:
             response = self.ec2_client(region).describe_volumes()
             for volume in response['Volumes']:
-                if datetime.date(volume['CreateTime']) < delete_older_than:
+                if EC2.needs_to_delete_by_age(volume['CreateTime'], cleanup_ec2_max_age_days):
                     if self.volume_protected(volume):
                         self.log_info('Volume {} has tag DO_NOT_DELETE so protected from deletion',
                                       volume['VolumeId'])
@@ -209,66 +196,13 @@ def delete_all_clusters(self):
             self.log_info("Finally deleting {} cluster", cluster)
             self.eks_client(region).delete_cluster(name=cluster)

-    def parse_image_name(self, img_name):
-        regexes = [
-            # openqa-SLES12-SP5-EC2.x86_64-0.9.1-BYOS-Build1.55.raw.xz
-            re.compile(r'''^openqa-SLES
-                       (?P<version>\d+(-SP\d+)?)
-                       -(?P<flavor>EC2)
-                       \.
-                       (?P<arch>[^-]+)
-                       -
-                       (?P<kiwi>\d+\.\d+\.\d+)
-                       -
-                       (?P<type>(BYOS|On-Demand))
-                       -Build
-                       (?P<build>\d+\.\d+)
-                       \.raw\.xz
-                       ''', re.RegexFlag.X),
-            # openqa-SLES15-SP2.x86_64-0.9.3-EC2-HVM-Build1.10.raw.xz'
-            # openqa-SLES15-SP2-BYOS.x86_64-0.9.3-EC2-HVM-Build1.10.raw.xz'
-            # openqa-SLES15-SP2.aarch64-0.9.3-EC2-HVM-Build1.49.raw.xz'
-            # openqa-SLES15-SP4-SAP-BYOS.x86_64-0.9.3-EC2-Build150400.1.31.raw.xz
-            re.compile(r'''^openqa-SLES
-                       (?P<version>\d+(-SP\d+)?)
-                       (-(?P<type>[^\.]+))?
-                       \.
-                       (?P<arch>[^-]+)
-                       -
-                       (?P<kiwi>\d+\.\d+\.\d+)
-                       -
-                       (?P<flavor>EC2[-\w]*)
-                       -Build(\d+\.)?
-                       (?P<build>\d+\.\d+)
-                       \.raw\.xz
-                       ''', re.RegexFlag.X),
-            # openqa-SLES12-SP4-EC2-HVM-BYOS.x86_64-0.9.2-Build2.56.raw.xz'
-            re.compile(r'''^openqa-SLES
-                       (?P<version>\d+(-SP\d+)?)
-                       -
-                       (?P<flavor>EC2[^\.]+)
-                       \.
-                       (?P<arch>[^-]+)
-                       -
-                       (?P<kiwi>\d+\.\d+\.\d+)
-                       -
-                       Build
-                       (?P<build>\d+\.\d+)
-                       \.raw\.xz
-                       ''', re.RegexFlag.X)
-        ]
-        return self.parse_image_name_helper(img_name, regexes)
-
     def cleanup_all(self):
-        cleanup_ec2_max_snapshot_age_days = PCWConfig.get_feature_property('cleanup', 'ec2-max-snapshot-age-days',
-                                                                           self._namespace)
-        cleanup_ec2_max_volumes_age_days = PCWConfig.get_feature_property('cleanup', 'ec2-max-volumes-age-days',
-                                                                          self._namespace)
-        self.cleanup_images()
-        if cleanup_ec2_max_snapshot_age_days >= 0:
-            self.cleanup_snapshots(cleanup_ec2_max_snapshot_age_days)
-        if cleanup_ec2_max_volumes_age_days >= 0:
-            self.cleanup_volumes(cleanup_ec2_max_volumes_age_days)
+        cleanup_ec2_max_age_days = PCWConfig.get_feature_property('cleanup', 'ec2-max-age-days', self._namespace)
+
+        if cleanup_ec2_max_age_days >= 0:
+            self.cleanup_images(cleanup_ec2_max_age_days)
+            self.cleanup_volumes(cleanup_ec2_max_age_days)
+            self.cleanup_snapshots(cleanup_ec2_max_age_days)
         if PCWConfig.getBoolean('cleanup/vpc_cleanup', self._namespace):
             self.cleanup_uploader_vpcs()
@@ -389,25 +323,13 @@ def cleanup_uploader_vpcs(self):
                                           region)
                 send_mail('VPC deletion locked by running VMs', body)

-    def cleanup_images(self):
+    def cleanup_images(self, cleanup_ec2_max_age_days):
         for region in self.all_regions:
             response = self.ec2_client(region).describe_images(Owners=['self'])
-            images = list()
             for img in response['Images']:
-                # img is in the format described here:
-                # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_images
-                m = self.parse_image_name(img['Name'])
-                if m:
-                    self.log_dbg("Image {} is candidate for deletion with build {}", img['Name'], m['build'])
-                    images.append(
-                        Image(img['Name'], flavor=m['key'], build=m['build'], date=parse(img['CreationDate']),
-                              img_id=img['ImageId']))
-                else:
-                    self.log_err(" Unable to parse image name '{}'", img['Name'])
-            keep_images = self.get_keeping_image_names(images)
-            for img in [i for i in images if i.name not in keep_images]:
-                self.log_dbg("Delete image '{}' (ami:{})".format(img.name, img.id))
-                if self.dry_run:
-                    self.log_info("Image deletion {} skipped due to dry run mode", img.id)
-                else:
-                    self.ec2_client(region).deregister_image(ImageId=img.id, DryRun=False)
+                if EC2.needs_to_delete_by_age(parse(img['CreationDate']), cleanup_ec2_max_age_days):
+                    if self.dry_run:
+                        self.log_info("Image deletion {} skipped due to dry run mode", img['ImageId'])
+                    else:
+                        self.log_dbg("Delete image '{}' (ami:{})".format(img['Name'], img['ImageId']))
+                        self.ec2_client(region).deregister_image(ImageId=img['ImageId'], DryRun=False)
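
All of the name-parsing retention rules above collapse into the single date comparison in needs_to_delete_by_age. A minimal standalone sketch of its semantics (the sample timestamp and thresholds below are illustrative, not values from the PR):

from datetime import date, datetime, timedelta, timezone

def needs_to_delete_by_age(creation_time: datetime, max_age_days: int) -> bool:
    # same comparison as EC2.needs_to_delete_by_age: datetime.date(x) is x.date(),
    # so only whole days count and the time of day is ignored
    return creation_time.date() < (date.today() - timedelta(days=max_age_days))

created = datetime.now(timezone.utc) - timedelta(days=3)
assert needs_to_delete_by_age(created, 1) is True   # 3 days old, 1-day limit: delete
assert needs_to_delete_by_age(created, 7) is False  # 3 days old, 7-day limit: keep
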
diff --git a/ocw/lib/azure.py b/ocw/lib/azure.py
index 65cabecf..55ee96ff 100644
--- a/ocw/lib/azure.py
+++ b/ocw/lib/azure.py
@@ -1,4 +1,4 @@
-from .provider import Provider, Image
+from .provider import Provider
 from webui.settings import PCWConfig
 from azure.identity import ClientSecretCredential
 from azure.mgmt.resource import ResourceManagementClient
@@ -6,7 +6,6 @@
 from azure.mgmt.storage import StorageManagementClient
 from azure.storage.blob import BlobServiceClient
 from msrest.exceptions import AuthenticationError
-import re
 import time
 from typing import Dict

@@ -100,126 +99,44 @@ def list_disks_by_resource_group(self, resource_group):

     def list_by_resource_group(self, resource_group, filters=None):
         return [item for item in self.resource_mgmt_client().resources.list_by_resource_group(
-            resource_group, filter=filters)]
-
-    def get_keeping_image_names(self):
-        images = list()
-        for item in self.container_client('sle-images').list_blobs():
-            m = self.parse_image_name(item.name)
-            if m:
-                images.append(Image(item.name, flavor=m['key'], build=m['build'], date=item.last_modified))
-            else:
-                self.log_err("Unable to parse image name '{}'", item.name)
-
-        return super().get_keeping_image_names(images)
+            resource_group, filter=filters, expand="changedTime")]

     def cleanup_all(self):
-        ''' Cleanup all autodateed data which might created during automated tests.'''
-        self.cleanup_bootdiagnostics()
-
-        keep_images = self.get_keeping_image_names()
-        self.cleanup_sle_images_container(keep_images)
-        self.cleanup_disks_from_rg(keep_images)
-        self.cleanup_images_from_rg(keep_images)
-        for i in keep_images:
-            self.log_info("Keep image {} ", i)
-
-    def cleanup_bootdiagnostics(self):
-        containers = self.bs_client().list_containers()
+        self.cleanup_blob_containers()
+        self.cleanup_images_from_rg()
+        self.cleanup_disks_from_rg()
+
+    def cleanup_blob_containers(self):
+        containers = self.bs_client().list_containers(include_metadata=True)
         for c in containers:
-            self.log_dbg('Found container {}', c.name)
-            if (re.match('^bootdiagnostics-', c.name)):
-                self.cleanup_bootdiagnostics_container(c)
-
-    def cleanup_bootdiagnostics_container(self, container):
-        latest_modification = container.last_modified
-        container_blobs = self.container_client(container.name).list_blobs()
-        for blob in container_blobs:
-            if (latest_modification > blob.last_modified):
-                latest_modification = blob.last_modified
-        if (self.older_than_min_age(latest_modification)):
-            self.log_info("Mark container for deletion {}", container.name)
-            if self.dry_run:
-                self.log_info("Deletion of boot diagnostic container {} skipped due to dry run mode", container.name)
-            else:
-                self.bs_client().delete_container(container.name)
-
-    def parse_image_name(self, img_name):
-        regexes = [
-            # SLES12-SP5-Azure.x86_64-0.9.1-SAP-BYOS-Build3.3.vhd
-            re.compile(r"""
-                       SLES
-                       (?P<version>\d+(-SP\d+)?)
-                       -Azure\.
-                       (?P<arch>[^-]+)
-                       -
-                       (?P<kiwi>\d+\.\d+\.\d+)
-                       -
-                       (?P<type>[-\w]+)
-                       -
-                       Build(?P<build>\d+\.\d+)
-                       \.vhd
-                       """,
-                       re.X),
-
-            # SLES15-SP2-BYOS.x86_64-0.9.3-Azure-Build1.10.vhd
-            # SLES15-SP2.x86_64-0.9.3-Azure-Basic-Build1.11.vhd
-            # SLES15-SP2-SAP-BYOS.x86_64-0.9.2-Azure-Build1.9.vhd
-            # SLES15-SP4-BYOS.x86_64-0.9.1-Azure-Build150400.2.103.vhd
-            re.compile(r"""
-                       SLES
-                       (?P<version>\d+(-SP\d+)?)
-                       (-(?P<type>[^\.]+))?\.
-                       (?P<arch>[^-]+)
-                       -
-                       (?P<kiwi>\d+\.\d+\.\d+)
-                       (-(?P<flavor>Azure[-\w]*))?
-                       -
-                       Build(\d+\.)?(?P<build>\d+\.\d+)
-                       \.vhd
-                       """,
-                       re.X)
-        ]
-        return self.parse_image_name_helper(img_name, regexes)
-
-    def cleanup_sle_images_container(self, keep_images):
-        container_client = self.container_client('sle-images')
-        for img in container_client.list_blobs():
-            m = self.parse_image_name(img.name)
-            if m:
-                self.log_dbg('Blob {} is candidate for deletion with build {} ', img.name, m['build'])
-
-                if img.name not in keep_images:
-                    self.log_info("Delete blob '{}'", img.name)
-                    if self.dry_run:
-                        self.log_info("Deletion of blob image {} skipped due to dry run mode", img.name)
-                    else:
-                        container_client.delete_blob(img.name, delete_snapshots="include")
+            if 'pcw_ignore' not in c['metadata']:
+                self.log_dbg('Found container {}', c.name)
+                container_blobs = self.container_client(c.name).list_blobs()
+                for blob in container_blobs:
+                    if (self.older_than_max_age_hours(blob.last_modified)):
+                        if self.dry_run:
+                            self.log_info("Deletion of blob {} skipped due to dry run mode", blob.name)
+                        else:
+                            self.log_info("Deleting blob {}", blob.name)
+                            self.container_client(c.name).delete_blob(blob.name, delete_snapshots="include")

-    def cleanup_images_from_rg(self, keep_images):
+    def cleanup_images_from_rg(self):
         for item in self.list_images_by_resource_group(self.__resource_group):
-            m = self.parse_image_name(item.name)
-            if m:
-                self.log_dbg('Image {} is candidate for deletion with build {} ', item.name, m['build'])
-                if item.name not in keep_images:
+            if (self.older_than_max_age_hours(item.changed_time)):
+                if self.dry_run:
+                    self.log_info("Deletion of image {} skipped due to dry run mode", item.name)
+                else:
                     self.log_info("Delete image '{}'", item.name)
-                    if self.dry_run:
-                        self.log_info("Deletion of image {} skipped due to dry run mode", item.name)
-                    else:
-                        self.compute_mgmt_client().images.begin_delete(self.__resource_group, item.name)
+                    self.compute_mgmt_client().images.begin_delete(self.__resource_group, item.name)

-    def cleanup_disks_from_rg(self, keep_images):
+    def cleanup_disks_from_rg(self):
         for item in self.list_disks_by_resource_group(self.__resource_group):
-            m = self.parse_image_name(item.name)
-            if m:
-                self.log_dbg('Disk {} is candidate for deletion with build {} ', item.name, m['build'])
-
-                if item.name not in keep_images:
-                    if self.compute_mgmt_client().disks.get(self.__resource_group, item.name).managed_by:
-                        self.log_warn("Disk is in use - unable delete {}", item.name)
+            if (self.older_than_max_age_hours(item.changed_time)):
+                if self.compute_mgmt_client().disks.get(self.__resource_group, item.name).managed_by:
+                    self.log_warn("Disk is in use - unable to delete {}", item.name)
+                else:
+                    if self.dry_run:
+                        self.log_info("Deletion of disk {} skipped due to dry run mode", item.name)
                     else:
                         self.log_info("Delete disk '{}'", item.name)
-                        if self.dry_run:
-                            self.log_info("Deletion of image {} skipped due to dry run mode", item.name)
-                        else:
-                            self.compute_mgmt_client().disks.begin_delete(self.__resource_group, item.name)
+                        self.compute_mgmt_client().disks.begin_delete(self.__resource_group, item.name)
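
Blob cleanup is now opt-out per container: any container carrying a pcw_ignore metadata key is skipped before its blobs are even listed. A hedged sketch of that filter, mirroring the dict-style access the new code uses (FakeContainer and the sample names are illustrative only):

class FakeContainer:
    # stand-in for the dict-like container objects returned by
    # list_containers(include_metadata=True)
    def __init__(self, name, metadata):
        self.name = name
        self.metadata = metadata

    def __getitem__(self, key):
        return self.metadata

containers = [
    FakeContainer('bootdiagnostics-kv123', {}),        # eligible for blob cleanup
    FakeContainer('sle-images', {'pcw_ignore': '1'}),  # skipped entirely
]
cleanable = [c.name for c in containers if 'pcw_ignore' not in c['metadata']]
assert cleanable == ['bootdiagnostics-kv123']
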
run mode", instance_id ) else: + self.log_info("Delete instance {}".format(instance_id)) self.compute_client().instances().delete( project=self.__project, zone=zone, instance=instance_id ).execute() @@ -89,115 +90,33 @@ def delete_instance(self, instance_id, zone): def url_to_name(url): return url[url.rindex("/")+1:] - def parse_image_name(self, img_name): - regexes = [ - # sles12-sp5-gce-x8664-0-9-1-byos-build1-56 - re.compile( - r"""^sles - (?P\d+(-sp\d+)?) - - - (?Pgce) - - - (?P[^-]+) - - - (?P\d+-\d+-\d+) - - - (?P(byos|on-demand)) - -build - (?P\d+-\d+) - """, - re.RegexFlag.X, - ), - # sles15-sp2-byos-x8664-0-9-3-gce-build1-10 - # sles15-sp2-x8664-0-9-3-gce-build1-10 - re.compile( - r"""^sles - (?P\d+(-sp\d+)?) - (-(?P[-\w]+))? - - - (?P[^-]+) - - - (?P\d+-\d+-\d+) - - - (?Pgce) - - - build - (?P\d+-\d+) - """, - re.RegexFlag.X, - ), - # sles15-sp1-gce-byos-x8664-1-0-5-build1-101 - re.compile( - r"""^sles - (?P\d+(-sp\d+)?) - (-(?Pgce))? - - - (?P[-\w]+) - - - (?P[^-]+) - - - (?P\d+-\d+-\d+) - - - build - (?P\d+-\d+) - """, - re.RegexFlag.X, - ), - ] - return self.parse_image_name_helper(img_name, regexes) - def cleanup_all(self): - images = list() request = self.compute_client().images().list(project=self.__project) while request is not None: response = request.execute() if "items" not in response: break for image in response["items"]: - # creation:2019-11-04T14:23:06.372-08:00 - # name:sles12-sp5-gce-x8664-0-9-1-byos-build1-56 - m = self.parse_image_name(image["name"]) - if m: - images.append( - Image( - image["name"], - flavor=m["key"], - build=m["build"], - date=parse(image["creationTimestamp"]), + if self.older_than_max_age_hours(parse(image["creationTimestamp"]).astimezone(timezone.utc)): + if self.dry_run: + self.log_info("Deletion of image {} skipped due to dry run mode", image["name"]) + else: + self.log_info("Delete image '{}'", image["name"]) + request = ( + self.compute_client() + .images() + .delete(project=self.__project, image=image["name"]) ) - ) - self.log_dbg( - "Image {} is candidate for deletion with build {}", - image["name"], - m["build"], - ) - else: - self.log_err("Unable to parse image name '{}'", image["name"]) + response = request.execute() + if "error" in response: + for e in response["error"]["errors"]: + self.log_err(e["message"]) + if "warnings" in response: + for w in response["warnings"]: + self.log_warn(w["message"]) request = ( self.compute_client() .images() .list_next(previous_request=request, previous_response=response) ) - - keep_images = self.get_keeping_image_names(images) - - for img in [i for i in images if i.name not in keep_images]: - self.log_info("Delete image '{}'", img.name) - if self.dry_run: - self.log_info( - "Deletion of image {} skipped due to dry run mode", img.name - ) - else: - request = ( - self.compute_client() - .images() - .delete(project=self.__project, image=img.name) - ) - response = request.execute() - if "error" in response: - for e in response["error"]["errors"]: - self.log_err(e["message"]) - if "warnings" in response: - for w in response["warnings"]: - self.log_warn(w["message"]) diff --git a/ocw/lib/provider.py b/ocw/lib/provider.py index 911490d8..8dec0753 100644 --- a/ocw/lib/provider.py +++ b/ocw/lib/provider.py @@ -1,9 +1,7 @@ from webui.settings import PCWConfig -import re from datetime import datetime from datetime import timedelta from datetime import timezone -from looseversion import LooseVersion import logging import json from pathlib import Path @@ -31,48 +29,17 @@ def getData(self, name=None): return 
diff --git a/ocw/lib/provider.py b/ocw/lib/provider.py
index 911490d8..8dec0753 100644
--- a/ocw/lib/provider.py
+++ b/ocw/lib/provider.py
@@ -1,9 +1,7 @@
 from webui.settings import PCWConfig
-import re
 from datetime import datetime
 from datetime import timedelta
 from datetime import timezone
-from looseversion import LooseVersion
 import logging
 import json
 from pathlib import Path
@@ -31,48 +29,17 @@ def getData(self, name=None):
             return self.auth_json
         return self.auth_json[name]

-    def older_than_min_age(self, age):
-        return datetime.now(timezone.utc) > age + timedelta(
-            hours=PCWConfig.get_feature_property('cleanup', 'min-image-age-hours', self._namespace))
-
-    def needs_to_delete_image(self, order_number, image_date):
-        if self.older_than_min_age(image_date):
-            max_images_per_flavor = PCWConfig.get_feature_property('cleanup', 'max-images-per-flavor',
-                                                                   self._namespace)
-            max_image_age = image_date + timedelta(
-                hours=PCWConfig.get_feature_property('cleanup', 'max-image-age-hours', self._namespace))
-            return order_number >= max_images_per_flavor or max_image_age < datetime.now(timezone.utc)
-        else:
-            return False
-
-    def parse_image_name_helper(self, img_name, regex_s, group_key=['version', 'flavor', 'type', 'arch'],
-                                group_build=['kiwi', 'build']):
-        for regex in regex_s:
-            m = re.match(regex, img_name)
-            if m:
-                gdict = m.groupdict()
-                return {
-                    'key': '-'.join([gdict[k] for k in group_key if k in gdict and gdict[k] is not None]),
-                    'build': "-".join([gdict[k] for k in group_build if k in gdict and gdict[k] is not None]),
-                }
-        return None
-
-    def get_keeping_image_names(self, images):
-        images_by_flavor = dict()
-        for img in images:
-            if (img.flavor not in images_by_flavor):
-                images_by_flavor[img.flavor] = list()
-            images_by_flavor[img.flavor].append(img)
-
-        keep_images = list()
-        for img_list in [images_by_flavor[x] for x in sorted(images_by_flavor)]:
-            img_list.sort(key=lambda x: LooseVersion(x.build), reverse=True)
-            for i in range(0, len(img_list)):
-                img = img_list[i]
-                if (not self.needs_to_delete_image(i, img.date)):
-                    keep_images.append(img.name)
-
-        return keep_images
+    def older_than_max_age_hours(self, age):
+        """
+        older_than_max_age_hours - decides whether a resource has exceeded the maximum allowed TTL.
+        The maximum allowed TTL is controlled by the cleanup/max-age-hours pcw.ini config param.
+        :param age: usually the creation time of the resource, or any other timestamp which may be
+        used to identify the age of the resource
+        :return: True if the resource is already too old, False otherwise
+        """
+        delta_in_hours = PCWConfig.get_feature_property('cleanup', 'max-age-hours', self._namespace)
+        max_allowed_age = datetime.now(timezone.utc) - timedelta(hours=delta_in_hours)
+        return max_allowed_age > age

     def log_info(self, message: str, *args: object):
         if args:
@@ -93,16 +60,3 @@ def log_dbg(self, message: str, *args: object):
         if args:
             message = message.format(*args)
         self.logger.debug("[{}] {}".format(self._namespace, message))
-
-
-class Image:
-
-    def __init__(self, name, flavor, build, date, img_id=None):
-        self.name = name
-        self.flavor = flavor
-        self.build = build
-        self.date = date
-        self.id = img_id if img_id else name
-
-    def __str__(self):
-        return "[{} {} {} {}]".format(self.name, self.flavor, self.build, self.date)
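
older_than_max_age_hours is the single retention rule left in Provider. A runnable sketch of the cutoff arithmetic with the PCWConfig lookup replaced by a plain parameter (an assumption for illustration; timestamps must be timezone-aware, as the callers ensure):

from datetime import datetime, timedelta, timezone

def older_than_max_age_hours(age: datetime, max_age_hours: int) -> bool:
    # the cutoff sits max_age_hours behind "now"; anything created before it is too old
    cutoff = datetime.now(timezone.utc) - timedelta(hours=max_age_hours)
    return cutoff > age

two_days_old = datetime.now(timezone.utc) - timedelta(hours=48)
assert older_than_max_age_hours(two_days_old, 24) is True       # past a 24h TTL
assert older_than_max_age_hours(two_days_old, 24 * 7) is False  # within the one-week default
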
diff --git a/requirements.txt b/requirements.txt
index e9085a48..24d840f5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,4 +17,3 @@ google-api-python-client==2.55.0
 google-cloud-storage==2.4.0
 python-dateutil
 apscheduler
-looseversion
diff --git a/templates/pcw.ini b/templates/pcw.ini
index 4b569983..4555b959 100644
--- a/templates/pcw.ini
+++ b/templates/pcw.ini
@@ -17,26 +17,41 @@ smtp-port = 25
 to =
 from = pcw@publiccloud.qa.suse.de
+
+[cluster.notify]
+# List of people who will be notified about found EKS clusters
+to =
+
 [notify.namespace.qac]
 # list of recipients which will be notified in case of issues in certain namespace
 to =

 [cleanup]
-# Specify how many images per flavor get kept
-max-images-per-flavor = 2
-# Max age of an image file
-max-images-age-hours = 24
+# Max age of data storage resources in days (used in EC2 only)
+ec2-max-age-days = 1
+# Max age of data storage resources in hours (used in Azure and GCE)
+max-age-hours = 1
 # Specify with which namespace, we will do the cleanup.
 # if not specifed default/namespaces list will be taken instead
 namespaces = qac

 # Values specified under "cleanup.namespace.{namespace}" have precedence over same values in [cleanup] for this certain namespace
 [cleanup.namespace.qac]
-# EC2 snapshots younger than this amount of days will be ignored
-ec2-max-snapshot-age-days = 2
+# Max age of an image file (used in EC2 only)
+ec2-max-age-days = 2
 # EC2 volumes younger than this amount of days will be ignored
 ec2-max-volumes-age-days = 2
+# Azure resource group which PCW will scan for data storage resources that need to be deleted
 azure-storage-resourcegroup = openqa-upload
+# AccountName used for creation of BlobServiceClient
 azure-storage-account-name = openqa
 # When set to true EC2 VPC cleanup will be enabled
 vpc_cleanup = true
+
+[updaterun]
+# If the openqa_ttl tag is not defined, this TTL will be set on the instance
+default_ttl = 44100
+
+[webui]
+# URL base used to generate links to openQA (combined with the job ID from the openqa_var_JOB_ID tag)
+openqa_url=https://openqa.suse.de/
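
The template notes that values under [cleanup.namespace.{namespace}] take precedence over [cleanup]. A hedged approximation of that lookup order using plain configparser (the real logic lives in PCWConfig.get_feature_property; this only illustrates the precedence):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[cleanup]
max-age-hours = 1

[cleanup.namespace.qac]
ec2-max-age-days = 2
""")

def get_feature_property(feature, prop, namespace, default=None):
    # the namespace-scoped section wins over the plain feature section
    for section in ('{}.namespace.{}'.format(feature, namespace), feature):
        if config.has_section(section) and config.has_option(section, prop):
            return config.getint(section, prop)
    return default

assert get_feature_property('cleanup', 'ec2-max-age-days', 'qac', default=-1) == 2
assert get_feature_property('cleanup', 'max-age-hours', 'qac') == 1
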
diff --git a/tests/generators.py b/tests/generators.py
index 01a966a9..d42ef79f 100644
--- a/tests/generators.py
+++ b/tests/generators.py
@@ -2,33 +2,18 @@
 from datetime import datetime

 fake = Faker()
-min_image_age_hours = 7
-max_images_per_flavor = 1
-max_image_age_hours = 20
+max_age_hours = 7
 azure_storage_resourcegroup = 'openqa'
-ec2_max_snapshot_age_days = 1
-ec2_max_volumes_age_days = 5
-
-
-class MockImage:
-    def __init__(self, name, last_modified=None):
-        self.name = name
-        self.last_modified = last_modified
+ec2_max_age_days = 1


 def mock_get_feature_property(feature: str, property: str, namespace: str = None):
-    if property == 'min-image-age-hours':
-        return min_image_age_hours
-    elif property == 'max-images-per-flavor':
-        return max_images_per_flavor
-    elif property == 'max-image-age-hours':
-        return max_image_age_hours
+    if property == 'max-age-hours':
+        return max_age_hours
     elif property == 'azure-storage-resourcegroup':
         return azure_storage_resourcegroup
-    elif property == 'ec2-max-snapshot-age-days':
-        return ec2_max_snapshot_age_days
-    elif property == 'ec2-max-volumes-age-days':
-        return ec2_max_volumes_age_days
+    elif property == 'ec2-max-age-days':
+        return ec2_max_age_days


 class ec2_meta_mock:
diff --git a/tests/test_azure.py b/tests/test_azure.py
index b13f3388..8fec58fd 100644
--- a/tests/test_azure.py
+++ b/tests/test_azure.py
@@ -1,31 +1,14 @@
 from ocw.lib.azure import Azure, Provider
 from webui.settings import PCWConfig
 from datetime import datetime, timezone, timedelta
-from .generators import MockImage
 from .generators import mock_get_feature_property
 from tests import generators
 from msrest.exceptions import AuthenticationError
+from faker import Faker
 import time
 import pytest

-
-delete_calls = {'quantity': [], 'old': [], 'young': []}
-
-
-def delete_blob_mock(self, container_name, img_name, snapshot=None):
-    delete_calls[container_name].append(img_name)
-
-
-def list_blobs_mock(self, container_name):
-    last_modified = datetime.now()
-    return [MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd'),
-            MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd'),
-            MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.7.vhd'),
-            MockImage('SLES15-SP2-BYOS.x86_64-0.9.3-Azure-Build2.36.vhd', last_modified),
-            MockImage('SLES15-SP2-BYOS.x86_64-0.9.6-Azure-Build1.3.vhd', last_modified),
-            MockImage('SLES15-SP2-BYOS.x86_64-0.9.6-Azure-Build1.9.vhd', last_modified)
-            ]
-
+deleted_images = list()

 @pytest.fixture
 def azure_patch(monkeypatch):
@@ -34,258 +17,230 @@ def azure_patch(monkeypatch):
     monkeypatch.setattr(PCWConfig, 'get_feature_property', mock_get_feature_property)
     return Azure('fake')

+@pytest.fixture
+def container_client_all_new(monkeypatch):
+    fakecontainerclient = FakeContainerClient([FakeBlob(), FakeBlob()])
+    monkeypatch.setattr(Azure, 'container_client', lambda *args, **kwargs: fakecontainerclient)
+    return fakecontainerclient

-def test_parse_image_name(azure_patch):
+@pytest.fixture
+def container_client_one_old(monkeypatch):
+    old_times = datetime.now(timezone.utc) - timedelta(hours=generators.max_age_hours+1)
+    fakecontainerclient = FakeContainerClient([FakeBlob(old_times, "to_be_deleted"), FakeBlob()])
+    monkeypatch.setattr(Azure, 'container_client', lambda *args, **kwargs: fakecontainerclient)
+    return fakecontainerclient

-    assert azure_patch.parse_image_name('SLES12-SP5-Azure.x86_64-0.9.1-SAP-BYOS-Build3.3.vhd') == {
-        'key': '12-SP5-SAP-BYOS-x86_64',
-        'build': '0.9.1-3.3'
-    }
+@pytest.fixture
+def bs_client_no_pcw_ignore(monkeypatch):
+    fakeblobserviceclient = FakeBlobServiceClient([FakeBlobContainer(), FakeBlobContainer()])
+    monkeypatch.setattr(Azure, 'bs_client', lambda *args, **kwargs: fakeblobserviceclient)

-    assert azure_patch.parse_image_name('SLES15-SP2-BYOS.x86_64-0.9.3-Azure-Build1.10.vhd') == {
-        'key': '15-SP2-Azure-BYOS-x86_64',
-        'build': '0.9.3-1.10'
-    }
-    assert azure_patch.parse_image_name('SLES15-SP2.x86_64-0.9.3-Azure-Basic-Build1.11.vhd') == {
-        'key': '15-SP2-Azure-Basic-x86_64',
-        'build': '0.9.3-1.11'
-    }
+@pytest.fixture
+def bs_client_one_pcw_ignore(monkeypatch):
+    fakeblobserviceclient = FakeBlobServiceClient([FakeBlobContainer({"pcw_ignore": "1"}), FakeBlobContainer()])
+    monkeypatch.setattr(Azure, 'bs_client', lambda *args, **kwargs: fakeblobserviceclient)

-    assert azure_patch.parse_image_name('SLES15-SP2-SAP-BYOS.x86_64-0.9.2-Azure-Build1.9.vhd') == {
-        'key': '15-SP2-Azure-SAP-BYOS-x86_64',
-        'build': '0.9.2-1.9'
-    }
-    assert azure_patch.parse_image_name('SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd') == {
-        'key': '15-SP2-Azure-HPC-x86_64',
-        'build': '0.9.0-1.43'
-    }
-    assert azure_patch.parse_image_name('SLES15-SP2-BYOS.aarch64-0.9.3-Azure-Build2.36.vhdfixed.x') == {
-        'key': '15-SP2-Azure-BYOS-aarch64',
-        'build': '0.9.3-2.36'
-    }
+@pytest.fixture
+def mock_compute_mgmt_client(monkeypatch):

-    assert azure_patch.parse_image_name('do not match') is None
+    global deleted_images
+    # make sure we are not failing because another test left a dirty env
+    deleted_images = list()

+    def mock_compute_mgmt_client(self):
+        def compute_mgmt_client():
+            pass
+        compute_mgmt_client.images = lambda: None
+        compute_mgmt_client.images.begin_delete = lambda rg, name: deleted_images.append(name)
+        return compute_mgmt_client

-def test_cleanup_sle_images_container(azure_patch, monkeypatch):
-    class FakeContainerClient:
-        deleted_blobs = list()
+    monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)

-        def list_blobs(self):
-            return [
-                MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd'),
-                MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd'),
-                MockImage('YouWillNotGetMyBuildNumber'),
-            ]
+class FakeDisk:

-        def delete_blob(self, img_name, delete_snapshots):
-            self.deleted_blobs.append(img_name)
+    def __init__(self, managed_by=None):
+        self.managed_by = managed_by

-    fakecontainerclient = FakeContainerClient()
+class FakeBlobContainer:

-    monkeypatch.setattr(Azure, 'container_client', lambda *args, **kwargs: fakecontainerclient)
-    az = Azure('fake')
-    keep_images = ['SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd']
+    def __init__(self, metadata=[]):
+        self.name = Faker().uuid4()
+        self.metadata = metadata

-    az.cleanup_sle_images_container(keep_images)
-    assert fakecontainerclient.deleted_blobs == ['SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd']
+    def __getitem__(self, i):
+        return self.metadata

-def test_cleanup_images_from_rg(azure_patch, monkeypatch):
-    deleted_images = list()
-    items = [
-        MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd'),
-        MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd'),
-        MockImage('YouWillNotGetMyBuildNumber'),
-    ]
-
-    def mock_res_mgmt_client(self):
-        def res_mgmt_client():
-            pass
-        res_mgmt_client.resources = lambda: None
-        res_mgmt_client.resources.list_by_resource_group = lambda *args, **kwargs: items
-        return res_mgmt_client
+class FakeBlob:

-    def mock_compute_mgmt_client(self):
-        def compute_mgmt_client():
-            pass
-        compute_mgmt_client.images = lambda: None
-        compute_mgmt_client.images.begin_delete = lambda rg, name: deleted_images.append(name)
-        return compute_mgmt_client
+    def __init__(self, last_modified=None, name=None):
+        if name is None:
+            self.name = Faker().uuid4()
+        else:
+            self.name = name
+        if last_modified is None:
+            self.last_modified = datetime.now(timezone.utc)
+        else:
+            self.last_modified = last_modified

-    monkeypatch.setattr(Azure, 'resource_mgmt_client', mock_res_mgmt_client)
-    monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)
+class FakeBlobServiceClient:

-    az = Azure('fake')
-    keep_images = ['SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd']
-    az.cleanup_images_from_rg(keep_images)
-    assert deleted_images == ['SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd']
+    def __init__(self, containers):
+        self.containers = containers

+    def list_containers(self, include_metadata):
+        return self.containers

-def test_cleanup_disks_from_rg(azure_patch, monkeypatch):
-    deleted_disks = list()
-    items = [
-        MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd'),
-        MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd'),
-        MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.7.vhd'),
-        MockImage('YouWillNotGetMyBuildNumber'),
-    ]
+class FakeContainerClient:

-    def mock_res_mgmt_client(self):
-        def res_mgmt_client():
-            pass
-        res_mgmt_client.resources = lambda: None
-        res_mgmt_client.resources.list_by_resource_group = lambda *args, **kwargs: items
-        return res_mgmt_client
+    def list_blobs(self):
+        return self.blobs

-    def mock_compute_mgmt_client(self):
-        class FakeDisk:
-            def __init__(self, rg, name):
-                self.managed_by = name == 'SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd'
+    def __init__(self, blobs):
+        self.deleted_blobs = list()
+        self.blobs = blobs

-        def compute_mgmt_client():
-            pass
-        compute_mgmt_client.disks = lambda: None
-        compute_mgmt_client.disks.get = lambda rg, name: FakeDisk(rg, name)
-        compute_mgmt_client.disks.begin_delete = lambda rg, name: deleted_disks.append(name)
-        return compute_mgmt_client
+    def delete_blob(self, img_name, delete_snapshots):
+        self.deleted_blobs.append(img_name)

-    monkeypatch.setattr(Azure, 'resource_mgmt_client', mock_res_mgmt_client)
-    monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)
+class FakeItem:

-    keep_images = ['SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd']
-    az = Azure('fake')
-    az.cleanup_disks_from_rg(keep_images)
-    assert deleted_disks == ['SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.7.vhd']
+    def __init__(self, changed_time=None, name=None):
+        if changed_time is None:
+            self.changed_time = datetime.now(timezone.utc)
+        else:
+            self.changed_time = changed_time
+        if name is None:
+            self.name = Faker().uuid4()
+        else:
+            self.name = name

+def test_cleanup_blob_containers_all_new_no_pcw_ignore(azure_patch, container_client_all_new, bs_client_no_pcw_ignore):
+    azure_patch.cleanup_blob_containers()
+    assert container_client_all_new.deleted_blobs == []

-def test_get_keeping_image_names(azure_patch, monkeypatch):
-    class FakeContainerClient:
-        def list_blobs(self):
-            older_then_min_age = datetime.now(timezone.utc) - timedelta(hours=generators.min_image_age_hours+1)
-            return [
-                MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.0-Build1.43.vhd', older_then_min_age),
-                MockImage('SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd', older_then_min_age),
-                MockImage('YouWillNotGetMyBuildNumber', older_then_min_age),
-            ]
+def test_cleanup_blob_containers_one_old_no_pcw_ignore(azure_patch, container_client_one_old, bs_client_no_pcw_ignore):
+    azure_patch.cleanup_blob_containers()
+    assert container_client_one_old.deleted_blobs == ["to_be_deleted", "to_be_deleted"]

-    fakecontainerclient = FakeContainerClient()
-    monkeypatch.setattr(Azure, 'container_client', lambda *args, **kwargs: fakecontainerclient)
+def test_cleanup_blob_containers_one_old_one_pcw_ignore(azure_patch, container_client_one_old, bs_client_one_pcw_ignore):
+    azure_patch.cleanup_blob_containers()
+    assert container_client_one_old.deleted_blobs == ["to_be_deleted"]

-    az = Azure('fake')
-    generators.max_images_per_flavor = 1
-    assert az.get_keeping_image_names() == ['SLES15-SP2-Azure-HPC.x86_64-0.9.1-Build1.3.vhd']
+def test_cleanup_blob_containers_all_new_one_pcw_ignore(azure_patch, container_client_all_new, bs_client_one_pcw_ignore):
+    azure_patch.cleanup_blob_containers()
+    assert container_client_all_new.deleted_blobs == []

-def test_cleanup_all(azure_patch, monkeypatch):
-    called = 0
+def test_cleanup_images_from_rg_all_new(azure_patch, monkeypatch, mock_compute_mgmt_client):

-    def count_call(*args, **kwargs):
-        nonlocal called
-        called = called + 1
+    monkeypatch.setattr(Azure, 'list_by_resource_group', lambda *args, **kwargs: [FakeItem(), FakeItem()])
+    azure_patch.cleanup_images_from_rg()

-    monkeypatch.setattr(Azure, 'get_storage_key', lambda *args, **kwargs: 'FOOXX')
-    monkeypatch.setattr(Azure, 'get_keeping_image_names', lambda *args, **kwargs: ['a', 'b'])
-    monkeypatch.setattr(Azure, 'cleanup_sle_images_container', count_call)
-    monkeypatch.setattr(Azure, 'cleanup_disks_from_rg', count_call)
-    monkeypatch.setattr(Azure, 'cleanup_images_from_rg', count_call)
-    monkeypatch.setattr(Azure, 'cleanup_bootdiagnostics', count_call)
-    monkeypatch.setattr(Provider, 'read_auth_json', lambda *args, **kwargs: '{}')
+    assert len(deleted_images) == 0

-    az = Azure('fake')
-    az.cleanup_all()
-    assert called == 4
+def test_cleanup_images_from_rg_one_old(azure_patch, monkeypatch, mock_compute_mgmt_client):
+    old_times = datetime.now(timezone.utc) - timedelta(hours=generators.max_age_hours+1)
+    monkeypatch.setattr(Azure, 'list_by_resource_group', lambda *args, **kwargs: [FakeItem(old_times, "to_delete"),
+                                                                                  FakeItem()
+                                                                                  ])
+    azure_patch.cleanup_images_from_rg()
+    assert len(deleted_images) == 1
+    assert deleted_images[0] == "to_delete"

-def test_cleanup_bootdiagnostics(azure_patch, monkeypatch):
-    called = 0
+def test_cleanup_disks_from_rg_all_new(azure_patch, monkeypatch):

-    def count_call(*args, **kwargs):
-        nonlocal called
-        called = called + 1
+    global deleted_images
+    # make sure we are not failing because another test left a dirty env
+    deleted_images = list()

-    class FakeBlobServiceClient:
+    def mock_compute_mgmt_client(self):
+        def compute_mgmt_client():
+            pass

-        def list_containers(self):
-            return [
-                MockImage('bootdiagnostics-A'),
-                MockImage('ShouldNotMatchRegex'),
-                MockImage('bootdiagnostics-C'),
-                MockImage('bootdiagnostics-D'),
-                MockImage('bootdiagnostics-E'),
-            ]
+        compute_mgmt_client.disks = lambda: None
+        compute_mgmt_client.disks.begin_delete = lambda rg, name: deleted_images.append(name)
+        compute_mgmt_client.disks.get = lambda rg, name: FakeDisk()
+        return compute_mgmt_client

-    fakeblobserviceclient = FakeBlobServiceClient()
+    monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)

-    monkeypatch.setattr(Azure, 'bs_client', lambda *args, **kwargs: fakeblobserviceclient)
-    monkeypatch.setattr(Azure, 'cleanup_bootdiagnostics_container', count_call)
+    monkeypatch.setattr(Azure, 'list_by_resource_group', lambda *args, **kwargs: [FakeItem(), FakeItem()])
+    azure_patch.cleanup_disks_from_rg()

-    az = Azure('fake')
-    az.cleanup_bootdiagnostics()
+    assert len(deleted_images) == 0

-    assert called == 4
+def test_cleanup_disks_from_rg_one_old_no_managed_by(azure_patch, monkeypatch):
+
+    global deleted_images
+    # make sure we are not failing because another test left a dirty env
+    deleted_images = list()
+
+    def mock_compute_mgmt_client(self):
+        def compute_mgmt_client():
+            pass
+        compute_mgmt_client.disks = lambda: None
+        compute_mgmt_client.disks.begin_delete = lambda rg, name: deleted_images.append(name)
+        compute_mgmt_client.disks.get = lambda rg, name: FakeDisk()
+        return compute_mgmt_client

-def test_cleanup_bootdiagnostics_container_older_than_min_age(azure_patch, monkeypatch):
-
-    class FakeBlobServiceClient:
-        deleted_containers = list()
+    monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)

-        def delete_container(self, container_name):
-            self.deleted_containers.append(container_name)
+    old_times = datetime.now(timezone.utc) - timedelta(hours=generators.max_age_hours+1)
+    monkeypatch.setattr(Azure, 'list_by_resource_group', lambda *args, **kwargs: [FakeItem(old_times, "to_delete"),
+                                                                                  FakeItem()
+                                                                                  ])
+    azure_patch.cleanup_disks_from_rg()

-    class FakeContainerClient():
+    assert len(deleted_images) == 1
+    assert deleted_images[0] == "to_delete"

-        def list_blobs(self):
-            older_then_min_age = datetime.now(timezone.utc) - timedelta(hours=generators.min_image_age_hours+1)
-            newer_then_min_age = datetime.now(timezone.utc)
-            return [
-                MockImage('Image', newer_then_min_age),
-                MockImage('Image', newer_then_min_age),
-                MockImage('Image', newer_then_min_age),
-                MockImage('Image', older_then_min_age),
-            ]
+def test_cleanup_disks_from_rg_one_old_with_managed_by(azure_patch, monkeypatch):

-    fakecontainerclient = FakeContainerClient()
-    fakeblobserviceclient = FakeBlobServiceClient()
-    monkeypatch.setattr(Azure, 'container_client', lambda *args, **kwargs: fakecontainerclient)
-    monkeypatch.setattr(Azure, 'bs_client', lambda *args, **kwargs: fakeblobserviceclient)
+    global deleted_images
+    # make sure we are not failing because another test left a dirty env
+    deleted_images = list()

-    az = Azure('fake')
-    az.cleanup_bootdiagnostics_container(MockImage('HaveOneOlder', datetime.now(timezone.utc)))
-    assert len(fakeblobserviceclient.deleted_containers) == 1
+    def mock_compute_mgmt_client(self):
+        def compute_mgmt_client():
+            pass

+        compute_mgmt_client.disks = lambda: None
+        compute_mgmt_client.disks.begin_delete = lambda rg, name: deleted_images.append(name)
+        compute_mgmt_client.disks.get = lambda rg, name: FakeDisk("I am busy")
+        return compute_mgmt_client
+    monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)

-def test_cleanup_bootdiagnostics_container_all_newer(azure_patch, monkeypatch):
+    old_times = datetime.now(timezone.utc) - timedelta(hours=generators.max_age_hours+1)
+    monkeypatch.setattr(Azure, 'list_by_resource_group', lambda *args, **kwargs: [FakeItem(old_times, "to_delete"),
+                                                                                  FakeItem()
+                                                                                  ])
+    azure_patch.cleanup_disks_from_rg()

-    class FakeBlobServiceClient:
-        deleted_containers = list()
+    assert len(deleted_images) == 0

-        def delete_container(self, container_name):
-            self.deleted_containers.append(container_name)

-    class FakeContainerClient():
+def test_cleanup_all(azure_patch, monkeypatch):
+    called = 0

-        def list_blobs(self):
-            older_then_min_age = datetime.now(timezone.utc) - timedelta(hours=generators.min_image_age_hours+1)
-            newer_then_min_age = datetime.now(timezone.utc)
-            return [
-                MockImage('Image', newer_then_min_age),
-                MockImage('Image', newer_then_min_age),
-                MockImage('Image', newer_then_min_age),
-                MockImage('Image', newer_then_min_age),
-            ]
+    def count_call(*args, **kwargs):
+        nonlocal called
+        called = called + 1

-    fakecontainerclient = FakeContainerClient()
-    fakeblobserviceclient = FakeBlobServiceClient()
-    monkeypatch.setattr(Azure, 'container_client', lambda *args, **kwargs: fakecontainerclient)
-    monkeypatch.setattr(Azure, 'bs_client', lambda *args, **kwargs: fakeblobserviceclient)
+    monkeypatch.setattr(Azure, 'get_storage_key', lambda *args, **kwargs: 'FOOXX')
+    monkeypatch.setattr(Azure, 'cleanup_blob_containers', count_call)
+    monkeypatch.setattr(Azure, 'cleanup_disks_from_rg', count_call)
+    monkeypatch.setattr(Azure, 'cleanup_images_from_rg', count_call)
+    monkeypatch.setattr(Provider, 'read_auth_json', lambda *args, **kwargs: '{}')

     az = Azure('fake')
-    az.cleanup_bootdiagnostics_container(MockImage('AllNewer', datetime.now(timezone.utc)))
-    assert len(fakeblobserviceclient.deleted_containers) == 0
+    az.cleanup_all()
+    assert called == 3


 def test_check_credentials(monkeypatch):
diff --git a/tests/test_ec2.py b/tests/test_ec2.py
index 410a10b1..29131721 100644
--- a/tests/test_ec2.py
+++ b/tests/test_ec2.py
@@ -1,13 +1,16 @@
 from ocw.lib.EC2 import EC2, Provider
 from webui.settings import PCWConfig
 from tests.generators import mock_get_feature_property
-from tests.generators import min_image_age_hours, max_image_age_hours, ec2_max_volumes_age_days, \
-    ec2_max_snapshot_age_days
+from tests.generators import ec2_max_age_days
+from faker import Faker
 from datetime import datetime, timezone, timedelta
 from botocore.exceptions import ClientError
 import pytest

-older_then_min_age = (datetime.now(timezone.utc) - timedelta(hours=min_image_age_hours + 1)).isoformat()
+older_than_max_age_date = datetime.now(timezone.utc) - timedelta(days=ec2_max_age_days + 1)
+older_than_max_age_str = older_than_max_age_date.strftime("%m/%d/%Y, %H:%M:%S")
+now_age_date = datetime.now(timezone.utc)
+now_age_str = now_age_date.strftime("%m/%d/%Y, %H:%M:%S")

 # used by test_delete_vpc_deleting_everything test. Needs to be global due to use in ec2_patch fixture
 delete_vpc_calls_stack = []
@@ -39,7 +42,6 @@ def mocked_client():
     monkeypatch.setattr(EC2, 'get_all_regions', lambda self: ['region1'])
     monkeypatch.setattr(PCWConfig, 'get_feature_property', mock_get_feature_property)
     monkeypatch.setattr(EC2, 'ec2_client', lambda self, region: MockedEC2Client())
-    monkeypatch.setattr(EC2, 'needs_to_delete_snapshot', lambda *args, **kwargs: True)
     monkeypatch.setattr(EC2, 'ec2_resource', lambda self, region: mocked_ec2_resource)

     mocked_ec2_resource.Vpc = mocked_vpc
@@ -233,92 +235,48 @@ def delete(self):
         MockedVpcPeeringConnection.delete_called = True


-def test_parse_image_name(ec2_patch):
-    assert ec2_patch.parse_image_name('openqa-SLES12-SP5-EC2.x86_64-0.9.1-BYOS-Build1.55.raw.xz') == {
-        'key': '12-SP5-EC2-BYOS-x86_64',
-        'build': '0.9.1-1.55'
-    }
-    assert ec2_patch.parse_image_name('openqa-SLES15-SP2.x86_64-0.9.3-EC2-HVM-Build1.10.raw.xz') == {
-        'key': '15-SP2-EC2-HVM-x86_64',
-        'build': '0.9.3-1.10'
-    }
-    assert ec2_patch.parse_image_name('openqa-SLES15-SP2-BYOS.x86_64-0.9.3-EC2-HVM-Build1.10.raw.xz') == {
-        'key': '15-SP2-EC2-HVM-BYOS-x86_64',
-        'build': '0.9.3-1.10'
-    }
-    assert ec2_patch.parse_image_name('openqa-SLES15-SP2.aarch64-0.9.3-EC2-HVM-Build1.49.raw.xz') == {
-        'key': '15-SP2-EC2-HVM-aarch64',
-        'build': '0.9.3-1.49'
-    }
-    assert ec2_patch.parse_image_name('openqa-SLES12-SP4-EC2-HVM-BYOS.x86_64-0.9.2-Build2.56.raw.xz') == {
-        'key': '12-SP4-EC2-HVM-BYOS-x86_64',
-        'build': '0.9.2-2.56'
-    }
-    assert ec2_patch.parse_image_name('openqa-SLES15-SP2-CHOST-BYOS.x86_64-0.9.3-EC2-Build1.11.raw.xz') == {
-        'key': '15-SP2-EC2-CHOST-BYOS-x86_64',
-        'build': '0.9.3-1.11'
-    }
-    assert ec2_patch.parse_image_name('do not match') is None
-
-
-def test_cleanup_images_delete_due_time(ec2_patch):
-    newer_then_min_age = datetime.now(timezone.utc).isoformat()
-    older_then_max_age = (datetime.now(timezone.utc) - timedelta(hours=max_image_age_hours + 1)).isoformat()
+def test_cleanup_images_one_old(ec2_patch):
     MockedEC2Client.deleted_images = list()
     MockedEC2Client.response = {
         'Images': [
-            {'Name': 'SomeThingElse',
-             'CreationDate': older_then_max_age, 'ImageId': 0},
-            {'Name': 'openqa-SLES15-SP2-BYOS.x86_64-0.9.3-EC2-HVM-Build1.10.raw.xz',
-             'CreationDate': newer_then_min_age, 'ImageId': 1},
-            {'Name': 'openqa-SLES15-SP2-BYOS.x86_64-0.9.3-EC2-HVM-Build1.11.raw.xz',
-             'CreationDate': older_then_min_age, 'ImageId': 2},
-            {'Name': 'openqa-SLES15-SP2-BYOS.x86_64-0.9.3-EC2-HVM-Build1.12.raw.xz',
-             'CreationDate': older_then_min_age, 'ImageId': 3},
-            {'Name': 'openqa-SLES15-SP2-BYOS.x86_64-0.9.3-EC2-HVM-Build1.13.raw.xz',
-             'CreationDate': older_then_max_age, 'ImageId': 4},
+            {'Name': Faker().uuid4(), 'CreationDate': now_age_str, 'ImageId': 0},
+            {'Name': Faker().uuid4(), 'CreationDate': older_than_max_age_str, 'ImageId': 2},
         ]
     }
-    ec2_patch.cleanup_images()
-    assert MockedEC2Client.deleted_images == [2, 3, 4]
-
+    ec2_patch.cleanup_images(ec2_max_age_days)
+    assert MockedEC2Client.deleted_images == [2]

-def test_cleanup_images_delete_due_quantity(ec2_patch):
+def test_cleanup_images_all_new(ec2_patch):
     MockedEC2Client.deleted_images = list()
     MockedEC2Client.response = {
         'Images': [
-            {'Name': 'openqa-SLES15-SP2-BYOS.x86_64-0.9.3-EC2-HVM-Build1.12.raw.xz',
-             'CreationDate': older_then_min_age, 'ImageId': 3},
-            {'Name': 'openqa-SLES15-SP2-BYOS.x86_64-0.9.3-EC2-HVM-Build1.13.raw.xz',
-             'CreationDate': older_then_min_age, 'ImageId': 4},
+            {'Name': Faker().uuid4(), 'CreationDate': now_age_str, 'ImageId': 0},
+            {'Name': Faker().uuid4(), 'CreationDate': now_age_str, 'ImageId': 2},
         ]
     }
-    ec2_patch.cleanup_images()
-    assert MockedEC2Client.deleted_images == [3]
-
-
-def test_needs_to_delete_snapshot():
-    days_to_delete = 1
-    old_enough = datetime.now() - timedelta(days=days_to_delete + 1)
-    correct_description1 = 'OpenQA upload image'
-    correct_description2 = 'Created by CreateImage(jsdkfhsdkj) for ami-sdjhfksdj from vol-sdjfhksdjh'
-    snapshot_to_delete = {'StartTime': old_enough, 'Description': correct_description1}
-    snapshot_to_delete2 = {'StartTime': old_enough, 'Description': correct_description2}
-    not_old_enough = {'StartTime': datetime.now(), 'Description': correct_description1}
-    wrong_description = {'StartTime': old_enough, 'Description': 'DDDDDDDDD'}
-    assert EC2.needs_to_delete_snapshot(snapshot_to_delete, days_to_delete)
-    assert EC2.needs_to_delete_snapshot(snapshot_to_delete2, days_to_delete)
-    assert not EC2.needs_to_delete_snapshot(not_old_enough, days_to_delete)
-    assert not EC2.needs_to_delete_snapshot(wrong_description, days_to_delete)
-
-
-def test_cleanup_snapshots_cleanup_check(ec2_patch):
+    ec2_patch.cleanup_images(ec2_max_age_days)
+    assert MockedEC2Client.deleted_images == []
+
+
+def test_needs_to_delete_by_age():
+    assert EC2.needs_to_delete_by_age(older_than_max_age_date, ec2_max_age_days)
+    assert not EC2.needs_to_delete_by_age(now_age_date, ec2_max_age_days)
+
+
+def test_cleanup_snapshots_cleanup_all_new(ec2_patch):
     MockedEC2Client.response = {
         'Snapshots': [{'SnapshotId': MockedEC2Client.snapshotid_to_delete, 'StartTime': datetime.now()}]
     }
-    ec2_patch.cleanup_snapshots(ec2_max_snapshot_age_days)
-    # snapshot was deleted because setting **is** defined so whole cleanup start actually working
-    assert MockedEC2Client.snapshotid_to_delete not in MockedEC2Client.ec2_snapshots
+    ec2_patch.cleanup_snapshots(ec2_max_age_days)
+    assert len(MockedEC2Client.ec2_snapshots) == 2
+
+def test_cleanup_snapshots_cleanup_one_old(ec2_patch):
+    MockedEC2Client.response = {
+        'Snapshots': [{'SnapshotId': MockedEC2Client.snapshotid_to_delete, 'StartTime': older_than_max_age_date}]
+    }
+    ec2_patch.cleanup_snapshots(ec2_max_age_days)
+    assert len(MockedEC2Client.ec2_snapshots) == 1


 def test_cleanup_snapshots_have_ami(ec2_patch):
@@ -326,20 +284,18 @@
     'Snapshots': [{'SnapshotId': MockedEC2Client.snapshotid_i_have_ami, 'StartTime': datetime.now()}]
     }
     MockedEC2Client.delete_snapshot_raise_error = True
-    ec2_patch.cleanup_snapshots(ec2_max_snapshot_age_days)
+    ec2_patch.cleanup_snapshots(ec2_max_age_days)
     assert MockedEC2Client.snapshotid_i_have_ami in MockedEC2Client.ec2_snapshots

 def test_cleanup_volumes_cleanupcheck(ec2_patch):
     MockedEC2Client.response = {
-        'Volumes': [{'VolumeId': MockedEC2Client.volumeid_to_delete,
-                     'CreateTime': datetime.now(timezone.utc) - timedelta(days=ec2_max_volumes_age_days + 1)},
-                    {'VolumeId': 'too_young_to_die', 'CreateTime': datetime.now(timezone.utc) - timedelta(days=2)},
-                    {'VolumeId': MockedEC2Client.volumeid_to_delete,
-                     'CreateTime': datetime.now(timezone.utc) - timedelta(days=ec2_max_volumes_age_days + 1),
+        'Volumes': [{'VolumeId': MockedEC2Client.volumeid_to_delete, 'CreateTime': older_than_max_age_date},
+                    {'VolumeId': 'too_young_to_die', 'CreateTime': now_age_date},
+                    {'VolumeId': MockedEC2Client.volumeid_to_delete, 'CreateTime': older_than_max_age_date,
                      'Tags': [{'Key': 'DO_NOT_DELETE', 'Value': '1'}]},
                    ]
     }
-    ec2_patch.cleanup_volumes(ec2_max_volumes_age_days)
+    ec2_patch.cleanup_volumes(ec2_max_age_days)
     assert len(MockedEC2Client.deleted_volumes) == 1
     assert MockedEC2Client.deleted_volumes[0] == MockedEC2Client.volumeid_to_delete

@@ -462,7 +418,7 @@ def test_delete_vpc_subnets(ec2_patch):

 def test_cleanup_all_calling_all(ec2_patch, monkeypatch):
     called_stack = []

-    def mocked_cleanup_images(self):
+    def mocked_cleanup_images(self, arg1):
         called_stack.append('cleanup_images')

     def mocked_cleanup_snapshots(self, arg1):
@@ -486,7 +442,7 @@ def mocked_get_boolean(config_path, field=None):

     ec2_patch.cleanup_all()

-    assert called_stack == ['cleanup_images', 'cleanup_snapshots', 'cleanup_volumes', 'cleanup_uploader_vpcs']
+    assert called_stack == ['cleanup_images', 'cleanup_volumes', 'cleanup_snapshots', 'cleanup_uploader_vpcs']

 def test_list_clusters(ec2_patch, monkeypatch):
     mocked_eks = MockedEKSClient()
diff --git a/tests/test_gce.py b/tests/test_gce.py
index b7b50519..e0c3f359 100644
--- a/tests/test_gce.py
+++ b/tests/test_gce.py
@@ -1,37 +1,9 @@
 from ocw.lib.gce import GCE, Provider
 from webui.settings import PCWConfig
-from tests.generators import min_image_age_hours, max_image_age_hours
-from tests.generators import mock_get_feature_property
+from tests.generators import max_age_hours, mock_get_feature_property
 from tests import generators
 from datetime import datetime, timezone, timedelta
-
-
-def test_parse_image_name(monkeypatch):
-    monkeypatch.setattr(PCWConfig, 'get_feature_property', lambda *args, **kwargs: "FOOF")
-    monkeypatch.setattr(Provider, 'read_auth_json', lambda *args, **kwargs: '{}')
-    gce = GCE('fake')
-
-    assert gce.parse_image_name('sles12-sp5-gce-x8664-0-9-1-byos-build1-56') == {
-        'key': '12-sp5-gce-byos-x8664',
-        'build': '0-9-1-1-56'
-    }
-
-    assert gce.parse_image_name('sles15-sp2-byos-x8664-0-9-3-gce-build1-10') == {
-        'key': '15-sp2-gce-byos-x8664',
-        'build': '0-9-3-1-10'
-    }
-
-    assert gce.parse_image_name('sles15-sp2-x8664-0-9-3-gce-build1-10') == {
-        'key': '15-sp2-gce-x8664',
-        'build': '0-9-3-1-10'
-    }
-
-    assert gce.parse_image_name('sles15-sp2-chost-byos-x8664-0-9-3-gce-build1-11') == {
-        'key': '15-sp2-gce-chost-byos-x8664',
-        'build': '0-9-3-1-11'
-    }
-
-    assert gce.parse_image_name('do not match') is None
+from faker import Faker


 class FakeRequest:
@@ -60,28 +32,26 @@ def delete(self, *args, **kwargs):


 def test_cleanup_all(monkeypatch):
-    newer_then_min_age = datetime.now(timezone.utc).isoformat()
-    older_then_min_age = (datetime.now(timezone.utc) - timedelta(hours=min_image_age_hours+1)).isoformat()
-    older_then_max_age = (datetime.now(timezone.utc) - timedelta(hours=max_image_age_hours+1)).isoformat()
+    older_then_max_age = (datetime.now(timezone.utc) - timedelta(hours=max_age_hours+1)).strftime("%m/%d/%Y, %H:%M:%S")
+    now_age = datetime.now(timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")

     fmi = FakeMockImages([
         FakeRequest({  # on images().list()
             'items': [
-                {'name': 'I will not be parsed', 'creationTimestamp': older_then_max_age},
-                {'name': 'sles12-sp5-gce-x8664-0-9-1-byos-build1-54', 'creationTimestamp': newer_then_min_age},
-                {'name': 'sles12-sp5-gce-x8664-0-9-1-byos-build1-56', 'creationTimestamp': older_then_min_age}
+                {'name': 'keep', 'creationTimestamp': now_age},
+                {'name': 'delete1', 'creationTimestamp': older_then_max_age}
             ]
         }),
+        FakeRequest(),  # on images().delete()
         FakeRequest({  # on images().list_next()
             'items': [
-                {'name': 'sles12-sp5-gce-x8664-0-9-1-byos-build1-57', 'creationTimestamp': older_then_min_age},
-                {'name': 'sles12-sp5-gce-x8664-0-9-1-byos-build1-58', 'creationTimestamp': older_then_max_age}
+                {'name': 'keep', 'creationTimestamp': now_age},
+                {'name': 'delete2', 'creationTimestamp': older_then_max_age}
             ]
         }),
-        None,  # on images().list_next()
-        FakeRequest({'error': {'errors': [{'message': 'err message'}]},
-                     'warnings': [{'message': 'warning message'}]}),
-        FakeRequest(),  # on images().delete()
+        FakeRequest({'error': {'errors': [{'message': 'err message'}]},
+                     'warnings': [{'message': 'warning message'}]}),
+        None  # on images().list_next()
     ])

     def mocked_compute_client():
@@ -93,9 +63,8 @@ def mocked_compute_client():
     monkeypatch.setattr(Provider, 'read_auth_json', lambda *args, **kwargs: '{}')
     gce = GCE('fake')

-    generators.max_images_per_flavor = 2
     gce.cleanup_all()
-    assert fmi.deleted == ['sles12-sp5-gce-x8664-0-9-1-byos-build1-56', 'sles12-sp5-gce-x8664-0-9-1-byos-build1-58']
+    assert fmi.deleted == ['delete1', 'delete2']

     fmi = FakeMockImages([FakeRequest({})])
     gce.cleanup_all()
diff --git a/tests/test_pcwconfig.py b/tests/test_pcwconfig.py
index c152f445..0989fd26 100644
--- a/tests/test_pcwconfig.py
+++ b/tests/test_pcwconfig.py
@@ -4,10 +4,12 @@

 def test_get_feature_property_with_defaults(pcw_file):
-    assert PCWConfig.get_feature_property('cleanup', 'max-images-per-flavor', 'fake') == 1
-    assert type(PCWConfig.get_feature_property('cleanup', 'max-images-per-flavor', 'fake')) is int
-    assert type(PCWConfig.get_feature_property('cleanup', 'min-image-age-hours', 'fake')) is int
-    assert type(PCWConfig.get_feature_property('cleanup', 'max-image-age-hours', 'fake')) is int
+    assert type(PCWConfig.get_feature_property('cleanup', 'max-age-hours', 'fake')) is int
+    assert type(PCWConfig.get_feature_property('cleanup', 'ec2-max-age-days', 'fake')) is int
+    assert type(PCWConfig.get_feature_property('updaterun', 'default_ttl', 'fake')) is int
+    assert PCWConfig.get_feature_property('cleanup', 'max-age-hours', 'fake') == 24 * 7
+    assert PCWConfig.get_feature_property('cleanup', 'ec2-max-age-days', 'fake') == -1
+    assert PCWConfig.get_feature_property('updaterun', 'default_ttl', 'fake') == 44400
     assert PCWConfig.get_feature_property('cleanup', 'azure-storage-resourcegroup', 'fake') == 'openqa-upload'
     assert type(PCWConfig.get_feature_property('cleanup', 'azure-storage-resourcegroup', 'fake')) is str

@@ -19,11 +21,10 @@ def test_get_feature_property_lookup_error(pcw_file):

 def test_get_feature_property_from_pcw_ini_feature(pcw_file):
     set_pcw_ini(pcw_file, """
 [cleanup]
-max-images-per-flavor = 666
+max-age-hours = 666
 azure-storage-resourcegroup = bla-blub
 """)
-    assert PCWConfig.get_feature_property('cleanup', 'max-images-per-flavor', 'fake') == 666
-    assert type(PCWConfig.get_feature_property('cleanup', 'max-images-per-flavor', 'fake')) is int
+    assert PCWConfig.get_feature_property('cleanup', 'max-age-hours', 'fake') == 666
     assert PCWConfig.get_feature_property('cleanup', 'azure-storage-resourcegroup', 'fake') == 'bla-blub'
     assert type(PCWConfig.get_feature_property('cleanup', 'azure-storage-resourcegroup', 'fake')) is str

@@ -31,14 +32,14 @@ def test_get_feature_property_from_pcw_ini_feature(pcw_file):

 def test_get_feature_property_from_pcw_ini_with_namespace(pcw_file):
     set_pcw_ini(pcw_file, """
 [cleanup]
-max-images-per-flavor = 666
+max-age-hours = 666
 azure-storage-resourcegroup = bla-blub
 [cleanup.namespace.testns]
-max-images-per-flavor = 42
+max-age-hours = 42
 azure-storage-resourcegroup = bla-blub-ns
 """)
-    cleanup_max_images_per_flavor = PCWConfig.get_feature_property('cleanup', 'max-images-per-flavor', 'testns')
+    cleanup_max_images_per_flavor = PCWConfig.get_feature_property('cleanup', 'max-age-hours', 'testns')
     cleanup_azure_storage_resourcegroup = PCWConfig.get_feature_property('cleanup', 'azure-storage-resourcegroup', 'testns')
     assert cleanup_max_images_per_flavor == 42
     assert type(cleanup_max_images_per_flavor) is int
diff --git a/tests/test_provider.py b/tests/test_provider.py
index 81da61ce..b4b64994 100644
--- a/tests/test_provider.py
+++ b/tests/test_provider.py
@@ -1,13 +1,11 @@
-from ocw.lib.provider import Provider, Image
+from ocw.lib.provider import Provider
 from datetime import datetime
 from datetime import timezone
 from datetime import timedelta
 from tests import generators
 from webui.settings import PCWConfig
 from .generators import mock_get_feature_property
-from .generators import max_images_per_flavor
-from .generators import min_image_age_hours
-from .generators import max_image_age_hours
+from .generators import max_age_hours
 import pytest

@@ -17,70 +15,14 @@ def provider_patch(monkeypatch):
     monkeypatch.setattr(Provider, 'read_auth_json', lambda *args, **kwargs: '{}')


-def test_older_than_min_age_older(provider_patch):
+def test_older_than_max_age_hours_older(provider_patch):
     provider = Provider('testolderminage')
-    assert provider.older_than_min_age(datetime.now(timezone.utc) - timedelta(hours=25)) == True
+    assert provider.older_than_max_age_hours(datetime.now(timezone.utc) - timedelta(hours=25)) == True


-def test_older_than_min_age_younger(provider_patch):
+def test_older_than_max_age_hours_younger(provider_patch):
     provider = Provider('testolderminage')
-    assert provider.older_than_min_age(datetime.now(timezone.utc) - timedelta(hours=23)) == False
-
-
-def test_needs_to_delete_image(monkeypatch):
-    monkeypatch.setattr(PCWConfig, 'get_feature_property', mock_get_feature_property)
-    monkeypatch.setattr(Provider, 'read_auth_json', lambda *args, **kwargs: '{}')
-    provider = Provider('testneedstodelete')
-    too_many_images = max_images_per_flavor+1
-    not_enough_images = max_images_per_flavor-3
-    older_than_min_age = datetime.now(timezone.utc) - timedelta(hours=min_image_age_hours+1)
-    assert provider.needs_to_delete_image(too_many_images, datetime.now(timezone.utc)) == False
-    assert provider.needs_to_delete_image(too_many_images, older_than_min_age) == True
-    assert provider.needs_to_delete_image(not_enough_images, older_than_min_age) == False
-
-
-def test_get_keeping_image_names(monkeypatch):
-    monkeypatch.setattr(PCWConfig, 'get_feature_property', mock_get_feature_property)
-    monkeypatch.setattr(Provider, 'read_auth_json', lambda *args, **kwargs: '{}')
-    provider = Provider('testneedstodelete')
-
-    newer_then_min_age = datetime.now(timezone.utc)
-    older_then_min_age = datetime.now(timezone.utc) - timedelta(hours=min_image_age_hours+1)
-    older_then_max_age = datetime.now(timezone.utc) - timedelta(hours=max_image_age_hours+1)
-
-    generators.max_images_per_flavor = 1
-    images = [
-        Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', older_then_min_age),
-        Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_min_age),
-    ]
-    assert provider.get_keeping_image_names(images) == ['foo-A-0.0.1-0.2']
-
-    images = [
-        Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', older_then_min_age),
-        Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_max_age),
-    ]
-    assert provider.get_keeping_image_names(images) == []
-
-    images = [
-        Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', newer_then_min_age),
-        Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_min_age),
-    ]
-    assert provider.get_keeping_image_names(images) == ['foo-A-0.0.1-0.2', 'foo-A-0.0.1-0.1']
-
-    images = [
-        Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', older_then_min_age),
-        Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_min_age),
-        Image('foo-B-0.0.1-0.1', 'B', '0.0.1-0.1', older_then_min_age),
-        Image('foo-B-0.1.1-0.1', 'B', '0.1.1-0.1', older_then_min_age)
-    ]
-    assert provider.get_keeping_image_names(images) == ['foo-A-0.0.1-0.2', 'foo-B-0.1.1-0.1']
-
-    generators.max_images_per_flavor = 2
-    images = [
-        Image('foo-A-0.0.1-0.1', 'A', '0.0.1-0.1', older_then_min_age),
-        Image('foo-A-0.0.1-0.2', 'A', '0.0.1-0.2', older_then_min_age),
-    ]
-    assert provider.get_keeping_image_names(images) == ['foo-A-0.0.1-0.2', 'foo-A-0.0.1-0.1']
+    assert provider.older_than_max_age_hours(datetime.now(timezone.utc) - timedelta(hours=23)) == False


 def test_getData(monkeypatch):
diff --git a/webui/settings.py b/webui/settings.py
index e8b21c5a..18d2a2de 100644
--- a/webui/settings.py
+++ b/webui/settings.py
@@ -210,13 +210,10 @@ class PCWConfig():
     @staticmethod
     def get_feature_property(feature: str, property: str, namespace: str = None):
         default_values = {
-            'cleanup/max-images-per-flavor': {'default': 1, 'return_type': int},
-            'cleanup/max-image-age-hours': {'default': 24 * 31, 'return_type': int},
-            'cleanup/min-image-age-hours': {'default': 24, 'return_type': int},
+            'cleanup/max-age-hours': {'default': 24 * 7, 'return_type': int},
             'cleanup/azure-storage-resourcegroup': {'default': 'openqa-upload', 'return_type': str},
             'cleanup/azure-storage-account-name': {'default': 'openqa', 'return_type': str},
-            'cleanup/ec2-max-snapshot-age-days': {'default': -1, 'return_type': int},
-            'cleanup/ec2-max-volumes-age-days': {'default': -1, 'return_type': int},
+            'cleanup/ec2-max-age-days': {'default': -1, 'return_type': int},
             'updaterun/default_ttl': {'default': 44400, 'return_type': int},
             'notify/to': {'default': None, 'return_type': str},
             'notify/age-hours': {'default': 12, 'return_type': int},
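
With the table above, every cleanup knob resolves through one typed default: ec2-max-age-days falls back to -1, which the >= 0 guard in EC2.cleanup_all treats as "EC2 cleanup disabled". A small sketch of the fallback behaviour (get_default is illustrative; the real get_feature_property also consults pcw.ini and namespace sections):

default_values = {
    'cleanup/max-age-hours': {'default': 24 * 7, 'return_type': int},
    'cleanup/ec2-max-age-days': {'default': -1, 'return_type': int},
}

def get_default(feature, prop):
    entry = default_values['{}/{}'.format(feature, prop)]
    # coerce to the declared type, as get_feature_property does for ini values
    return entry['return_type'](entry['default'])

assert get_default('cleanup', 'max-age-hours') == 168    # one week
assert get_default('cleanup', 'ec2-max-age-days') == -1  # EC2 cleanup off by default
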