From f0e5095d7811104a632d2350e2d3de46411b02fb Mon Sep 17 00:00:00 2001
From: Filip Sobalski
Date: Thu, 5 Dec 2019 18:06:53 +0100
Subject: [PATCH] Python3

---
 .travis.yml                 |  6 +-----
 README.md                   |  4 ++--
 handle_autoscaling_event.py | 28 +++++++++++++++-------------
 import_scaling_handler.py   | 10 ++++++----
 update_varnish_backends.py  | 12 +++++++-----
 5 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index d693fe5..b47cc21 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -11,10 +11,6 @@ env:
   - LAMBDA_RUNTIME=python2
   - LAMBDA_RUNTIME=python3
 
-jobs:
-  allow_failures:
-    - env: LAMBDA_RUNTIME=python3
-
 script: >
   docker run \
     --rm \
@@ -30,4 +26,4 @@ deploy:
   skip_cleanup: true
   cleanup: false
   on:
-    tags: true
\ No newline at end of file
+    tags: true
diff --git a/README.md b/README.md
index 0e9b48b..77343be 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
 # MageOps AWS Lambdas for Handling Autoscaling
 
 This lambdas perform various tasks related to managing ASGs in response
-to various events and conditions. They are strongly coupled to 
+to various events and conditions. They are strongly coupled to
 [MageOps infrastructure setup](https://github.com/mageops/ansible-workflow).
 
 ## Single Deploy Package
@@ -16,7 +16,7 @@ the entrypoint will changed based on which one is used.
 
 ### Generating deploy package
 
 ```bash
-docker run --rm --tty --volume "$(pwd):/var/app" mageops/aws-lambda-build python2 autoscaling-lambdas-deploy-package
+docker run --rm --tty --volume "$(pwd):/var/app" mageops/aws-lambda-build python3 autoscaling-lambdas-deploy-package
 ```
 #### Docker image for building lambdas
diff --git a/handle_autoscaling_event.py b/handle_autoscaling_event.py
index 8eb5db2..aeeddcf 100644
--- a/handle_autoscaling_event.py
+++ b/handle_autoscaling_event.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import boto3
 import os
 import json
@@ -41,21 +43,21 @@ def update_backends(exclude_backend_instance_ids=None, wait_for_finish=False):
     update_lambda_name = os.environ['UPDATE_LAMBDA_NAME']
 
     if exclude_backend_instance_ids:
-        print "Ignoring backend instances %s" % ', '.join(exclude_backend_instance_ids)
+        print("Ignoring backend instances %s" % ', '.join(exclude_backend_instance_ids))
 
     varnish_hosts = get_ec2_hosts(varnish_instance_filter)
     backend_hosts = get_ec2_hosts(backend_instance_filter, exclude_backend_instance_ids)
     extra_hosts = get_ec2_hosts(extra_instance_filter, exclude_backend_instance_ids)
 
-    print "Varnish hosts to be updated: %s, found using filter %s" % (varnish_hosts, varnish_instance_filter)
-    print "New backend hosts: %s, found using filter %s" % (backend_hosts, backend_instance_filter)
-    print "New extra hosts: %s, found using filter %s" % (extra_hosts, extra_instance_filter)
+    print("Varnish hosts to be updated: %s, found using filter %s" % (varnish_hosts, varnish_instance_filter))
+    print("New backend hosts: %s, found using filter %s" % (backend_hosts, backend_instance_filter))
+    print("New extra hosts: %s, found using filter %s" % (extra_hosts, extra_instance_filter))
 
     s3_client = boto3.client('s3')
     varnish_key_object = s3_client.get_object(Bucket=varnish_key_bucket, Key=varnish_key_name)
     varnish_key = varnish_key_object['Body'].read()
 
-    print "Downloaded varnish ssh key from %s/%s" % (varnish_key_bucket, varnish_key_name)
+    print("Downloaded varnish ssh key from %s/%s" % (varnish_key_bucket, varnish_key_name))
 
     payload = json.dumps({
         'varnish_ssh_key': varnish_key,
@@ -66,10 +68,10 @@ def update_backends(exclude_backend_instance_ids=None, wait_for_finish=False):
     })
 
     if wait_for_finish:
-        print 'Invoking update lambda with wait'
+        print('Invoking update lambda with wait')
         invocation_type = 'RequestResponse'
     else:
-        print 'Invoking update lambda asynchronously'
+        print('Invoking update lambda asynchronously')
         invocation_type = 'Event'
 
     boto3.client('lambda').invoke(
@@ -80,7 +82,7 @@ def update_backends(exclude_backend_instance_ids=None, wait_for_finish=False):
     )
 
     if wait_for_finish:
-        print 'Update lambda finished'
+        print('Update lambda finished')
 
 
 def complete_lifecycle_action(hook, asg, instance_id, action=LIFECYCLE_ACTION_CONTINUE):
@@ -93,14 +95,14 @@ def complete_lifecycle_action(hook, asg, instance_id, action=LIFECYCLE_ACTION_CO
         InstanceId=instance_id
     )
 
-    print "Asg continue response: %s" % response
+    print("Asg continue response: %s" % response)
 
 
 def handle_plain_event(event_type, event_data):
-    print 'Handling plain event "%s"' % event_type
+    print('Handling plain event "%s"' % event_type)
 
     if event_type != LAUNCH_SUCCESSFUL_EVENT:
-        print 'Unsupported event type, doing nothing'
+        print('Unsupported event type, doing nothing')
         return
 
     update_backends()
@@ -112,13 +114,13 @@ def handle_lifecycle_event(event_type, event_data):
     asg_name = event_data[KEY_ASG_NAME]
     instance_id = event_data[KEY_EC2_INSTANCE_ID]
 
-    print 'Handling lifecycle event "%s" / hook "%s"' % (event_type, current_hook)
+    print('Handling lifecycle event "%s" / hook "%s"' % (event_type, current_hook))
 
     if current_hook == terminate_hook:
         update_backends([instance_id], True)
         complete_lifecycle_action(current_hook, asg_name, instance_id, LIFECYCLE_ACTION_CONTINUE)
     else:
-        print 'Unsupported lifecycle hook, doing nothing'
+        print('Unsupported lifecycle hook, doing nothing')
 
 
 def handle(event, context):
diff --git a/import_scaling_handler.py b/import_scaling_handler.py
index 532086f..6712228 100644
--- a/import_scaling_handler.py
+++ b/import_scaling_handler.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import boto3
 import os
 import json
@@ -7,7 +9,7 @@
 
 
 def ensure_running(asg_name):
-    print 'Setting desired capacity of %s to 1' % asg_name
+    print('Setting desired capacity of %s to 1' % asg_name)
     asg_client.set_desired_capacity(
         AutoScalingGroupName=asg_name,
         DesiredCapacity=1,
@@ -16,7 +18,7 @@
 
 
 def ensure_notrunning(asg_name):
-    print 'Setting desired capacity of %s to 0' % asg_name
+    print('Setting desired capacity of %s to 0' % asg_name)
     asg_client.set_desired_capacity(
         AutoScalingGroupName=asg_name,
         DesiredCapacity=0,
@@ -41,8 +43,8 @@ def handle(event, context):
     # if we'd like to check current ASG status that's additional call anyway
     # not worth it
     if is_import_needed(check_endpoint):
-        print 'Import instance needed'
+        print('Import instance needed')
         ensure_running(asg_name)
     else:
-        print 'Import not needed'
+        print('Import not needed')
         ensure_notrunning(asg_name)
diff --git a/update_varnish_backends.py b/update_varnish_backends.py
index 85ffa01..ed04d6d 100644
--- a/update_varnish_backends.py
+++ b/update_varnish_backends.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import boto3
 import os
 import json
@@ -8,9 +10,9 @@ from datetime import datetime
 
 
 def execute_command(ssh_client, command):
-    print 'Executing %s' % command
+    print('Executing %s' % command)
     stdin, stdout, stderr = ssh_client.exec_command(command)
-    print stdout.read(), stderr.read()
+    print(stdout.read(), stderr.read())
 
 
 def handle(event, context):
@@ -34,14 +36,14 @@
     ssh_client = paramiko.SSHClient()
     ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
 
-    print 'Connecting to %s...' % host
+    print('Connecting to %s...' % host)
     ssh_client.connect(hostname = host, username = ssh_username, pkey = ssh_key)
 
-    print 'Copying new backends vcl to %s ...' % new_vcl_name
+    print('Copying new backends vcl to %s ...' % new_vcl_name)
     sftp = ssh_client.open_sftp()
     sftp.putfo(StringIO.StringIO(backend_vcl), new_vcl_name)
 
-    print 'Updating vcls...'
+    print('Updating vcls...')
     execute_command(ssh_client, 'sudo /usr/bin/varnish_update_backends %s' % new_vcl_name)
 
     ssh_client.close()