From 35aa7860686fbb9117d43c0b95f99c4cebc4928a Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 20 Mar 2020 16:05:43 -0400 Subject: [PATCH 001/124] add Windows 2019 to list of automatically generated base images --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2ced95e7c..cc3488184 100755 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -349,7 +349,7 @@ Base Images Linux: Base Images Windows: stage: Deploy script: - - /opt/mu/lib/extras/generate-stock-images --clouds AWS --aws-creds egtprod --platforms win2k12 win2k16 + - /opt/mu/lib/extras/generate-stock-images --clouds AWS --aws-creds egtprod --platforms win2k12 win2k16 win2k19 tags: - mu-gitlab-runner only: From 86f3a1e07ed319b354a8e3f5b51cdafe51469f80 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 20 Mar 2020 17:07:52 -0400 Subject: [PATCH 002/124] Logger: refactor to rubocop's satisfaction --- modules/mu/logger.rb | 259 +++++++++++++++++++------------------------ 1 file changed, 115 insertions(+), 144 deletions(-) diff --git a/modules/mu/logger.rb b/modules/mu/logger.rb index 8a42b76ca..c1b155c56 100644 --- a/modules/mu/logger.rb +++ b/modules/mu/logger.rb @@ -33,6 +33,33 @@ class Logger # Show DEBUG log entries and extra call stack and threading info LOUD = 2.freeze + # stash a hash map for color outputs + COLORMAP = { + DEBUG => { :html => "orange", :ansi => :yellow }, + INFO => { :html => "green", :ansi => :green }, + NOTICE => { :html => "yellow", :ansi => :yellow }, + WARN => { :html => "orange", :ansi => :light_red }, + ERR => { :html => "red", :ansi => :red } + }.freeze + + # minimum log verbosity at which we'll print various types of messages + PRINT_MSG_IF = { + DEBUG => { :msg => LOUD, :details => LOUD }, + INFO => { :msg => NORMAL, :details => LOUD }, + NOTICE => { :msg => nil, :details => QUIET }, + WARN => { :msg => nil, :details => SILENT }, + ERR => { :msg => nil, :details => nil } + }.freeze + + # Syslog equivalents 
of our log levels + SYSLOG_MAP = { + DEBUG => Syslog::LOG_DEBUG, + INFO => Syslog::LOG_NOTICE, + NOTICE => Syslog::LOG_NOTICE, + WARN => Syslog::LOG_WARNING, + ERR => Syslog::LOG_ERR + }.freeze + attr_accessor :verbosity @verbosity = MU::Logger::NORMAL @quiet = false @@ -76,59 +103,23 @@ def log(msg, html ||= @html handle ||= @handle color ||= @color - return if verbosity == MU::Logger::SILENT - return if verbosity < MU::Logger::LOUD and level == DEBUG - return if verbosity < MU::Logger::NORMAL and level == INFO - # By which we mean, "get the filename (with the .rb stripped off) which - # originated the call to this method. Which, for our purposes, is the - # MU subclass that called us. Useful information. And it looks like Perl. - mod_root = Regexp.quote("#{ENV['MU_LIBDIR']}/modules/mu/") - bin_root = Regexp.quote("#{ENV['MU_INSTALLDIR']}/bin/") - caller_name = caller[1] + if verbosity == MU::Logger::SILENT or (verbosity < MU::Logger::LOUD and level == DEBUG) or (verbosity < MU::Logger::NORMAL and level == INFO) + return + end - caller_name.sub!(/:.*/, "") - caller_name.sub!(/^\.\//, "") - caller_name.sub!(/^#{mod_root}/, "") - caller_name.sub!(/^#{bin_root}/, "") - caller_name.sub!(/\.r[ub]$/, "") - caller_name.sub!(/#{Regexp.quote(MU.myRoot)}\//, "") - caller_name.sub!(/^modules\//, "") + caller_name = extract_caller_name(caller[1]) time = Time.now.strftime("%b %d %H:%M:%S").to_s Syslog.open("Mu/"+caller_name, Syslog::LOG_PID, Syslog::LOG_DAEMON | Syslog::LOG_LOCAL3) if !Syslog.opened? - if !details.nil? - if details.is_a?(Hash) and details.has_key?(:details) - details = details[:details] - end - details = PP.pp(details, '') if !details.is_a?(String) - end - details = "
"+details+"
" if html - # We get passed literal quoted newlines sometimes, fix 'em. Get Windows' - # ugly line feeds too. - if !details.nil? - details = details.dup # in case it's frozen or something - details.gsub!(/\\n/, "\n") - details.gsub!(/(\\r|\r)/, "") - end + + details = format_details(details, html) msg = msg.first if msg.is_a?(Array) msg = "" if msg == nil msg = msg.to_s if !msg.is_a?(String) and msg.respond_to?(:to_s) - # wrapper for writing a log entry to multiple filehandles - # @param handles [Array] - # @param msgs [Array] - def write(handles = [], msgs = []) - return if handles.nil? or msgs.nil? - handles.each { |h| - msgs.each { |m| - h.puts m - } - } - end - @@log_semaphere.synchronize { handles = [handle] extra_logfile = if deploy and deploy.deploy_dir and Dir.exist?(deploy.deploy_dir) @@ -137,110 +128,41 @@ def write(handles = [], msgs = []) handles << extra_logfile if extra_logfile msgs = [] - case level - when SUMMARY - @summary << msg - when DEBUG - if verbosity >= MU::Logger::LOUD - if html - html_out "#{time} - #{caller_name} - #{msg}", "orange" - html_out " #{details}" if details - elsif color - msgs << "#{time} - #{caller_name} - #{msg}".yellow.on_black - msgs << "#{details}".white.on_black if details - else - msgs << "#{time} - #{caller_name} - #{msg}" - msgs << "#{details}" if details - end - Syslog.log(Syslog::LOG_DEBUG, msg.gsub(/%/, '')) - Syslog.log(Syslog::LOG_DEBUG, details.gsub(/%/, '')) if details - end - when INFO - if verbosity >= MU::Logger::NORMAL - if html - html_out "#{time} - #{caller_name} - #{msg}", "green" - elsif color - msgs << "#{time} - #{caller_name} - #{msg}".green.on_black - else - msgs << "#{time} - #{caller_name} - #{msg}" - end - if verbosity >= MU::Logger::LOUD - if html - html_out " #{details}" - elsif color - msgs << "#{details}".white.on_black if details - else - msgs << "#{details}" if details - end - end - Syslog.log(Syslog::LOG_NOTICE, msg.gsub(/%/, '')) - Syslog.log(Syslog::LOG_NOTICE, details.gsub(/%/, '')) if 
details - end - when NOTICE - if html - html_out "#{time} - #{caller_name} - #{msg}", "yellow" - elsif color - msgs << "#{time} - #{caller_name} - #{msg}".yellow.on_black - else - msgs << "#{time} - #{caller_name} - #{msg}" - end - if verbosity >= MU::Logger::QUIET - if html - html_out "#{caller_name} - #{msg}" - elsif color - msgs << "#{details}".white.on_black if details - else - msgs << "#{details}" if details - end - end - Syslog.log(Syslog::LOG_NOTICE, msg.gsub(/%/, '')) - Syslog.log(Syslog::LOG_NOTICE, details.gsub(/%/, '')) if details - when WARN - if html - html_out "#{time} - #{caller_name} - #{msg}", "orange" - elsif color - msgs << "#{time} - #{caller_name} - #{msg}".light_red.on_black - else - msgs << "#{time} - #{caller_name} - #{msg}" - end - if verbosity >= MU::Logger::SILENT - if html - html_out "#{caller_name} - #{msg}" - elsif color - msgs << "#{details}".white.on_black if details - else - msgs << "#{details}" if details - end - end - Syslog.log(Syslog::LOG_WARNING, msg.gsub(/%/, '')) - Syslog.log(Syslog::LOG_WARNING, details.gsub(/%/, '')) if details - when ERR - if html - html_out "#{time} - #{caller_name} - #{msg}", "red" - html_out " #{details}" if details - elsif color - msgs << "#{time} - #{caller_name} - #{msg}".red.on_black - msgs << "#{details}".white.on_black if details - else - msgs << "#{time} - #{caller_name} - #{msg}" - msgs << "#{details}" if details - end - Syslog.log(Syslog::LOG_ERR, msg.gsub(/%/, '')) - Syslog.log(Syslog::LOG_ERR, details.gsub(/%/, '')) if details + if !PRINT_MSG_IF[level][:msg] or level >= PRINT_MSG_IF[level][:msg] + if html + html_out "#{time} - #{caller_name} - #{msg}", COLORMAP[level][:html] + else + str = "#{time} - #{caller_name} - #{msg}" + str = str.call(COLORMAP[level][:ansi]).on_black if color + msgs << str + end + Syslog.log(SYSLOG_MAP[level], msg.gsub(/%/, '')) + end + + if details and (!PRINT_MSG_IF[level][:details] or level >= PRINT_MSG_IF[level][:details]) + if html + html_out " #{details}" else - 
if html - html_out "#{time} - #{caller_name} - #{msg}" - html_out " #{details}" if details - elsif color - msgs << "#{time} - #{caller_name} - #{msg}".white.on_black - msgs << "#{details}".white.on_black if details - else - msgs << "#{time} - #{caller_name} - #{msg}" - msgs << "#{details}" if details - end - Syslog.log(Syslog::LOG_NOTICE, msg.gsub(/%/, '')) - Syslog.log(Syslog::LOG_NOTICE, details.gsub(/%/, '')) if details + details = details.white.on_block if color + msgs << details + end + Syslog.log(SYSLOG_MAP[level], details.gsub(/%/, '')) end + +# else +# if html +# html_out "#{time} - #{caller_name} - #{msg}" +# html_out " #{details}" if details +# elsif color +# msgs << "#{time} - #{caller_name} - #{msg}".white.on_black +# msgs << "#{details}".white.on_black if details +# else +# msgs << "#{time} - #{caller_name} - #{msg}" +# msgs << "#{details}" if details +# end +# Syslog.log(Syslog::LOG_NOTICE, msg.gsub(/%/, '')) +# Syslog.log(Syslog::LOG_NOTICE, details.gsub(/%/, '')) if details + write(handles, msgs) extra_logfile.close if extra_logfile @@ -250,6 +172,43 @@ def write(handles = [], msgs = []) private + def format_details(details, html = false) + return if details.nil? + + if details.is_a?(Hash) and details.has_key?(:details) + details = details[:details] + end + details = PP.pp(details, '') if !details.is_a?(String) + + details = "
"+details+"
" if html + # We get passed literal quoted newlines sometimes, fix 'em. Get Windows' + # ugly line feeds too. + + details = details.dup # in case it's frozen or something + details.gsub!(/\\n/, "\n") + details.gsub!(/(\\r|\r)/, "") + + details + end + + # By which we mean, "get the filename (with the .rb stripped off) which + # originated the call to this method. Which, for our purposes, is the + # MU subclass that called us. Useful information. And it looks like Perl. + def extract_caller_name(caller_name) + return nil if !caller_name or !caller_name.is_a?(String) + mod_root = Regexp.quote("#{ENV['MU_LIBDIR']}/modules/mu/") + bin_root = Regexp.quote("#{ENV['MU_INSTALLDIR']}/bin/") + + caller_name.sub!(/:.*/, "") + caller_name.sub!(/^\.\//, "") + caller_name.sub!(/^#{mod_root}/, "") + caller_name.sub!(/^#{bin_root}/, "") + caller_name.sub!(/\.r[ub]$/, "") + caller_name.sub!(/#{Regexp.quote(MU.myRoot)}\//, "") + caller_name.sub!(/^modules\//, "") + caller_name + end + # Output a log message as HTML. # # @param msg [String]: The log message to print @@ -259,5 +218,17 @@ def html_out(msg, color_name="black") @handle.puts "#{msg}" end + # wrapper for writing a log entry to multiple filehandles + # @param handles [Array] + # @param msgs [Array] + def write(handles = [], msgs = []) + return if handles.nil? or msgs.nil? 
+ handles.each { |h| + msgs.each { |m| + h.puts m + } + } + end + end #class end #module From ebae266dbd249e85224940542eb4cb5cf3f29163 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 20 Mar 2020 17:50:48 -0400 Subject: [PATCH 003/124] AWS::Database.terminate_rds_instance: cleansing --- modules/Gemfile.lock | 46 +++++++----- modules/mu.rb | 39 +++++----- modules/mu/clouds/aws/database.rb | 120 ++++++++++++------------------ modules/mu/logger.rb | 30 ++++---- 4 files changed, 108 insertions(+), 127 deletions(-) diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index 760bab09c..601421425 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -44,7 +44,7 @@ PATH GEM remote: https://rubygems.org/ specs: - activesupport (6.0.2.1) + activesupport (6.0.2.2) concurrent-ruby (~> 1.0, >= 1.0.2) i18n (>= 0.7, < 2) minitest (~> 5.1) @@ -54,7 +54,7 @@ GEM public_suffix (>= 2.0.2, < 4.0) ast (2.4.0) aws-eventstream (1.0.3) - aws-sdk-core (2.11.470) + aws-sdk-core (2.11.471) aws-sigv4 (~> 1.0) jmespath (~> 1.0) aws-sigv4 (1.1.1) @@ -124,8 +124,8 @@ GEM ms_rest_azure (~> 0.11.0) azure_mgmt_adhybridhealth_service (0.17.0) ms_rest_azure (~> 0.11.1) - azure_mgmt_advisor (0.17.0) - ms_rest_azure (~> 0.11.0) + azure_mgmt_advisor (0.17.1) + ms_rest_azure (~> 0.11.1) azure_mgmt_alerts_management (0.17.0) ms_rest_azure (~> 0.11.1) azure_mgmt_analysis_services (0.17.2) @@ -156,7 +156,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_commerce (0.17.1) ms_rest_azure (~> 0.11.0) - azure_mgmt_compute (0.19.0) + azure_mgmt_compute (0.19.1) ms_rest_azure (~> 0.11.1) azure_mgmt_consumption (0.18.0) ms_rest_azure (~> 0.11.1) @@ -194,7 +194,7 @@ GEM ms_rest_azure (~> 0.11.0) azure_mgmt_edgegateway (0.18.0) ms_rest_azure (~> 0.11.0) - azure_mgmt_event_grid (0.17.10) + azure_mgmt_event_grid (0.18.0) ms_rest_azure (~> 0.11.1) azure_mgmt_event_hub (0.18.0) ms_rest_azure (~> 0.11.1) @@ -206,7 +206,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_import_export (0.17.0) ms_rest_azure (~> 
0.11.1) - azure_mgmt_iot_central (0.18.0) + azure_mgmt_iot_central (0.19.0) ms_rest_azure (~> 0.11.1) azure_mgmt_iot_hub (0.17.3) ms_rest_azure (~> 0.11.1) @@ -248,7 +248,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_netapp (0.18.3) ms_rest_azure (~> 0.11.1) - azure_mgmt_network (0.23.0) + azure_mgmt_network (0.23.1) ms_rest_azure (~> 0.11.1) azure_mgmt_notification_hubs (0.17.2) ms_rest_azure (~> 0.11.0) @@ -260,7 +260,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_policy (0.17.8) ms_rest_azure (~> 0.11.1) - azure_mgmt_policy_insights (0.17.5) + azure_mgmt_policy_insights (0.17.6) ms_rest_azure (~> 0.11.1) azure_mgmt_portal (0.17.0) ms_rest_azure (~> 0.11.1) @@ -320,7 +320,11 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_stream_analytics (0.17.2) ms_rest_azure (~> 0.11.0) - azure_mgmt_subscriptions (0.18.1) + azure_mgmt_subscriptions (0.18.2) + ms_rest_azure (~> 0.11.1) + azure_mgmt_support (0.17.0) + ms_rest_azure (~> 0.11.1) + azure_mgmt_synapse (0.17.0) ms_rest_azure (~> 0.11.1) azure_mgmt_time_series_insights (0.17.0) ms_rest_azure (~> 0.11.1) @@ -330,7 +334,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_web (0.17.5) ms_rest_azure (~> 0.11.1) - azure_sdk (0.52.1) + azure_sdk (0.53.0) azure-storage (~> 0.14.0.preview) azure_cognitiveservices_anomalydetector (~> 0.17.0) azure_cognitiveservices_autosuggest (~> 0.17.1) @@ -360,7 +364,7 @@ GEM azure_graph_rbac (~> 0.17.1) azure_key_vault (~> 0.17.3) azure_mgmt_adhybridhealth_service (~> 0.17.0) - azure_mgmt_advisor (~> 0.17.0) + azure_mgmt_advisor (~> 0.17.1) azure_mgmt_alerts_management (~> 0.17.0) azure_mgmt_analysis_services (~> 0.17.2) azure_mgmt_api_management (~> 0.18.4) @@ -376,7 +380,7 @@ GEM azure_mgmt_cdn (~> 0.17.3) azure_mgmt_cognitive_services (~> 0.19.0) azure_mgmt_commerce (~> 0.17.1) - azure_mgmt_compute (~> 0.19.0) + azure_mgmt_compute (~> 0.19.1) azure_mgmt_consumption (~> 0.18.0) azure_mgmt_container_instance (~> 0.17.4) azure_mgmt_container_registry (~> 0.18.3) @@ -395,13 +399,13 @@ GEM 
azure_mgmt_devtestlabs (~> 0.18.0) azure_mgmt_dns (~> 0.17.4) azure_mgmt_edgegateway (~> 0.18.0) - azure_mgmt_event_grid (~> 0.17.10) + azure_mgmt_event_grid (~> 0.18.0) azure_mgmt_event_hub (~> 0.18.0) azure_mgmt_features (~> 0.17.2) azure_mgmt_hanaonazure (~> 0.18.0) azure_mgmt_hdinsight (~> 0.17.7) azure_mgmt_import_export (~> 0.17.0) - azure_mgmt_iot_central (~> 0.18.0) + azure_mgmt_iot_central (~> 0.19.0) azure_mgmt_iot_hub (~> 0.17.3) azure_mgmt_key_vault (~> 0.17.5) azure_mgmt_kusto (~> 0.19.1) @@ -422,13 +426,13 @@ GEM azure_mgmt_msi (~> 0.17.1) azure_mgmt_mysql (~> 0.17.0) azure_mgmt_netapp (~> 0.18.3) - azure_mgmt_network (~> 0.23.0) + azure_mgmt_network (~> 0.23.1) azure_mgmt_notification_hubs (~> 0.17.2) azure_mgmt_operational_insights (~> 0.17.2) azure_mgmt_operations_management (~> 0.17.0) azure_mgmt_peering (~> 0.17.0) azure_mgmt_policy (~> 0.17.8) - azure_mgmt_policy_insights (~> 0.17.5) + azure_mgmt_policy_insights (~> 0.17.6) azure_mgmt_portal (~> 0.17.0) azure_mgmt_postgresql (~> 0.17.1) azure_mgmt_powerbi_dedicated (~> 0.17.0) @@ -458,7 +462,9 @@ GEM azure_mgmt_storagecache (~> 0.17.1) azure_mgmt_storagesync (~> 0.18.0) azure_mgmt_stream_analytics (~> 0.17.2) - azure_mgmt_subscriptions (~> 0.18.1) + azure_mgmt_subscriptions (~> 0.18.2) + azure_mgmt_support (~> 0.17.0) + azure_mgmt_synapse (~> 0.17.0) azure_mgmt_time_series_insights (~> 0.17.0) azure_mgmt_traffic_manager (~> 0.17.2) azure_mgmt_vmware_cloudsimple (~> 0.17.0) @@ -705,7 +711,7 @@ GEM os (1.0.1) paint (1.0.1) parallel (1.19.1) - parser (2.7.0.4) + parser (2.7.0.5) ast (~> 2.4.0) plist (3.5.0) polyglot (0.3.5) @@ -794,7 +800,7 @@ GEM thor (1.0.1) thread_safe (0.3.6) timeliness (0.3.10) - tomlrb (1.2.9) + tomlrb (1.3.0) treetop (1.6.10) polyglot (~> 0.3) tzinfo (1.2.6) diff --git a/modules/mu.rb b/modules/mu.rb index cf5d0e68c..6fa0d998c 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -212,8 +212,28 @@ def deep_merge!(with, on = self) end ENV['HOME'] = Etc.getpwuid(Process.uid).dir 
+module MU + + # For log entries that should only be logged when we're in verbose mode + DEBUG = 0.freeze + # For ordinary log entries + INFO = 1.freeze + # For more interesting log entries which are not errors + NOTICE = 2.freeze + # Log entries for non-fatal errors + WARN = 3.freeze + # Log entries for non-fatal errors + WARNING = 3.freeze + # Log entries for fatal errors + ERR = 4.freeze + # Log entries for fatal errors + ERROR = 4.freeze + # Log entries that will be held and displayed/emailed at the end of deploy, + # cleanup, etc. +end require 'mu/logger' + module MU # Subclass core thread so we can gracefully handle it when we hit system @@ -620,25 +640,6 @@ def self.log(msg, level = MU::INFO, shorthand_details = nil, details: nil, html: @@logger.log(msg, level, details: details, html: html, verbosity: verbosity, color: color) end - # For log entries that should only be logged when we're in verbose mode - DEBUG = 0.freeze - # For ordinary log entries - INFO = 1.freeze - # For more interesting log entries which are not errors - NOTICE = 2.freeze - # Log entries for non-fatal errors - WARN = 3.freeze - # Log entries for non-fatal errors - WARNING = 3.freeze - # Log entries for fatal errors - ERR = 4.freeze - # Log entries for fatal errors - ERROR = 4.freeze - # Log entries that will be held and displayed/emailed at the end of deploy, - # cleanup, etc. - SUMMARY = 5.freeze - - autoload :Cleanup, 'mu/cleanup' autoload :Deploy, 'mu/deploy' autoload :MommaCat, 'mu/mommacat' diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 0505d8b0d..02a370390 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -1043,7 +1043,7 @@ def notify } else db["identifier"] = @mu_name.downcase if db["identifier"].nil? # Is this still valid if we have read replicas? 
- database = MU::Cloud::AWS::Database.getDatabaseById(db["identifier"], region: db['region']) + database = MU::Cloud::AWS::Database.getDatabaseById(db["identifier"], region: db['region'], credentials: db['credentials']) # DNS records for the "real" zone should always be registered as late as possible so override_existing only overwrites the records after the resource is ready to use. unless db["add_cluster_node"] # It isn't necessarily clear what we should do with DNS records of cluster members. Probably need to expose this to the BoK somehow. @@ -1699,103 +1699,77 @@ def self.validateConfig(db, _configurator) # @param db [OpenStruct]: The cloud provider's description of the database artifact # @return [void] def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil) - raise MuError, "terminate_rds_instance requires a non-nil database descriptor" if db.nil? - db_id = db.db_instance_identifier - - database_obj = MU::MommaCat.findStray( - "AWS", - "database", - region: region, - deploy_id: deploy_id, - cloud_id: cloud_id, - mu_name: mu_name + + db ||= MU::MommaCat.findStray( + "AWS", + "database", + region: region, + deploy_id: deploy_id, + cloud_id: cloud_id, + mu_name: mu_name ).first + cloud_id ||= db.db_instance_identifier + + raise MuError, "terminate_rds_instance requires a non-nil database descriptor" if db.nil? - rdssecgroups = Array.new + rdssecgroups = [] begin - secgroup = MU::Cloud::AWS.rds(region: region).describe_db_security_groups(db_security_group_name: db_id) + secgroup = MU::Cloud::AWS.rds(region: region).describe_db_security_groups(db_security_group_name: cloud_id) + rdssecgroups << cloud_id if !secgroup.nil? rescue Aws::RDS::Errors::DBSecurityGroupNotFound # this is normal in VPC world end - rdssecgroups << db_id if !secgroup.nil? - # We can use an AWS waiter for this. 
- unless db.db_instance_status == "available" - loop do - MU.log "Waiting for #{db_id} to be in a removable state...", MU::NOTICE - db = MU::Cloud::AWS::Database.getDatabaseById(db_id, region: region) - return if db.nil? - break unless %w{creating modifying backing-up}.include?(db.db_instance_status) - sleep 60 - end + if db.db_instance_status != "available" + MU.retrier([], wait: 60, loop_if: Proc.new { %w{creating modifying backing-up}.include?(db.db_instance_status) }) { + db = MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region, credentials: credentials) + return if db.il? + } end - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: db_id, target: db.endpoint.address, cloudclass: MU::Cloud::Database, delete: true) + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: db.endpoint.address, cloudclass: MU::Cloud::Database, delete: true) if %w{deleting deleted}.include?(db.db_instance_status) - MU.log "#{db_id} has already been terminated", MU::WARN + MU.log "#{cloud_id} has already been terminated", MU::WARN else - def self.dbSkipSnap(db_id, region, credentials) - # We're calling this several times so lets declare it once - MU.log "Terminating #{db_id} (not saving final snapshot)" - MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(db_instance_identifier: db_id, skip_final_snapshot: true) - end - - def self.dbCreateSnap(db_id, region, credentials) - MU.log "Terminating #{db_id} (final snapshot: #{db_id}-mufinal)" - MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(db_instance_identifier: db_id, final_db_snapshot_identifier: "#{db_id}-mufinal", skip_final_snapshot: false) + params = { + db_instance_identifier: cloud_id + } + if skipsnapshots or db.db_cluster_identifier or db.read_replica_source_db_instance_identifier + MU.log "Terminating #{cloud_id} (not saving final snapshot)" + params[:skip_final_snapshot] = true + else + MU.log "Terminating #{cloud_id} (final snapshot: 
#{cloud_id}-mufinal)" + params[:skip_final_snapshot] = false + params[:final_db_snapshot_identifier] = "#{cloud_id}-mufinal" end if !noop - retries = 0 - begin - if db.db_cluster_identifier || db.read_replica_source_db_instance_identifier - # make sure we don't create final snapshot for a database instance that is part of a cluster, or if it's a read replica database instance - dbSkipSnap(db_id, region, credentials) - else - skipsnapshots ? dbSkipSnap(db_id, region, credentials) : dbCreateSnap(db_id, region, credentials) - end - rescue Aws::RDS::Errors::InvalidDBInstanceState => e - if retries < 5 - MU.log "#{db_id} is not in a removable state, retrying several times #{e.inspect}", MU::WARN - retries += 1 - sleep 30 - retry - else - MU.log "#{db_id} is not in a removable state after several retries, giving up. #{e.inspect}", MU::ERR + on_retry = Proc.new { |e| + if e.class == Aws::RDS::Errors::DBSnapshotAlreadyExists + MU.log "Snapshot of #{cloud_id} already exists", MU::WARN + params[:skip_final_snapshot] = true end - rescue Aws::RDS::Errors::DBSnapshotAlreadyExists - dbSkipSnap(db_id, region, credentials) - MU.log "Snapshot of #{db_id} already exists", MU::WARN - rescue Aws::RDS::Errors::SnapshotQuotaExceeded - dbSkipSnap(db_id, region, credentials) - MU.log "Snapshot quota exceeded while deleting #{db_id}", MU::ERR - end + } + MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists], wait: 30, max: 5, on_retry: on_retry) { + MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params) + } end end - begin - attempts = 0 - loop do - MU.log "Waiting for #{db_id} termination to complete", MU::NOTICE if attempts % 6 == 0 - del_db = MU::Cloud::AWS::Database.getDatabaseById(db_id, region: region) - break if del_db.nil? 
|| del_db.db_instance_status == "deleted" - sleep 10 - attempts += 1 - end - rescue Aws::RDS::Errors::DBInstanceNotFound - # we are ok with this - end + MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound]) { + del_db = MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region) + break if del_db.nil? or del_db.db_instance_status == "deleted" + } # RDS security groups can depend on EC2 security groups, do these last begin rdssecgroups.each { |sg| MU.log "Removing RDS Security Group #{sg}" - MU::Cloud::AWS.rds(region: region).delete_db_security_group(db_security_group_name: sg) if !noop + MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_security_group(db_security_group_name: sg) if !noop } rescue Aws::RDS::Errors::DBSecurityGroupNotFound - MU.log "RDS Security Group #{sg} disappeared before we could remove it", MU::WARN end # Cleanup the database vault @@ -1807,8 +1781,8 @@ def self.dbCreateSnap(db_id, region, credentials) end groomclass = MU::Groomer.loadGroomer(groomer) - groomclass.deleteSecret(vault: db_id.upcase) if !noop - MU.log "#{db_id} has been terminated" + groomclass.deleteSecret(vault: cloud_id.upcase) if !noop + MU.log "#{cloud_id} has been terminated" end private_class_method :terminate_rds_instance diff --git a/modules/mu/logger.rb b/modules/mu/logger.rb index c1b155c56..bb55e31c2 100644 --- a/modules/mu/logger.rb +++ b/modules/mu/logger.rb @@ -35,29 +35,29 @@ class Logger # stash a hash map for color outputs COLORMAP = { - DEBUG => { :html => "orange", :ansi => :yellow }, - INFO => { :html => "green", :ansi => :green }, - NOTICE => { :html => "yellow", :ansi => :yellow }, - WARN => { :html => "orange", :ansi => :light_red }, - ERR => { :html => "red", :ansi => :red } + MU::DEBUG => { :html => "orange", :ansi => :yellow }, + MU::INFO => { :html => "green", :ansi => :green }, + MU::NOTICE => { :html => "yellow", :ansi => :yellow }, + MU::WARN => { :html => "orange", :ansi => :light_red }, + MU::ERR => 
{ :html => "red", :ansi => :red } }.freeze # minimum log verbosity at which we'll print various types of messages PRINT_MSG_IF = { - DEBUG => { :msg => LOUD, :details => LOUD }, - INFO => { :msg => NORMAL, :details => LOUD }, - NOTICE => { :msg => nil, :details => QUIET }, - WARN => { :msg => nil, :details => SILENT }, - ERR => { :msg => nil, :details => nil } + MU::DEBUG => { :msg => LOUD, :details => LOUD }, + MU::INFO => { :msg => NORMAL, :details => LOUD }, + MU::NOTICE => { :msg => nil, :details => QUIET }, + MU::WARN => { :msg => nil, :details => SILENT }, + MU::ERR => { :msg => nil, :details => nil } }.freeze # Syslog equivalents of our log levels SYSLOG_MAP = { - DEBUG => Syslog::LOG_DEBUG, - INFO => Syslog::LOG_NOTICE, - NOTICE => Syslog::LOG_NOTICE, - WARN => Syslog::LOG_WARNING, - ERR => Syslog::LOG_ERR + MU::DEBUG => Syslog::LOG_DEBUG, + MU::INFO => Syslog::LOG_NOTICE, + MU::NOTICE => Syslog::LOG_NOTICE, + MU::WARN => Syslog::LOG_WARNING, + MU::ERR => Syslog::LOG_ERR }.freeze attr_accessor :verbosity From 12abb7c9a7b601bc7e848805e013343abaf4112d Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 20 Mar 2020 18:34:01 -0400 Subject: [PATCH 004/124] AWS::Database.cleanup: factor out tag checks --- modules/mu/clouds/aws/database.rb | 139 ++++++------------------------ modules/mu/logger.rb | 2 +- 2 files changed, 29 insertions(+), 112 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 02a370390..ed141c737 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -1198,6 +1198,7 @@ def self.quality MU::Cloud::RELEASE end + # Called by {MU::Cleanup}. Locates resources that were created by the # currently-loaded deployment, and purges them. 
# @param noop [Boolean]: If true, will only print what would be done @@ -1205,43 +1206,20 @@ def self.quality # @param region [String]: The cloud provider region in which to operate # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU.curRegion, flags: {}) - skipsnapshots = flags["skipsnapshots"] resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_instances - threads = [] + threads = [] resp.db_instances.each { |db| arn = MU::Cloud::AWS::Database.getARN(db.db_instance_identifier, "db", "rds", region: region, credentials: credentials) tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - found_muid = false - found_master = false - tags.each { |tag| - found_muid = true if tag.key == "MU-ID" && tag.value == MU.deploy_id - found_master = true if tag.key == "MU-MASTER-IP" && tag.value == MU.mu_public_ip - } - next if !found_muid - - delete = - if ignoremaster && found_muid - true - elsif !ignoremaster && found_muid && found_master - true - else - false - end - - if delete - parent_thread_id = Thread.current.object_id + if should_delete?(tags, ignoremaster) threads << Thread.new(db) { |mydb| - MU.dupGlobals(parent_thread_id) - Thread.abort_on_exception = true - terminate_rds_instance(mydb, noop: noop, skipsnapshots: skipsnapshots, region: region, deploy_id: MU.deploy_id, cloud_id: db.db_instance_identifier, mu_name: db.db_instance_identifier.upcase, credentials: credentials) + terminate_rds_instance(mydb, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: db.db_instance_identifier, mu_name: db.db_instance_identifier.upcase, credentials: credentials) } end } - - # Wait for all of the databases to finish cleanup before proceeding threads.each { |t| t.join } @@ -1254,29 +1232,9 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. 
arn = MU::Cloud::AWS::Database.getARN(cluster_id, "cluster", "rds", region: region, credentials: credentials) tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - found_muid = false - found_master = false - tags.each { |tag| - found_muid = true if tag.key == "MU-ID" && tag.value == MU.deploy_id - found_master = true if tag.key == "MU-MASTER-IP" && tag.value == MU.mu_public_ip - } - next if !found_muid - - delete = - if ignoremaster && found_muid - true - elsif !ignoremaster && found_muid && found_master - true - else - false - end - - if delete - parent_thread_id = Thread.current.object_id + if should_delete?(tags, ignoremaster) threads << Thread.new(cluster) { |mydbcluster| - MU.dupGlobals(parent_thread_id) - Thread.abort_on_exception = true - terminate_rds_cluster(mydbcluster, noop: noop, skipsnapshots: skipsnapshots, region: region, deploy_id: MU.deploy_id, cloud_id: cluster_id, mu_name: cluster_id.upcase, credentials: credentials) + terminate_rds_cluster(mydbcluster, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: cluster_id, mu_name: cluster_id.upcase, credentials: credentials) } end } @@ -1293,28 +1251,8 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. 
arn = MU::Cloud::AWS::Database.getARN(sub_group_id, "subgrp", "rds", region: region, credentials: credentials) tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - found_muid = false - found_master = false - tags.each { |tag| - found_muid = true if tag.key == "MU-ID" && tag.value == MU.deploy_id - found_master = true if tag.key == "MU-MASTER-IP" && tag.value == MU.mu_public_ip - } - next if !found_muid - - delete = - if ignoremaster && found_muid - true - elsif !ignoremaster && found_muid && found_master - true - else - false - end - - if delete - parent_thread_id = Thread.current.object_id + if should_delete?(tags, ignoremaster) threads << Thread.new(sub_group_id) { |mysubgroup| - MU.dupGlobals(parent_thread_id) - Thread.abort_on_exception = true delete_subnet_group(mysubgroup, region: region) unless noop } end @@ -1326,28 +1264,8 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. arn = MU::Cloud::AWS::Database.getARN(param_group_id, "pg", "rds", region: region, credentials: credentials) tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - found_muid = false - found_master = false - tags.each { |tag| - found_muid = true if tag.key == "MU-ID" && tag.value == MU.deploy_id - found_master = true if tag.key == "MU-MASTER-IP" && tag.value == MU.mu_public_ip - } - next if !found_muid - - delete = - if ignoremaster && found_muid - true - elsif !ignoremaster && found_muid && found_master - true - else - false - end - - if delete - parent_thread_id = Thread.current.object_id + if should_delete?(tags, ignoremaster) threads << Thread.new(param_group_id) { |myparamgroup| - MU.dupGlobals(parent_thread_id) - Thread.abort_on_exception = true delete_db_parameter_group(myparamgroup, region: region) unless noop } end @@ -1359,28 +1277,8 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: 
MU. arn = MU::Cloud::AWS::Database.getARN(param_group_id, "cluster-pg", "rds", region: region, credentials: credentials) tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - found_muid = false - found_master = false - tags.each { |tag| - found_muid = true if tag.key == "MU-ID" && tag.value == MU.deploy_id - found_master = true if tag.key == "MU-MASTER-IP" && tag.value == MU.mu_public_ip - } - next if !found_muid - - delete = - if ignoremaster && found_muid - true - elsif !ignoremaster && found_muid && found_master - true - else - false - end - - if delete - parent_thread_id = Thread.current.object_id + if should_delete?(tags, ignoremaster) threads << Thread.new(param_group_id) { |myparamgroup| - MU.dupGlobals(parent_thread_id) - Thread.abort_on_exception = true delete_db_cluster_parameter_group(myparamgroup, region: region) unless noop } end @@ -1695,6 +1593,25 @@ def self.validateConfig(db, _configurator) private + def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, master_ip = MU.mu_public_ip) + found_muid = false + found_master = false + tags.each { |tag| + found_muid = true if tag.key == "MU-ID" && tag.value == deploy_id + found_master = true if tag.key == "MU-MASTER-IP" && tag.value == master_ip + } + delete = + if ignoremaster && found_muid + true + elsif !ignoremaster && found_muid && found_master + true + else + false + end + delete + end + private_class_method :should_delete? 
+ # Remove an RDS database and associated artifacts # @param db [OpenStruct]: The cloud provider's description of the database artifact # @return [void] diff --git a/modules/mu/logger.rb b/modules/mu/logger.rb index bb55e31c2..79387ced2 100644 --- a/modules/mu/logger.rb +++ b/modules/mu/logger.rb @@ -133,7 +133,7 @@ def log(msg, html_out "#{time} - #{caller_name} - #{msg}", COLORMAP[level][:html] else str = "#{time} - #{caller_name} - #{msg}" - str = str.call(COLORMAP[level][:ansi]).on_black if color + str = str.send(COLORMAP[level][:ansi]).on_black if color msgs << str end Syslog.log(SYSLOG_MAP[level], msg.gsub(/%/, '')) From bfd6cf7b4ee7c97f2d245e3e53e64f082bf0b4d3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 20 Mar 2020 19:33:18 -0400 Subject: [PATCH 005/124] Logger: typo --- modules/mu/logger.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/mu/logger.rb b/modules/mu/logger.rb index 79387ced2..8e2d4e911 100644 --- a/modules/mu/logger.rb +++ b/modules/mu/logger.rb @@ -143,7 +143,7 @@ def log(msg, if html html_out " #{details}" else - details = details.white.on_block if color + details = details.white.on_black if color msgs << details end Syslog.log(SYSLOG_MAP[level], details.gsub(/%/, '')) From d65f67bb7c9a94f206c6e32f19c47f0cf9de07be Mon Sep 17 00:00:00 2001 From: John Stange Date: Sat, 21 Mar 2020 20:39:24 -0400 Subject: [PATCH 006/124] AWS::Database: rip out arahav's old notify logic --- modules/mu/clouds/aws/database.rb | 449 +++++++++++------------------- 1 file changed, 160 insertions(+), 289 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index ed141c737..7734451b0 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -72,7 +72,6 @@ def create @config["subnet_group_name"] = @mu_name MU.log "Using the database identifier #{@config['identifier']}" - if @config["create_cluster"] getPassword createSubnetGroup @@ -84,75 +83,9 @@ def create 
@cloud_id = createDbCluster elsif @config["add_cluster_node"] - cluster = nil - rr = @config["member_of_cluster"] - cluster = @deploy.findLitterMate(type: "database", name: rr['db_name']) if rr['db_name'] - - if cluster.nil? - tag_key, tag_value = rr['tag'].split(/=/, 2) if !rr['tag'].nil? - found = MU::MommaCat.findStray( - rr['cloud'], - "database", - deploy_id: rr["deploy_id"], - cloud_id: rr["db_id"], - tag_key: tag_key, - tag_value: tag_value, - region: rr["region"], - dummy_ok: true - ) - cluster = found.first if found.size == 1 - end - - raise MuError, "Couldn't resolve cluster node reference to a unique live Database in #{@mu_name}" if cluster.nil? || cluster.cloud_id.nil? - @config['cluster_identifier'] = cluster.cloud_id.downcase - # We're overriding @config["subnet_group_name"] because we need each cluster member to use the cluster's subnet group instead of a unique subnet group - @config["subnet_group_name"] = @config['cluster_identifier'] - @config["creation_style"] = "new" if @config["creation_style"] != "new" - - if @config.has_key?("parameter_group_family") - @config["parameter_group_name"] = @config['identifier'] - createDBParameterGroup - end - - @cloud_id = createDb + @cloud_id = add_cluster_node else - source_db = nil - if @config['read_replica_of'] - rr = @config['read_replica_of'] - source_db = @deploy.findLitterMate(type: "database", name: rr['db_name']) if rr['db_name'] - - if source_db.nil? - tag_key, tag_value = rr['tag'].split(/=/, 2) if !rr['tag'].nil? - found = MU::MommaCat.findStray( - rr['cloud'], - "database", - deploy_id: rr["deploy_id"], - cloud_id: rr["db_id"], - tag_key: tag_key, - tag_value: tag_value, - region: rr["region"], - dummy_ok: true - ) - source_db = found.first if found.size == 1 - end - - raise MuError, "Couldn't resolve read replica reference to a unique live Database in #{@mu_name}" if source_db.nil? or source_db.cloud_id.nil? 
- @config['source_identifier'] = source_db.cloud_id - end - - getPassword - if source_db.nil? or @config['region'] != source_db.config['region'] - createSubnetGroup - else - MU.log "Note: Read Replicas automatically reside in the same subnet group as the source database, if they're both in the same region. This replica may not land in the VPC you intended.", MU::WARN - end - - if @config.has_key?("parameter_group_family") - @config["parameter_group_name"] = @config['identifier'] - createDBParameterGroup - end - - @cloud_id = createDb + @cloud_id = add_basic end end @@ -796,113 +729,27 @@ def groom ) end else - database = MU::Cloud::AWS::Database.getDatabaseById(@config['identifier'], region: @config['region'], credentials: @config['credentials']) # Run SQL on deploy if @config['run_sql_on_deploy'] - MU.log "Running initial SQL commands on #{@config['name']}", details: @config['run_sql_on_deploy'] - - # check if DB is private or public - if !database.publicly_accessible - # This doesn't necessarily mean what we think it does. publicly_accessible = true means resolve to public address. - # publicly_accessible can still be set to true even when only private subnets are included in the subnet group. We try to solve this during creation. 
- is_private = true - else - is_private = false - end - - #Setting up connection params - ssh_keydir = Etc.getpwuid(Process.uid).dir+"/.ssh" - keypairname, _ssh_private_key, _ssh_public_key = @deploy.SSHKey - if is_private and @vpc - if @config['vpc']['nat_host_name'] - begin - gateway = Net::SSH::Gateway.new( - @config['vpc']['nat_host_name'], - @config['vpc']['nat_ssh_user'], - :keys => [ssh_keydir+"/"+keypairname], - :keys_only => true, - :auth_methods => ['publickey'], - # :verbose => :info - ) - port = gateway.open(database.endpoint.address, database.endpoint.port) - address = "127.0.0.1" - MU.log "Tunneling #{@config['engine']} connection through #{nat_host_name} via local port #{port}", MU::DEBUG - rescue IOError => e - MU.log "Got #{e.inspect} while connecting to #{@config['identifier']} through NAT #{nat_host_name}", MU::ERR - end - else - MU.log "Can't run initial SQL commands! Database #{@config['identifier']} is not publicly accessible, but we have no NAT host for connecting to it", MU::WARN, details: @config['run_sql_on_deploy'] - end - else - port = database.endpoint.port - address = database.endpoint.address - end - - # Running SQL on deploy - if @config['engine'] == "postgres" - autoload :PG, 'pg' - begin - conn = PG::Connection.new( - :host => address, - :port => port, - :user => @config['master_user'], - :dbname => database.db_name, - :password => @config['password'] - ) - @config['run_sql_on_deploy'].each { |cmd| - MU.log "Running #{cmd} on database #{@config['name']}" - conn.exec(cmd) - } - conn.finish - rescue PG::Error => e - MU.log "Failed to run initial SQL commands on #{@config['name']} via #{address}:#{port}: #{e.inspect}", MU::WARN, details: conn - end - elsif @config['engine'] == "mysql" - autoload :Mysql, 'mysql' - MU.log "Initiating mysql connection to #{address}:#{port} as #{@config['master_user']}" - conn = Mysql.new(address, @config['master_user'], @config['password'], "mysql", port) - @config['run_sql_on_deploy'].each { |cmd| - 
MU.log "Running #{cmd} on database #{@config['name']}" - conn.query(cmd) - } - conn.close - end - - # close the SQL on deploy sessions - if is_private - begin - gateway.close(port) - rescue IOError => e - MU.log "Failed to close ssh session to NAT after running sql_on_deploy", MU::ERR, details: e.inspect - end - end + run_sql_commands end # set multi-az on deploy if @config['multi_az_on_deploy'] if !database.multi_az MU.log "Setting multi-az on #{@config['identifier']}" - attempts = 0 - begin + MU.retrier([Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::InvalidDBInstanceState], wait: 15, max: 15) { MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance( - db_instance_identifier: @config['identifier'], - apply_immediately: true, - multi_az: true + db_instance_identifier: @config['identifier'], + apply_immediately: true, + multi_az: true ) - rescue Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::InvalidDBInstanceState => e - if attempts < 15 - MU.log "Got #{e.inspect} while setting Multi-AZ on #{@config['identifier']}, retrying." - attempts += 1 - sleep 15 - retry - else - MU.log "Couldn't set Multi-AZ on #{@config['identifier']} after several retries, giving up. #{e.inspect}", MU::ERR - end - end + } end end end + end # Generate database user, database identifier, database name based on engine-specific constraints @@ -986,133 +833,13 @@ def self.getDatabaseClusterById(db_cluster_id, region: MU.curRegion, credentials # We're fine with this returning nil when searching for a database cluster the doesn't exist. end - # Register a description of this database instance with this deployment's metadata. - # Register read replicas as separate instances, while we're - # at it. 
- def notify - my_dbs = [@config] - if @config['read_replica'] - @config['read_replica']['creation_style'] = "read_replica" - @config['read_replica']['password'] = @config["password"] - my_dbs << @config['read_replica'] - end - - deploy_struct = {} - my_dbs.each { |db| - deploy_struct = - if db["create_cluster"] - db["identifier"] = @mu_name.downcase if db["identifier"].nil? - cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(db["identifier"], region: db['region'], credentials: @config['credentials']) - # DNS records for the "real" zone should always be registered as late as possible so override_existing only overwrites the records after the resource is ready to use. - if db['dns_records'] - db['dns_records'].each { |dnsrec| - dnsrec['name'] = cluster.db_cluster_identifier if !dnsrec.has_key?('name') - dnsrec['name'] = "#{dnsrec['name']}.#{MU.environment.downcase}" if dnsrec["append_environment_name"] && !dnsrec['name'].match(/\.#{MU.environment.downcase}$/) - } - end - # XXX this should be a call to @deploy.nameKitten - MU::Cloud::AWS::DNSZone.createRecordsFromConfig(db['dns_records'], target: cluster.endpoint) - - vpc_sg_ids = [] - cluster.vpc_security_groups.each { |vpc_sg| - vpc_sg_ids << vpc_sg.vpc_security_group_id - } - - { - "allocated_storage" => cluster.allocated_storage, - "parameter_group" => cluster.db_cluster_parameter_group, - "subnet_group" => cluster.db_subnet_group, - "identifier" => cluster.db_cluster_identifier, - "region" => db['region'], - "engine" => cluster.engine, - "engine_version" => cluster.engine_version, - "backup_retention_period" => cluster.backup_retention_period, - "preferred_backup_window" => cluster.preferred_backup_window, - "preferred_maintenance_window" => cluster.preferred_maintenance_window, - "endpoint" => cluster.endpoint, - "port" => cluster.port, - "username" => cluster.master_username, - "vpc_sgs" => vpc_sg_ids, - "azs" => cluster.availability_zones, - "vault_name" => cluster.db_cluster_identifier.upcase, - 
"vault_item" => "database_credentials", - "password_field" => "password", - "create_style" => db['creation_style'], - "db_name" => cluster.database_name, - "db_cluster_members" => cluster.db_cluster_members - } - else - db["identifier"] = @mu_name.downcase if db["identifier"].nil? # Is this still valid if we have read replicas? - database = MU::Cloud::AWS::Database.getDatabaseById(db["identifier"], region: db['region'], credentials: db['credentials']) - # DNS records for the "real" zone should always be registered as late as possible so override_existing only overwrites the records after the resource is ready to use. - unless db["add_cluster_node"] - # It isn't necessarily clear what we should do with DNS records of cluster members. Probably need to expose this to the BoK somehow. - if db['dns_records'] - db['dns_records'].each { |dnsrec| - dnsrec['name'] = database.db_instance_identifier if !dnsrec.has_key?('name') - dnsrec['name'] = "#{dnsrec['name']}.#{MU.environment.downcase}" if dnsrec["append_environment_name"] && !dnsrec['name'].match(/\.#{MU.environment.downcase}$/) - } - # XXX this should be a call to @deploy.nameKitten - MU::Cloud::AWS::DNSZone.createRecordsFromConfig(db['dns_records'], target: database.endpoint.address) - end - end - - database = cloud_desc - - vpc_sg_ids = Array.new - database.vpc_security_groups.each { |vpc_sg| - vpc_sg_ids << vpc_sg.vpc_security_group_id - } - - rds_sg_ids = Array.new - database.db_security_groups.each { |rds_sg| - rds_sg_ids << rds_sg.db_security_group_name - } - - subnet_ids = [] - if database.db_subnet_group and database.db_subnet_group.subnets - database.db_subnet_group.subnets.each { |subnet| - subnet_ids << subnet.subnet_identifier - } - end - - { - "identifier" => database.db_instance_identifier, - "region" => db['region'], - "engine" => database.engine, - "engine_version" => database.engine_version, - "backup_retention_period" => database.backup_retention_period, - "preferred_backup_window" => 
database.preferred_backup_window, - "preferred_maintenance_window" => database.preferred_maintenance_window, - "auto_minor_version_upgrade" => database.auto_minor_version_upgrade, - "storage_encrypted" => database.storage_encrypted, - "endpoint" => database.endpoint.address, - "port" => database.endpoint.port, - "username" => database.master_username, - "rds_sgs" => rds_sg_ids, - "vpc_sgs" => vpc_sg_ids, - "az" => database.availability_zone, - "vault_name" => database.db_instance_identifier.upcase, - "vault_item" => "database_credentials", - "password_field" => "password", - "create_style" => db['creation_style'], - "db_name" => database.db_name, - "multi_az" => database.multi_az, - "publicly_accessible" => database.publicly_accessible, - "ca_certificate_identifier" => database.ca_certificate_identifier, - "subnets" => subnet_ids, - "read_replica_source_db" => database.read_replica_source_db_instance_identifier, - "read_replica_instance_identifiers" => database.read_replica_db_instance_identifiers, - "cluster_identifier" => database.db_cluster_identifier, - "size" => database.db_instance_class, - "storage" => database.allocated_storage - } - end - MU.log "Deploy structure is now #{deploy_struct}", MU::DEBUG - } - - raise MuError, "Can't find any deployment metadata" if deploy_struct.empty? - return deploy_struct + # Return the cloud descriptor for this database cluster or instance + def cloud_desc + if @config['create_cluster'] + MU::Cloud::AWS::Database.getDatabaseClusterById(@cloud_id, region: @config['region'], credentials: @credentials) + else + MU::Cloud::AWS::Database.getDatabaseById(@cloud_id, region: @config['region'], credentials: @credentials) + end end # Generate a snapshot from the database described in this instance. 
@@ -1593,6 +1320,150 @@ def self.validateConfig(db, _configurator) private + def add_basic + source_db = nil + if @config['read_replica_of'] + rr = @config['read_replica_of'] + source_db = @deploy.findLitterMate(type: "database", name: rr['db_name']) if rr['db_name'] + + if source_db.nil? + tag_key, tag_value = rr['tag'].split(/=/, 2) if !rr['tag'].nil? + found = MU::MommaCat.findStray( + rr['cloud'], + "database", + deploy_id: rr["deploy_id"], + cloud_id: rr["db_id"], + tag_key: tag_key, + tag_value: tag_value, + region: rr["region"], + dummy_ok: true + ) + source_db = found.first if found.size == 1 + end + + raise MuError, "Couldn't resolve read replica reference to a unique live Database in #{@mu_name}" if source_db.nil? or source_db.cloud_id.nil? + @config['source_identifier'] = source_db.cloud_id + end + + getPassword + if source_db.nil? or @config['region'] != source_db.config['region'] + createSubnetGroup + else + MU.log "Note: Read Replicas automatically reside in the same subnet group as the source database, if they're both in the same region. This replica may not land in the VPC you intended.", MU::WARN + end + + if @config.has_key?("parameter_group_family") + @config["parameter_group_name"] = @config['identifier'] + createDBParameterGroup + end + + createDb + end + + + def add_cluster_node + cluster = nil + rr = @config["member_of_cluster"] + cluster = @deploy.findLitterMate(type: "database", name: rr['db_name']) if rr['db_name'] + + if cluster.nil? + tag_key, tag_value = rr['tag'].split(/=/, 2) if !rr['tag'].nil? + found = MU::MommaCat.findStray( + rr['cloud'], + "database", + deploy_id: rr["deploy_id"], + cloud_id: rr["db_id"], + tag_key: tag_key, + tag_value: tag_value, + region: rr["region"], + dummy_ok: true + ) + cluster = found.first if found.size == 1 + end + + raise MuError, "Couldn't resolve cluster node reference to a unique live Database in #{@mu_name}" if cluster.nil? || cluster.cloud_id.nil? 
+ @config['cluster_identifier'] = cluster.cloud_id.downcase + # We're overriding @config["subnet_group_name"] because we need each cluster member to use the cluster's subnet group instead of a unique subnet group + @config["subnet_group_name"] = @config['cluster_identifier'] + @config["creation_style"] = "new" if @config["creation_style"] != "new" + + if @config.has_key?("parameter_group_family") + @config["parameter_group_name"] = @config['identifier'] + createDBParameterGroup + end + + createDb + end + + def run_sql_commands + MU.log "Running initial SQL commands on #{@config['name']}", details: @config['run_sql_on_deploy'] + + port, address = if !cloud_desc.publicly_accessible and @vpc + if @config['vpc']['nat_host_name'] + keypairname, _ssh_private_key, _ssh_public_key = @deploy.SSHKey + begin + gateway = Net::SSH::Gateway.new( + @config['vpc']['nat_host_name'], + @config['vpc']['nat_ssh_user'], + :keys => [Etc.getpwuid(Process.uid).dir+"/.ssh"+"/"+keypairname], + :keys_only => true, + :auth_methods => ['publickey'] + ) + port = gateway.open(cloud_desc.endpoint.address, cloud_desc.endpoint.port) + MU.log "Tunneling #{@config['engine']} connection through #{@config['vpc']['nat_host_name']} via local port #{port}", MU::DEBUG + [port, "127.0.0.1"] + rescue IOError => e + MU.log "Got #{e.inspect} while connecting to #{@config['identifier']} through NAT #{@config['vpc']['nat_host_name']}", MU::ERR + return + end + else + MU.log "Can't run initial SQL commands! 
Database #{@config['identifier']} is not publicly accessible, but we have no NAT host for connecting to it", MU::WARN, details: @config['run_sql_on_deploy'] + return + end + else + [database.endpoint.port, database.endpoint.address] + end + + # Running SQL on deploy + if @config['engine'] == "postgres" + autoload :PG, 'pg' + begin + conn = PG::Connection.new( + :host => address, + :port => port, + :user => @config['master_user'], + :dbname => cloud_desc.db_name, + :password => @config['password'] + ) + @config['run_sql_on_deploy'].each { |cmd| + MU.log "Running #{cmd} on database #{@config['name']}" + conn.exec(cmd) + } + conn.finish + rescue PG::Error => e + MU.log "Failed to run initial SQL commands on #{@config['name']} via #{address}:#{port}: #{e.inspect}", MU::WARN, details: conn + end + elsif @config['engine'] == "mysql" + autoload :Mysql, 'mysql' + MU.log "Initiating mysql connection to #{address}:#{port} as #{@config['master_user']}" + conn = Mysql.new(address, @config['master_user'], @config['password'], "mysql", port) + @config['run_sql_on_deploy'].each { |cmd| + MU.log "Running #{cmd} on database #{@config['name']}" + conn.query(cmd) + } + conn.close + end + + # close the SQL on deploy sessions + if !cloud_desc.publicly_accessible + begin + gateway.close(port) + rescue IOError => e + MU.log "Failed to close ssh session to NAT after running sql_on_deploy", MU::ERR, details: e.inspect + end + end + end + def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, master_ip = MU.mu_public_ip) found_muid = false found_master = false From e869196c3b82936bffed63741b3a3ea5475191cc Mon Sep 17 00:00:00 2001 From: John Stange Date: Sat, 21 Mar 2020 21:56:35 -0400 Subject: [PATCH 007/124] Database: adjust docs and validation for better referencing syntax --- modules/mu/cleanup.rb | 2 ++ modules/mu/config/database.rb | 38 ++++++++++++++++++++++++++--------- modules/mu/groomers/chef.rb | 3 +++ modules/tests/rds.yaml | 37 
++++++++++++++++++++++++++++++++++ 4 files changed, 70 insertions(+), 10 deletions(-) create mode 100644 modules/tests/rds.yaml diff --git a/modules/mu/cleanup.rb b/modules/mu/cleanup.rb index a0181d60e..54086e959 100644 --- a/modules/mu/cleanup.rb +++ b/modules/mu/cleanup.rb @@ -30,6 +30,8 @@ class Cleanup @onlycloud = false @skipcloud = false + # Resource types, in the order in which we generally have to clean them up + # to disentangle them from one another. TYPES_IN_ORDER = ["Collection", "Endpoint", "Function", "ServerPool", "ContainerCluster", "SearchDomain", "Server", "MsgQueue", "Database", "CacheCluster", "StoragePool", "LoadBalancer", "NoSQLDB", "FirewallRule", "Alarm", "Notifier", "Log", "VPC", "Role", "Group", "User", "Bucket", "DNSZone", "Collection"] # Purge all resources associated with a deployment. diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index cd693f817..8a5286cc1 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -144,22 +144,24 @@ def self.schema "default" => false }, "creation_style" => { - "type" => "string", - "enum" => ["existing", "new", "new_snapshot", "existing_snapshot", "point_in_time"], - "description" => "'new' - create a pristine database instances; 'existing' - use an existing database instance; 'new_snapshot' - create a snapshot of an existing database, and create a new one from that snapshot; 'existing_snapshot' - create database from an existing snapshot.; 'point_in_time' - create database from point in time backup of an existing database", - "default" => "new" + "type" => "string", + "enum" => ["existing", "new", "new_snapshot", "existing_snapshot", "point_in_time"], + "description" => "+new+ creates a pristine database instance; +existing+ clones an existing database instance; +new_snapshot+ creates a snapshot of an existing database, then creates a new instance from that snapshot; +existing_snapshot+ creates database from a pre-existing snapshot; +point_in_time+ 
create database from point in time backup of an existing database. All styles other than +new+ require that +identifier+ or +source+ be set.", + "default" => "new" }, "identifier" => { - "type" => "string", - "description" => "For any creation_style other than 'new' this parameter identifies the database to use. In the case of new_snapshot or point_in_time this is the identifier of an existing database instance; in the case of existing_snapshot this is the identifier of the snapshot." + "type" => "string", + "description" => "Cloud id of a source database to use for creation styles other than +new+; use +source+ for more sophisticated resource references." }, + "source" => MU::Config::Ref.schema(type: "databases", "desc": "Reference a source database to use for +creation_style+ settings +existing+, +new_snapshot+, +existing_snapshot+, or +point_in_time+."), "master_user" => { "type" => "string", "description" => "Set master user name for this database instance; if not specified a random username will be generated" }, "restore_time" => { "type" => "string", - "description" => "Must either be set to 'latest' or date/time value in the following format: 2015-09-12T22:30:00Z. Applies only to point_in_time creation_style" + "description" => "Must either be set to 'latest' or date/time value in the following format: 2015-09-12T22:30:00Z. Applies only to point_in_time creation_style", + "default" => "latest" }, "create_read_replica" => { "type" => "boolean", @@ -266,6 +268,22 @@ def self.validate(db, configurator) end end + if db["identifier"] + if db["source"] + if db["source"].to_h["id"] != db["identifier"] + MU.log "Database #{db['name']} specified identifier '#{db["identifier"]}' with a source parameter that doesn't match", MU::ERR, db["source"] + ok = false + end + else + db["source"] = MU::Config::Ref.get( + id: db["identifier"], + cloud: db["cloud"], + credentials: db["credentials"], + type: "databases" + ) + end + db.delete("identifier") + end if db["storage"].nil? 
and db["creation_style"] == "new" and !db['create_cluster'] MU.log "Must provide a value for 'storage' when creating a new database.", MU::ERR, details: db @@ -296,13 +314,13 @@ def self.validate(db, configurator) if db["creation_style"] == "point_in_time" && db["restore_time"].nil? ok = false - MU.log "You must provide restore_time when creation_style is point_in_time", MU::ERR + MU.log "Database '#{db['name']}' must provide restore_time when creation_style is point_in_time", MU::ERR end if %w{existing new_snapshot existing_snapshot point_in_time}.include?(db["creation_style"]) - if db["identifier"].nil? + if db["source"].nil? ok = false - MU.log "Using existing database (or snapshot thereof), but no identifier given", MU::ERR + MU.log "Database '#{db['name']}' needs existing database/snapshot, but no identifier or source was specified", MU::ERR end end diff --git a/modules/mu/groomers/chef.rb b/modules/mu/groomers/chef.rb index 9f559c183..746de001d 100644 --- a/modules/mu/groomers/chef.rb +++ b/modules/mu/groomers/chef.rb @@ -805,6 +805,9 @@ def saveDeployData end end + # Purge Chef resources matching a particular deploy + # @param deploy_id [String] + # @param noop [Boolean] def self.cleanup(deploy_id, noop = false) return nil if deploy_id.nil? or deploy_id.empty? 
begin diff --git a/modules/tests/rds.yaml b/modules/tests/rds.yaml new file mode 100644 index 000000000..d901f6ed5 --- /dev/null +++ b/modules/tests/rds.yaml @@ -0,0 +1,37 @@ +# clouds: AWS +--- +appname: smoketest +vpcs: +- name: rdstests +databases: +- name: pgcluster + size: db.m5.large + engine: postgres + engine_version: 9.6.6 + add_cluster_node: true + allow_major_version_upgrade: true + auto_minor_version_upgrade: false + backup_retention_period: 10 + cluster_node_count: 2 + create_cluster: true + vpc: + name: rdstests + master_user: Bob +- name: mysql-w-replica + size: db.t2.micro + engine: mysql + storage: 5 + vpc: + name: rdstests + create_read_replica: true + multi_az_on_create: true + master_user: Bob +- name: point-in-time + creation_style: point_in_time + size: db.t2.micro + engine: mysql + storage: 5 + source: + name: mysql-w-replica + vpc: + name: rdstests From 1af5013bcdb44edd3665bfdf0095c005dd96dc33 Mon Sep 17 00:00:00 2001 From: John Stange Date: Sun, 22 Mar 2020 14:03:11 -0400 Subject: [PATCH 008/124] Config::Ref: implement a [] so we can treat a Ref like a Hash indiscriminately --- modules/mu/config/ref.rb | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/modules/mu/config/ref.rb b/modules/mu/config/ref.rb index 1addece45..e93f5d0ba 100644 --- a/modules/mu/config/ref.rb +++ b/modules/mu/config/ref.rb @@ -130,6 +130,16 @@ def <=>(other) self.to_s <=> other.to_s end + # Lets callers access us like a {Hash} + # @param attribute [String,Symbol] + def [](attribute) + if respond_to?(attribute.to_sym) + send(attribute.to_sym) + else + nil + end + end + # Base configuration schema for declared kittens referencing other cloud objects. This is essentially a set of filters that we're going to pass to {MU::MommaCat.findStray}. # @param aliases [Array]: Key => value mappings to set backwards-compatibility aliases for attributes, such as the ubiquitous +vpc_id+ (+vpc_id+ => +id+). 
# @return [Hash] From b0dd89c6e228676231ebff47a41f628cf547be7c Mon Sep 17 00:00:00 2001 From: John Stange Date: Sun, 22 Mar 2020 17:18:03 -0400 Subject: [PATCH 009/124] Logger: reinstate misplaced SUMMARY log functionality --- bin/mu-deploy | 4 ++-- modules/mu.rb | 1 + modules/mu/logger.rb | 5 +++++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/bin/mu-deploy b/bin/mu-deploy index a0d990ba6..67889c459 100755 --- a/bin/mu-deploy +++ b/bin/mu-deploy @@ -105,7 +105,7 @@ if $opts[:dryrun] Thread.handle_interrupt(MU::Cloud::MuCloudResourceNotImplemented => :never) { begin Thread.handle_interrupt(MU::Cloud::MuCloudResourceNotImplemented => :immediate) { - MU.log "Cost calculator not available for this stack, as it uses a resource not implemented in Mu's CloudFormation layer.", MU::WARN, verbosity: MU::Logger::NORMAL + MU.log "Cost calculator not available for this stack, as it uses a resource not implemented in Mu's CloudFormation layer.", MU::NOTICE, verbosity: MU::Logger::NORMAL Thread.current.exit } ensure @@ -124,7 +124,7 @@ if $opts[:dryrun] ) cost_dummy_deploy.run rescue MU::Cloud::MuCloudResourceNotImplemented, MU::Cloud::MuCloudFlagNotImplemented - MU.log "Cost calculator not available for this stack, as it uses a resource not implemented in Mu's CloudFormation layer.", MU::WARN, verbosity: MU::Logger::NORMAL + MU.log "Cost calculator not available for this stack, as it uses a resource not implemented in Mu's CloudFormation layer.", MU::NOTICE, verbosity: MU::Logger::NORMAL end end exit diff --git a/modules/mu.rb b/modules/mu.rb index 6fa0d998c..2fb1f447f 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -230,6 +230,7 @@ module MU ERROR = 4.freeze # Log entries that will be held and displayed/emailed at the end of deploy, # cleanup, etc. 
+ SUMMARY = 5.freeze end require 'mu/logger' diff --git a/modules/mu/logger.rb b/modules/mu/logger.rb index 8e2d4e911..3ba14e701 100644 --- a/modules/mu/logger.rb +++ b/modules/mu/logger.rb @@ -108,6 +108,11 @@ def log(msg, return end + if level == SUMMARY + @summary << msg + return + end + caller_name = extract_caller_name(caller[1]) time = Time.now.strftime("%b %d %H:%M:%S").to_s From b1b49c66198e461e0f86a61b10c212c042e41c67 Mon Sep 17 00:00:00 2001 From: John Stange Date: Sun, 22 Mar 2020 20:51:41 -0400 Subject: [PATCH 010/124] AWS::Database: Cleaning and refactoring enough for read-replicas and point-in-time builds of regular (non-cluster) instances --- modules/mu/clouds/aws/database.rb | 444 +++++++++++++----------------- modules/mu/config/database.rb | 91 +++--- modules/mu/config/ref.rb | 4 +- 3 files changed, 225 insertions(+), 314 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 7734451b0..8f91e190c 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -35,6 +35,12 @@ def initialize(**args) end @mu_name.gsub(/(--|-$)/i, "").gsub(/(_)/, "-").gsub!(/^[^a-z]/i, "") + + if @config['source'] + @config["source"] = MU::Config::Ref.get(@config["source"]) + elsif @config["read_replica_of"] + @config["source"] = MU::Config::Ref.get(@config["read_replica_of"]) + end end # Called automatically by {MU::Deploy#createResources} @@ -46,6 +52,7 @@ def create basename.gsub!(/[^a-z0-9]/i, "") @config["db_name"] = MU::Cloud::AWS::Database.getName(basename, type: "dbname", config: @config) @config['master_user'] = MU::Cloud::AWS::Database.getName(basename, type: "dbuser", config: @config) unless @config['master_user'] + @cloud_id = @mu_name # Lets make sure automatic backups are enabled when DB instance is deployed in Multi-AZ so failover actually works. Maybe default to 1 instead? 
if @config['multi_az_on_create'] or @config['multi_az_on_deploy'] or @config["create_cluster"] @@ -67,25 +74,22 @@ def create createNewSnapshot end - @config['source_identifier'] = @config['identifier'] if @config["creation_style"] == "point_in_time" - @config['identifier'] = @mu_name unless @config["creation_style"] == "existing" @config["subnet_group_name"] = @mu_name - MU.log "Using the database identifier #{@config['identifier']}" if @config["create_cluster"] getPassword createSubnetGroup if @config.has_key?("parameter_group_family") - @config["parameter_group_name"] = @config['identifier'] + @config["parameter_group_name"] = @mu_name createDBClusterParameterGroup end - @cloud_id = createDbCluster + createDbCluster elsif @config["add_cluster_node"] - @cloud_id = add_cluster_node + add_cluster_node else - @cloud_id = add_basic + add_basic end end @@ -183,12 +187,13 @@ def getPassword @groomclass.saveSecret(vault: @mu_name, item: "database_credentials", data: creds) end - # Create the database described in this instance + # Create a plain database instance or read replica, as described in our + # +@config+. # @return [String]: The cloud provider's identifier for this database instance. 
def createDb # Shared configuration elements between most database creation styles - config = { - db_instance_identifier: @config['identifier'], + params = { + db_instance_identifier: @cloud_id, db_instance_class: @config["size"], engine: @config["engine"], auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], @@ -200,52 +205,56 @@ def createDb } unless @config["add_cluster_node"] - config[:storage_type] = @config["storage_type"] - config[:port] = @config["port"] if @config["port"] - config[:iops] = @config["iops"] if @config['storage_type'] == "io1" - config[:multi_az] = @config['multi_az_on_create'] + params[:storage_type] = @config["storage_type"] + params[:port] = @config["port"] if @config["port"] + params[:iops] = @config["iops"] if @config['storage_type'] == "io1" + params[:multi_az] = @config['multi_az_on_create'] end if @config["creation_style"] == "new" unless @config["add_cluster_node"] - config[:preferred_backup_window] = @config["preferred_backup_window"] - config[:backup_retention_period] = @config["backup_retention_period"] - config[:storage_encrypted] = @config["storage_encrypted"] - config[:allocated_storage] = @config["storage"] - config[:db_name] = @config["db_name"] - config[:master_username] = @config['master_user'] - config[:master_user_password] = @config['password'] - config[:vpc_security_group_ids] = @config["vpc_security_group_ids"] + params[:preferred_backup_window] = @config["preferred_backup_window"] + params[:backup_retention_period] = @config["backup_retention_period"] + params[:storage_encrypted] = @config["storage_encrypted"] + params[:allocated_storage] = @config["storage"] + params[:db_name] = @config["db_name"] + params[:master_username] = @config['master_user'] + params[:master_user_password] = @config['password'] + params[:vpc_security_group_ids] = @config["vpc_security_group_ids"] end - config[:engine_version] = @config["engine_version"] - config[:preferred_maintenance_window] = 
@config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] - config[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] - config[:db_cluster_identifier] = @config["cluster_identifier"] if @config["add_cluster_node"] + params[:engine_version] = @config["engine_version"] + params[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] + params[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] + params[:db_cluster_identifier] = @config["cluster_identifier"] if @config["add_cluster_node"] end if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - config[:db_snapshot_identifier] = @config["snapshot_id"] - config[:db_cluster_identifier] = @config["cluster_identifier"] if @config["add_cluster_node"] + params[:db_snapshot_identifier] = @config["snapshot_id"] + params[:db_cluster_identifier] = @config["cluster_identifier"] if @config["add_cluster_node"] end - if @config["creation_style"] == "point_in_time" - point_in_time_config = config - point_in_time_config.delete(:db_instance_identifier) - point_in_time_config[:source_db_instance_identifier] = @config['source_identifier'] - point_in_time_config[:target_db_instance_identifier] = @config['identifier'] - point_in_time_config[:restore_time] = @config['restore_time'] unless @config["restore_time"] == "latest" - point_in_time_config[:use_latest_restorable_time] = true if @config['restore_time'] == "latest" - end - - if @config["read_replica_of"]# || @config["create_read_replica"] - srcdb = @config['source_identifier'] - if @config["read_replica_of"]["region"] and @config['region'] != @config["read_replica_of"]["region"] - srcdb = MU::Cloud::AWS::Database.getARN(@config['source_identifier'], "db", "rds", region: @config["read_replica_of"]["region"], credentials: @config['credentials']) + if @config["creation_style"] == "point_in_time" or 
@config["read_replica_of"] + @config["source"].kitten(@deploy, debug: true) + if !@config["source"].id + MU.log "Database '#{@config['name']}' couldn't resolve cloud id for source database", MU::ERR, details: @config["source"].to_h + raise MuError, "Database '#{@config['name']}' couldn't resolve cloud id for source database" end - read_replica_struct = { - db_instance_identifier: @config['identifier'], - source_db_instance_identifier: srcdb, + end + + if @config["creation_style"] == "point_in_time" + point_in_time_params = params.clone + point_in_time_params.delete(:db_instance_identifier) + point_in_time_params[:source_db_instance_identifier] = @config["source"].id + point_in_time_params[:target_db_instance_identifier] = @cloud_id + point_in_time_params[:restore_time] = @config['restore_time'] unless @config["restore_time"] == "latest" + point_in_time_params[:use_latest_restorable_time] = true if @config['restore_time'] == "latest" + end + + if @config["read_replica_of"] + read_replica_params = { + db_instance_identifier: @cloud_id, + source_db_instance_identifier: @config["source"].id, db_instance_class: @config["size"], auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], publicly_accessible: @config["publicly_accessible"], @@ -253,80 +262,52 @@ def createDb db_subnet_group_name: @config["subnet_group_name"], storage_type: @config["storage_type"] } + if @config["source"].region and @config['region'] != @config["source"].region + read_replica_params[:source_db_instance_identifier] = MU::Cloud::AWS::Database.getARN(@config["source"].id, "db", "rds", region: @config["source"].region, credentials: @config['credentials']) + end - read_replica_struct[:port] = @config["port"] if @config["port"] - read_replica_struct[:iops] = @config["iops"] if @config['storage_type'] == "io1" + read_replica_params[:port] = @config["port"] if @config["port"] + read_replica_params[:iops] = @config["iops"] if @config['storage_type'] == "io1" end - # Creating DB instance - 
attempts = 0 - - begin + MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - MU.log "Creating database instance #{@config['identifier']} from snapshot #{@config["snapshot_id"]}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(config) + MU.log "Creating database instance #{@cloud_id} from snapshot #{@config["snapshot_id"]}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(params) elsif @config["creation_style"] == "point_in_time" - MU.log "Creating database instance #{@config['identifier']} based on point in time backup #{@config['restore_time']} of #{@config['source_identifier']}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_to_point_in_time(point_in_time_config) + MU.log "Creating database instance #{@cloud_id} based on point in time backup #{@config['restore_time']} of #{@config['source'].id}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_to_point_in_time(point_in_time_params) elsif @config["read_replica_of"] - MU.log "Creating read replica database instance #{@config['identifier']} for #{@config['source_identifier']}" + MU.log "Creating read replica database instance #{@cloud_id} for #{@config['source'].id}" begin - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(read_replica_struct) + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(read_replica_params) rescue Aws::RDS::Errors::DBSubnetGroupNotAllowedFault => e MU.log "Being forced to use source database's subnet group: #{e.message}", MU::WARN - read_replica_struct.delete(:db_subnet_group_name) - MU::Cloud::AWS.rds(region: 
@config['region'], credentials: @config['credentials']).create_db_instance_read_replica(read_replica_struct) + read_replica_params.delete(:db_subnet_group_name) + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(read_replica_params) end elsif @config["creation_style"] == "new" - MU.log "Creating pristine database instance #{@config['identifier']} (#{@config['name']}) in #{@config['region']}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(config) - end - rescue Aws::RDS::Errors::InvalidParameterValue => e - if attempts < 5 - MU.log "Got #{e.inspect} creating #{@config['identifier']}, will retry a few times in case of transient errors.", MU::WARN, details: config - attempts += 1 - sleep 10 - retry - else - raise MuError, "Exhausted retries trying to create database instance #{@config['identifier']}: #{e.inspect}" + MU.log "Creating pristine database instance #{@cloud_id} (#{@config['name']}) in #{@config['region']}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) end - end - - wait_start_time = Time.now - retries = 0 + } - begin - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).wait_until(:db_instance_available, db_instance_identifier: @config['identifier']) do |waiter| - # Does create_db_instance implement wait_until_available ? 
- waiter.max_attempts = nil - waiter.before_attempt do |w_attempts| - MU.log "Waiting for RDS database #{@config['identifier']} to be ready...", MU::NOTICE if w_attempts % 10 == 0 - end - waiter.before_wait do |_attempts, r| - throw :success if r.db_instances.first.db_instance_status == "available" - throw :failure if Time.now - wait_start_time > 3600 - end + # Sit on our hands until the instance shows as available + MU.retrier(wait: 10, max: 360, loop_if: Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" }) { |retries, _wait| + if retries > 0 and retries % 20 == 0 + MU.log "Waiting for RDS database #{@cloud_id} to be ready...", MU::NOTICE end - rescue Aws::Waiters::Errors::TooManyAttemptsError => e - raise MuError, "Waited #{(Time.now - wait_start_time).round/60*(retries+1)} minutes for #{@config['identifier']} to become available, giving up. #{e}" if retries > 2 - wait_start_time = Time.now - retries += 1 - retry - end + } - database = MU::Cloud::AWS::Database.getDatabaseById(@config['identifier'], region: @config['region'], credentials: @config['credentials']) - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: database.db_instance_identifier, target: "#{database.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) - MU.log "Database #{@config['name']} is at #{database.endpoint.address}", MU::SUMMARY + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_desc.db_instance_identifier, target: "#{cloud_desc.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) + MU.log "Database #{@config['name']} is at #{cloud_desc.endpoint.address}", MU::SUMMARY if @config['auth_vault'] MU.log "knife vault show #{@config['auth_vault']['vault']} #{@config['auth_vault']['item']} for Database #{@config['name']} credentials", MU::SUMMARY end - # If referencing an existing DB, insert this deploy's DB security group so it can access db + # If referencing an existing DB, insert this 
deploy's DB security group so it can access the thing if @config["creation_style"] == 'existing' - vpc_sg_ids = [] - database.vpc_security_groups.each { |vpc_sg| - vpc_sg_ids << vpc_sg.vpc_security_group_id - } + vpc_sg_ids = cloud_desc.vpc_security_groups.map { |sg| sg.vpc_security_group_id } localdeploy_rule = @deploy.findLitterMate(type: "firewall_rule", name: "database"+@config['name']) if localdeploy_rule.nil? @@ -336,16 +317,20 @@ def createDb vpc_sg_ids << localdeploy_rule.cloud_id mod_config = Hash.new mod_config[:vpc_security_group_ids] = vpc_sg_ids - mod_config[:db_instance_identifier] = @config["identifier"] + mod_config[:db_instance_identifier] = @cloud_id MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) - MU.log "Modified database #{@config['identifier']} with new security groups: #{mod_config}", MU::NOTICE + MU.log "Modified database #{@cloud_id} with new security groups: #{mod_config}", MU::NOTICE end # When creating from a snapshot, some of the create arguments aren't # applicable- but we can apply them after the fact with a modify. 
if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"]) or @config["read_replica_of"] - mod_config = Hash.new + mod_config = { + db_instance_identifier: @cloud_id, + vpc_security_group_ids: @config["vpc_security_group_ids"], + apply_immediately: true + } if !@config["read_replica_of"] mod_config[:preferred_backup_window] = @config["preferred_backup_window"] mod_config[:backup_retention_period] = @config["backup_retention_period"] @@ -355,55 +340,33 @@ def createDb mod_config[:master_user_password] = @config['password'] mod_config[:allocated_storage] = @config["storage"] if @config["storage"] end - mod_config[:db_instance_identifier] = database.db_instance_identifier - mod_config[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] - mod_config[:vpc_security_group_ids] = @config["vpc_security_group_ids"] - mod_config[:apply_immediately] = true + if @config["preferred_maintenance_window"] + mod_config[:preferred_maintenance_window] = @config["preferred_maintenance_window"] + end MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) - wait_start_time = Time.now - retries = 0 - begin - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).wait_until(:db_instance_available, db_instance_identifier: @config['identifier']) do |waiter| - # Does create_db_instance implement wait_until_available ? 
- waiter.max_attempts = nil - waiter.before_attempt do |w_attempts| - MU.log "Waiting for RDS database #{@config['identifier'] } to be ready..", MU::NOTICE if w_attempts % 10 == 0 - end - waiter.before_wait do |_attempts, r| - throw :success if r.db_instances.first.db_instance_status == "available" - throw :failure if Time.now - wait_start_time > 2400 - end + MU.retrier(wait: 10, max: 240, loop_if: Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" }) { |retries, _wait| + if retries > 0 and retries % 10 == 0 + MU.log "Waiting for modifications on RDS database #{@cloud_id}...", MU::NOTICE end - rescue Aws::Waiters::Errors::TooManyAttemptsError => e - raise MuError, "Waited #{(Time.now - wait_start_time).round/60*(retries+1)} minutes for #{@config['identifier']} to become available, giving up. #{e}" if retries > 2 - wait_start_time = Time.now - retries += 1 - retry - end + } + end # Maybe wait for DB instance to be in available state. DB should still be writeable at this state if @config['allow_major_version_upgrade'] && @config["creation_style"] == "new" - MU.log "Setting major database version upgrade on #{@config['identifier']}'" - database = MU::Cloud::AWS::Database.getDatabaseById(@config['identifier'], region: @config['region'], credentials: @config['credentials']) - begin - if database.db_instance_status != "available" - sleep 5 - database = MU::Cloud::AWS::Database.getDatabaseById(@config['identifier'], region: @config['region'], credentials: @config['credentials']) - end - end while database.db_instance_status != "available" + MU.log "Setting major database version upgrade on #{@cloud_id}'" MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance( - db_instance_identifier: @config['identifier'], + db_instance_identifier: @cloud_id, apply_immediately: true, allow_major_version_upgrade: true ) end - MU.log "Database #{@config['identifier']} is ready to use" - return 
database.db_instance_identifier + MU.log "Database #{@config['name']} (#{@mu_name}) is ready to use" + @cloud_id end # Create the database cluster described in this instance @@ -434,12 +397,12 @@ def createDbCluster cluster_config_struct[:snapshot_identifier] = @config["snapshot_id"] cluster_config_struct[:engine] = @config["engine"] cluster_config_struct[:engine_version] = @config["engine_version"] - cluster_config_struct[:database_name] = @config["db_name"] + cluster_config_struct[:database_name] = @cloud_id end if @config["creation_style"] == "new" cluster_config_struct[:backup_retention_period] = @config["backup_retention_period"] - cluster_config_struct[:database_name] = @config["db_name"] + cluster_config_struct[:database_name] = @cloud_id cluster_config_struct[:db_cluster_parameter_group_name] = @config["parameter_group_name"] cluster_config_struct[:engine] = @config["engine"] cluster_config_struct[:engine_version] = @config["engine_version"] @@ -450,7 +413,7 @@ def createDbCluster end if @config["creation_style"] == "point_in_time" - cluster_config_struct[:source_db_cluster_identifier] = @config["source_identifier"] + cluster_config_struct[:source_db_cluster_identifier] = @config["source"].id cluster_config_struct[:restore_to_time] = @config["restore_time"] unless @config["restore_time"] == "latest" cluster_config_struct[:use_latest_restorable_time] = true if @config["restore_time"] == "latest" end @@ -468,7 +431,7 @@ def createDbCluster MU.log "Creating new database cluster #{@config['identifier']} from snapshot #{@config["snapshot_id"]}" MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_from_snapshot(cluster_config_struct) elsif @config["creation_style"] == "point_in_time" - MU.log "Creating new database cluster #{@config['identifier']} from point in time backup #{@config["restore_time"]} of #{@config["source_identifier"]}" + MU.log "Creating new database cluster #{@config['identifier']} from point in 
time backup #{@config["restore_time"]} of #{@config["source"].id}" MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_to_point_in_time(cluster_config_struct) end rescue Aws::RDS::Errors::InvalidParameterValue => e @@ -524,76 +487,18 @@ def createDbCluster def createSubnetGroup # Finding subnets, creating security groups/adding holes, create subnet group subnet_ids = [] - vpc_id = nil - if @config['vpc'] and !@config['vpc'].empty? - raise MuError, "Didn't find the VPC specified in #{@config["vpc"]}" unless @vpc - - vpc_id = @vpc.cloud_id - # Getting subnet IDs - subnets = - if @config["vpc"]["subnets"].empty? - @vpc.subnets - else - subnet_objects= [] - @config["vpc"]["subnets"].each { |subnet| - sobj = @vpc.getSubnet(cloud_id: subnet["subnet_id"], name: subnet["subnet_name"]) - if sobj.nil? - MU.log "Got nil result from @vpc.getSubnet(cloud_id: #{subnet["subnet_id"]}, name: #{subnet["subnet_name"]})", MU::WARN - else - subnet_objects << sobj - end - } - subnet_objects - end - subnets.each{ |subnet| - next if subnet.nil? - next if @config["publicly_accessible"] and subnet.private? - subnet_ids << subnet.cloud_id - } - else - # If we didn't specify a VPC try to figure out if the account has a default VPC - vpc_id = nil - subnets = [] - MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_vpcs.vpcs.each { |vpc| - if vpc.is_default - vpc_id = vpc.vpc_id - subnets = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_subnets( - filters: [ - { - name: "vpc-id", - values: [vpc_id] - } - ] - ).subnets - break - end - } + raise MuError, "Didn't find the VPC specified in #{@config["vpc"]}" unless @vpc - if !subnets.empty? 
- mu_subnets = [] - subnets.each { |subnet| - subnet_ids << subnet.subnet_id - mu_subnets << {"subnet_id" => subnet.subnet_id} - } - - @config['vpc'] = { - "vpc_id" => vpc_id, - "subnets" => mu_subnets - } - # Default VPC has only public subnets by default so setting publicly_accessible = true - @config["publicly_accessible"] = true - MU.log "Using default VPC for cache cluster #{@config['identifier']}" - end - end + mySubnets.each { |subnet| + next if @config["publicly_accessible"] and subnet.private? + subnet_ids << subnet.cloud_id + } if @config['creation_style'] == "existing" - srcdb = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_instances( - db_instance_identifier: @config['identifier'] - ) - srcdb_vpc = srcdb.db_instances.first.db_subnet_group.vpc_id - if srcdb_vpc != vpc_id - MU.log "#{self} is deploying into #{vpc_id}, but our source database, #{@config['identifier']}, is in #{srcdb_vpc}", MU::ERR + srcdb_vpc = @config['source'].kitten.cloud_desc.db_subnet_group.vpc_id + if srcdb_vpc != @vpc.cloud_id + MU.log "#{self} is deploying into #{@vpc.cloud_id}, but our source database, #{@config['identifier']}, is in #{srcdb_vpc}", MU::ERR raise MuError, "Can't use 'existing' to deploy into a different VPC from the source database; try 'new_snapshot' instead" end end @@ -603,10 +508,10 @@ def createSubnetGroup else # Create subnet group resp = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_subnet_group( - db_subnet_group_name: @config["subnet_group_name"], - db_subnet_group_description: @config["subnet_group_name"], - subnet_ids: subnet_ids, - tags: allTags + db_subnet_group_name: @config["subnet_group_name"], + db_subnet_group_description: @config["subnet_group_name"], + subnet_ids: subnet_ids, + tags: allTags ) @config["subnet_group_name"] = resp.db_subnet_group.db_subnet_group_name @@ -619,9 +524,8 @@ def createSubnetGroup end # Find NAT and create holes in security 
groups. - if @config["vpc"]["nat_host_name"] || @config["vpc"]["nat_host_id"] || @config["vpc"]["nat_host_tag"] || @config["vpc"]["nat_host_ip"] - nat = @nat - if nat.is_a?(Struct) && nat.nat_gateway_id && nat.nat_gateway_id.start_with?("nat-") + if @nat + if @nat.is_a?(Struct) and @nat.respond_to?(:nat_gateway_id) and @nat.nat_gateway_id.start_with?("nat-") MU.log "Using NAT Gateway, not modifying security groups" else _nat_name, _nat_conf, nat_deploydata = @nat.describe @@ -833,13 +737,27 @@ def self.getDatabaseClusterById(db_cluster_id, region: MU.curRegion, credentials # We're fine with this returning nil when searching for a database cluster the doesn't exist. end + # Return the metadata for this ContainerCluster + # @return [Hash] + def notify + deploy_struct = MU.structToHash(cloud_desc) + deploy_struct['cloud_id'] = @cloud_id + deploy_struct["region"] ||= @config['region'] + deploy_struct["db_name"] ||= @config['db_name'] + deploy_struct + end + # Return the cloud descriptor for this database cluster or instance - def cloud_desc - if @config['create_cluster'] + def cloud_desc(use_cache: true) + return @cloud_desc_cache if @cloud_desc_cache and use_cache + + @cloud_desc_cache = if @config['create_cluster'] MU::Cloud::AWS::Database.getDatabaseClusterById(@cloud_id, region: @config['region'], credentials: @credentials) else MU::Cloud::AWS::Database.getDatabaseById(@cloud_id, region: @config['region'], credentials: @credentials) end + + @cloud_desc_cache end # Generate a snapshot from the database described in this instance. 
@@ -852,13 +770,13 @@ def createNewSnapshot if @config["create_cluster"] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster_snapshot( db_cluster_snapshot_identifier: snap_id, - db_cluster_identifier: @config["identifier"], + db_cluster_identifier: @mu_name, tags: allTags ) else MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_snapshot( db_snapshot_identifier: snap_id, - db_instance_identifier: @config["identifier"], + db_instance_identifier: @mu_name, tags: allTags ) end @@ -871,8 +789,8 @@ def createNewSnapshot attempts = 0 loop do - MU.log "Waiting for RDS snapshot of #{@config["identifier"]} to be ready...", MU::NOTICE if attempts % 20 == 0 - MU.log "Waiting for RDS snapshot of #{@config["identifier"]} to be ready...", MU::DEBUG + MU.log "Waiting for RDS snapshot of #{@mu_name} to be ready...", MU::NOTICE if attempts % 20 == 0 + MU.log "Waiting for RDS snapshot of #{@mu_name} to be ready...", MU::DEBUG snapshot_resp = if @config["create_cluster"] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_cluster_snapshots(db_cluster_snapshot_identifier: snap_id) @@ -895,11 +813,12 @@ def createNewSnapshot # Fetch the latest snapshot of the database described in this instance. # @return [String]: The cloud provider's identifier for the snapshot. 
def getExistingSnapshot + src_ref = MU::Config::Ref.get(@config["source"]) resp = if @config["create_cluster"] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_cluster_snapshots(db_cluster_snapshot_identifier: @config["identifier"]) + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_cluster_snapshots(db_cluster_snapshot_identifier: src_ref.id) else - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_snapshots(db_snapshot_identifier: @config["identifier"]) + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_snapshots(db_snapshot_identifier: src_ref.id) end snapshots = @config["create_cluster"] ? resp.db_cluster_snapshots : resp.db_snapshots @@ -1137,10 +1056,11 @@ def self.schema(_config) def self.validateConfig(db, _configurator) ok = true + if db['creation_style'] == "existing_snapshot" and !db['create_cluster'] and - db['identifier'] and db['identifier'].match(/:cluster-snapshot:/) - MU.log "Database #{db['name']}: Existing snapshot #{db['identifier']} looks like a cluster snapshot, but create_cluster is not set. Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR + db['source'] and db["source"]["id"] and db['source']["id"].match(/:cluster-snapshot:/) + MU.log "Database #{db['name']}: Existing snapshot #{db["source"]["id"]} looks like a cluster snapshot, but create_cluster is not set. 
Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR ok = false end @@ -1241,10 +1161,10 @@ def self.validateConfig(db, _configurator) if db["creation_style"] == "existing" begin MU::Cloud::AWS.rds(region: db['region']).describe_db_instances( - db_instance_identifier: db['identifier'] + db_instance_identifier: db['source']['id'] ) rescue Aws::RDS::Errors::DBInstanceNotFound - MU.log "Source database #{db['identifier']} was specified for #{db['name']}, but no such database exists in #{db['region']}", MU::ERR + MU.log "Source database was specified for #{db['name']}, but no such database exists in #{db['region']}", MU::ERR, db['source'] ok = false end end @@ -1305,6 +1225,30 @@ def self.validateConfig(db, _configurator) end end + if !db["vpc"] + MU::Cloud::AWS.ec2(region: db['region'], credentials: db['credentials']).describe_vpcs.vpcs.each { |vpc| + if vpc.is_default + db["publicly_accessible"] = true + db['vpc'] = { + "id" => vpc.vpc_id, + "cloud" => "AWS", + "region" => db['region'], + "credentials" => db['credentials'] + } + db['vpc']['subnets'] = MU::Cloud::AWS.ec2(region: db['region'], credentials: db['credentials']).describe_subnets( + filters: [ + { + name: "vpc-id", + values: [vpc.vpc_id] + } + ] + ).subnets.map { |s| { "subnet_id" => s.subnet_id } } + MU.log "Using default VPC for database #{db['name']}" + break + end + } + end + if db["vpc"] if db["vpc"]["subnet_pref"] == "all_public" and !db['publicly_accessible'] and (db["vpc"]['subnets'].nil? or db["vpc"]['subnets'].empty?) MU.log "Setting publicly_accessible to true on database '#{db['name']}', since deploying into public subnets.", MU::WARN @@ -1321,39 +1265,16 @@ def self.validateConfig(db, _configurator) private def add_basic - source_db = nil - if @config['read_replica_of'] - rr = @config['read_replica_of'] - source_db = @deploy.findLitterMate(type: "database", name: rr['db_name']) if rr['db_name'] - - if source_db.nil? 
- tag_key, tag_value = rr['tag'].split(/=/, 2) if !rr['tag'].nil? - found = MU::MommaCat.findStray( - rr['cloud'], - "database", - deploy_id: rr["deploy_id"], - cloud_id: rr["db_id"], - tag_key: tag_key, - tag_value: tag_value, - region: rr["region"], - dummy_ok: true - ) - source_db = found.first if found.size == 1 - end - - raise MuError, "Couldn't resolve read replica reference to a unique live Database in #{@mu_name}" if source_db.nil? or source_db.cloud_id.nil? - @config['source_identifier'] = source_db.cloud_id - end getPassword - if source_db.nil? or @config['region'] != source_db.config['region'] + if @config['source'].nil? or @config['region'] != @config['source'].region createSubnetGroup else MU.log "Note: Read Replicas automatically reside in the same subnet group as the source database, if they're both in the same region. This replica may not land in the VPC you intended.", MU::WARN end if @config.has_key?("parameter_group_family") - @config["parameter_group_name"] = @config['identifier'] + @config["parameter_group_name"] = @mu_name createDBParameterGroup end @@ -1388,7 +1309,7 @@ def add_cluster_node @config["creation_style"] = "new" if @config["creation_style"] != "new" if @config.has_key?("parameter_group_family") - @config["parameter_group_name"] = @config['identifier'] + @config["parameter_group_name"] = @mu_name createDBParameterGroup end @@ -1398,7 +1319,9 @@ def add_cluster_node def run_sql_commands MU.log "Running initial SQL commands on #{@config['name']}", details: @config['run_sql_on_deploy'] - port, address = if !cloud_desc.publicly_accessible and @vpc + port = address = nil + + if !cloud_desc.publicly_accessible and @vpc if @config['vpc']['nat_host_name'] keypairname, _ssh_private_key, _ssh_public_key = @deploy.SSHKey begin @@ -1410,18 +1333,19 @@ def run_sql_commands :auth_methods => ['publickey'] ) port = gateway.open(cloud_desc.endpoint.address, cloud_desc.endpoint.port) + address = "127.0.0.1" MU.log "Tunneling #{@config['engine']} 
connection through #{@config['vpc']['nat_host_name']} via local port #{port}", MU::DEBUG - [port, "127.0.0.1"] rescue IOError => e - MU.log "Got #{e.inspect} while connecting to #{@config['identifier']} through NAT #{@config['vpc']['nat_host_name']}", MU::ERR + MU.log "Got #{e.inspect} while connecting to #{@mu_name} through NAT #{@config['vpc']['nat_host_name']}", MU::ERR return end else - MU.log "Can't run initial SQL commands! Database #{@config['identifier']} is not publicly accessible, but we have no NAT host for connecting to it", MU::WARN, details: @config['run_sql_on_deploy'] + MU.log "Can't run initial SQL commands! Database #{@mu_name} is not publicly accessible, but we have no NAT host for connecting to it", MU::WARN, details: @config['run_sql_on_deploy'] return end else - [database.endpoint.port, database.endpoint.address] + port = database.endpoint.port + address = database.endpoint.address end # Running SQL on deploy @@ -1496,7 +1420,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M cloud_id: cloud_id, mu_name: mu_name ).first - cloud_id ||= db.db_instance_identifier + cloud_id ||= db.cloud_id raise MuError, "terminate_rds_instance requires a non-nil database descriptor" if db.nil? @@ -1512,7 +1436,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M if db.db_instance_status != "available" MU.retrier([], wait: 60, loop_if: Proc.new { %w{creating modifying backing-up}.include?(db.db_instance_status) }) { db = MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region, credentials: credentials) - return if db.il? + return if db.nil? 
} end @@ -1540,7 +1464,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M params[:skip_final_snapshot] = true end } - MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists], wait: 30, max: 5, on_retry: on_retry) { + MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists], wait: 60, max: 20, on_retry: on_retry) { MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params) } end @@ -1562,8 +1486,8 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M # Cleanup the database vault groomer = - if database_obj - database_obj.config.has_key?("groomer") ? database_obj.config["groomer"] : MU::Config.defaultGroomer + if db and db.respond_to?(:config) and db.config + db.config.has_key?("groomer") ? db.config["groomer"] : MU::Config.defaultGroomer else MU::Config.defaultGroomer end diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index 8a5286cc1..a1f8ab0e7 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -23,7 +23,7 @@ def self.schema { "type" => "object", "description" => "Create a dedicated database server.", - "required" => ["name", "engine", "size", "cloud", "storage"], + "required" => ["name", "engine", "size", "cloud"], "additionalProperties" => false, "properties" => { "groomer" => { @@ -78,7 +78,8 @@ def self.schema }, "storage" => { "type" => "integer", - "description" => "Storage space for this database instance (GB)." 
+ "description" => "Storage space for this database instance (GB).", + "default" => 5 }, "storage_type" => { "enum" => ["standard", "gp2", "io1"], @@ -86,12 +87,12 @@ def self.schema "default" => "gp2" }, "run_sql_on_deploy" => { - "type" => "array", - "minItems" => 1, - "items" => { - "description" => "Arbitrary SQL commands to run after the database is fully configred (PostgreSQL databases only).", - "type" => "string" - } + "type" => "array", + "minItems" => 1, + "items" => { + "description" => "Arbitrary SQL commands to run after the database is fully configred (PostgreSQL databases only).", + "type" => "string" + } }, "port" => {"type" => "integer"}, "vpc" => MU::Config::VPC.reference(MU::Config::VPC::MANY_SUBNETS, MU::Config::VPC::NAT_OPTS, "all_public"), @@ -217,27 +218,11 @@ def self.schema # Schema block for other resources to use when referencing a sibling Database # @return [Hash] def self.reference - { - "type" => "object", - "description" => "Incorporate a database object", - "minProperties" => 1, - "additionalProperties" => false, - "properties" => { - "db_id" => {"type" => "string"}, - "db_name" => {"type" => "string"}, - "region" => MU::Config.region_primitive, - "cloud" => MU::Config.cloud_primitive, - "tag" => { - "type" => "string", - "description" => "Identify this Database by a tag (key=value). Note that this tag must not match more than one resource.", - "pattern" => "^[^=]+=.+" - }, - "deploy_id" => { - "type" => "string", - "description" => "Look for a Database fitting this description in another Mu deployment with this id.", - } - } - } + schema_aliases = [ + { "db_id" => "id" }, + { "db_name" => "name" } + ] + MU::Config::Ref.schema(schema_aliases, type: "databases") end # Generic pre-processing of {MU::Config::BasketofKittens::databases}, bare and unvalidated. 
@@ -270,7 +255,7 @@ def self.validate(db, configurator) if db["identifier"] if db["source"] - if db["source"].to_h["id"] != db["identifier"] + if db["source"]["id"] != db["identifier"] MU.log "Database #{db['name']} specified identifier '#{db["identifier"]}' with a source parameter that doesn't match", MU::ERR, db["source"] ok = false end @@ -355,8 +340,9 @@ def self.validate(db, configurator) replica['create_read_replica'] = false replica["create_cluster"] = false replica['read_replica_of'] = { - "db_name" => db['name'], + "name" => db['name'], "cloud" => db['cloud'], + "credentials" => db['credentials'], "region" => db['read_replica_region'] || db['region'] } replica['dependencies'] << { @@ -405,27 +391,12 @@ def self.validate(db, configurator) end if !db['read_replica_of'].nil? - rr = db['read_replica_of'] - if !rr['db_name'].nil? - db['dependencies'] << { "name" => rr['db_name'], "type" => "database" } - else - rr['cloud'] = db['cloud'] if rr['cloud'].nil? - tag_key, tag_value = rr['tag'].split(/=/, 2) if !rr['tag'].nil? - found = MU::MommaCat.findStray( - rr['cloud'], - "database", - deploy_id: rr["deploy_id"], - cloud_id: rr["db_id"], - tag_key: tag_key, - tag_value: tag_value, - region: rr["region"], - dummy_ok: true - ) - ext_database = found.first if !found.nil? 
and found.size == 1 - if !ext_database - MU.log "Couldn't resolve Database reference to a unique live Database in #{db['name']}", MU::ERR, details: rr - ok = false - end + rr = MU::Config::Ref.get(db['read_replica_of']) + if rr.name and !rr.deploy_id + db['dependencies'] << { "name" => rr.name, "type" => "database" } + elsif !rr.kitten + MU.log "Couldn't resolve Database reference to a unique live Database in #{db['name']}", MU::ERR, details: rr + ok = false end elsif db["member_of_cluster"] rr = db["member_of_cluster"] @@ -454,6 +425,22 @@ def self.validate(db, configurator) end end end + + if db["source"] + + if db["source"]["name"] and + !db["source"]["deploy_id"] and + configurator.haveLitterMate?(db["source"]["name"], "databases") + db["dependencies"] ||= [] + db["dependencies"] << { + "type" => "database", + "name" => db["source"]["name"], + "phase" => "groom" + } + end + db["source"]["cloud"] ||= db["cloud"] + end + db['dependencies'].uniq! read_replicas.each { |new_replica| diff --git a/modules/mu/config/ref.rb b/modules/mu/config/ref.rb index e93f5d0ba..da80b9421 100644 --- a/modules/mu/config/ref.rb +++ b/modules/mu/config/ref.rb @@ -259,7 +259,7 @@ def cloud_id # called in a live deploy, which is to say that if called during initial # configuration parsing, results may be incorrect. # @param mommacat [MU::MommaCat]: A deploy object which will be searched for the referenced resource if provided, before restoring to broader, less efficient searches. 
- def kitten(mommacat = @mommacat, shallow: false) + def kitten(mommacat = @mommacat, shallow: false, debug: false) return nil if !@cloud or !@type if @obj @@ -270,7 +270,7 @@ def kitten(mommacat = @mommacat, shallow: false) end if mommacat - @obj = mommacat.findLitterMate(type: @type, name: @name, cloud_id: @id, credentials: @credentials, debug: false) + @obj = mommacat.findLitterMate(type: @type, name: @name, cloud_id: @id, credentials: @credentials, debug: debug) if @obj # initialize missing attributes, if we can @id ||= @obj.cloud_id @mommacat ||= mommacat From d58e38e1e96ddbfaf5432e9bd6d598036d3c7dc3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Mon, 23 Mar 2020 12:03:57 -0400 Subject: [PATCH 011/124] AWS::Database: dismantle and recombobulate non-cluster database creation --- modules/mu/clouds/aws/database.rb | 358 ++++++++++++++++-------------- 1 file changed, 194 insertions(+), 164 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 8f91e190c..a84fb1bc4 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -187,11 +187,7 @@ def getPassword @groomclass.saveSecret(vault: @mu_name, item: "database_credentials", data: creds) end - # Create a plain database instance or read replica, as described in our - # +@config+. - # @return [String]: The cloud provider's identifier for this database instance. 
- def createDb - # Shared configuration elements between most database creation styles + def genericParams params = { db_instance_identifier: @cloud_id, db_instance_class: @config["size"], @@ -204,169 +200,11 @@ def createDb tags: allTags } - unless @config["add_cluster_node"] - params[:storage_type] = @config["storage_type"] - params[:port] = @config["port"] if @config["port"] - params[:iops] = @config["iops"] if @config['storage_type'] == "io1" - params[:multi_az] = @config['multi_az_on_create'] - end - - if @config["creation_style"] == "new" - unless @config["add_cluster_node"] - params[:preferred_backup_window] = @config["preferred_backup_window"] - params[:backup_retention_period] = @config["backup_retention_period"] - params[:storage_encrypted] = @config["storage_encrypted"] - params[:allocated_storage] = @config["storage"] - params[:db_name] = @config["db_name"] - params[:master_username] = @config['master_user'] - params[:master_user_password] = @config['password'] - params[:vpc_security_group_ids] = @config["vpc_security_group_ids"] - end - - params[:engine_version] = @config["engine_version"] - params[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] - params[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] - params[:db_cluster_identifier] = @config["cluster_identifier"] if @config["add_cluster_node"] - end - if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) params[:db_snapshot_identifier] = @config["snapshot_id"] - params[:db_cluster_identifier] = @config["cluster_identifier"] if @config["add_cluster_node"] end - if @config["creation_style"] == "point_in_time" or @config["read_replica_of"] - @config["source"].kitten(@deploy, debug: true) - if !@config["source"].id - MU.log "Database '#{@config['name']}' couldn't resolve cloud id for source database", MU::ERR, details: @config["source"].to_h - raise MuError, "Database 
'#{@config['name']}' couldn't resolve cloud id for source database" - end - end - - if @config["creation_style"] == "point_in_time" - point_in_time_params = params.clone - point_in_time_params.delete(:db_instance_identifier) - point_in_time_params[:source_db_instance_identifier] = @config["source"].id - point_in_time_params[:target_db_instance_identifier] = @cloud_id - point_in_time_params[:restore_time] = @config['restore_time'] unless @config["restore_time"] == "latest" - point_in_time_params[:use_latest_restorable_time] = true if @config['restore_time'] == "latest" - end - - if @config["read_replica_of"] - read_replica_params = { - db_instance_identifier: @cloud_id, - source_db_instance_identifier: @config["source"].id, - db_instance_class: @config["size"], - auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], - publicly_accessible: @config["publicly_accessible"], - tags: allTags, - db_subnet_group_name: @config["subnet_group_name"], - storage_type: @config["storage_type"] - } - if @config["source"].region and @config['region'] != @config["source"].region - read_replica_params[:source_db_instance_identifier] = MU::Cloud::AWS::Database.getARN(@config["source"].id, "db", "rds", region: @config["source"].region, credentials: @config['credentials']) - end - - read_replica_params[:port] = @config["port"] if @config["port"] - read_replica_params[:iops] = @config["iops"] if @config['storage_type'] == "io1" - end - - MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { - if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - MU.log "Creating database instance #{@cloud_id} from snapshot #{@config["snapshot_id"]}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(params) - elsif @config["creation_style"] == "point_in_time" - MU.log "Creating database instance #{@cloud_id} based on point in time backup #{@config['restore_time']} of 
#{@config['source'].id}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_to_point_in_time(point_in_time_params) - elsif @config["read_replica_of"] - MU.log "Creating read replica database instance #{@cloud_id} for #{@config['source'].id}" - begin - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(read_replica_params) - rescue Aws::RDS::Errors::DBSubnetGroupNotAllowedFault => e - MU.log "Being forced to use source database's subnet group: #{e.message}", MU::WARN - read_replica_params.delete(:db_subnet_group_name) - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(read_replica_params) - end - elsif @config["creation_style"] == "new" - MU.log "Creating pristine database instance #{@cloud_id} (#{@config['name']}) in #{@config['region']}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) - end - } - - # Sit on our hands until the instance shows as available - MU.retrier(wait: 10, max: 360, loop_if: Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" }) { |retries, _wait| - if retries > 0 and retries % 20 == 0 - MU.log "Waiting for RDS database #{@cloud_id} to be ready...", MU::NOTICE - end - } - - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_desc.db_instance_identifier, target: "#{cloud_desc.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) - MU.log "Database #{@config['name']} is at #{cloud_desc.endpoint.address}", MU::SUMMARY - if @config['auth_vault'] - MU.log "knife vault show #{@config['auth_vault']['vault']} #{@config['auth_vault']['item']} for Database #{@config['name']} credentials", MU::SUMMARY - end - - # If referencing an existing DB, insert this deploy's DB security group so it can access the thing - if @config["creation_style"] == 
'existing' - vpc_sg_ids = cloud_desc.vpc_security_groups.map { |sg| sg.vpc_security_group_id } - - localdeploy_rule = @deploy.findLitterMate(type: "firewall_rule", name: "database"+@config['name']) - if localdeploy_rule.nil? - raise MU::MuError, "Database #{@config['name']} failed to find its generic security group 'database#{@config['name']}'" - end - MU.log "Found this deploy's DB security group: #{localdeploy_rule.cloud_id}", MU::DEBUG - vpc_sg_ids << localdeploy_rule.cloud_id - mod_config = Hash.new - mod_config[:vpc_security_group_ids] = vpc_sg_ids - mod_config[:db_instance_identifier] = @cloud_id - - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) - MU.log "Modified database #{@cloud_id} with new security groups: #{mod_config}", MU::NOTICE - end - - # When creating from a snapshot, some of the create arguments aren't - # applicable- but we can apply them after the fact with a modify. - if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"]) or @config["read_replica_of"] - mod_config = { - db_instance_identifier: @cloud_id, - vpc_security_group_ids: @config["vpc_security_group_ids"], - apply_immediately: true - } - if !@config["read_replica_of"] - mod_config[:preferred_backup_window] = @config["preferred_backup_window"] - mod_config[:backup_retention_period] = @config["backup_retention_period"] - mod_config[:engine_version] = @config["engine_version"] - mod_config[:allow_major_version_upgrade] = @config["allow_major_version_upgrade"] if @config['allow_major_version_upgrade'] - mod_config[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] - mod_config[:master_user_password] = @config['password'] - mod_config[:allocated_storage] = @config["storage"] if @config["storage"] - end - if @config["preferred_maintenance_window"] - mod_config[:preferred_maintenance_window] = @config["preferred_maintenance_window"] - end - - 
MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) - - MU.retrier(wait: 10, max: 240, loop_if: Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" }) { |retries, _wait| - if retries > 0 and retries % 10 == 0 - MU.log "Waiting for modifications on RDS database #{@cloud_id}...", MU::NOTICE - end - } - - end - - # Maybe wait for DB instance to be in available state. DB should still be writeable at this state - if @config['allow_major_version_upgrade'] && @config["creation_style"] == "new" - MU.log "Setting major database version upgrade on #{@cloud_id}'" - - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance( - db_instance_identifier: @cloud_id, - apply_immediately: true, - allow_major_version_upgrade: true - ) - end - - MU.log "Database #{@config['name']} (#{@mu_name}) is ready to use" - @cloud_id + params end # Create the database cluster described in this instance @@ -1316,6 +1154,198 @@ def add_cluster_node createDb end + # creation_style = new, existing, new_snapshot, existing_snapshot + def create_basic + params = genericParams + params[:preferred_backup_window] = @config["preferred_backup_window"] + params[:backup_retention_period] = @config["backup_retention_period"] + params[:storage_encrypted] = @config["storage_encrypted"] + params[:allocated_storage] = @config["storage"] + params[:db_name] = @config["db_name"] + params[:master_username] = @config['master_user'] + params[:master_user_password] = @config['password'] + params[:vpc_security_group_ids] = @config["vpc_security_group_ids"] + params[:engine_version] = @config["engine_version"] + params[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] + params[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] + + if @config['add_cluster_node'] + 
params[:db_cluster_identifier] = @config["cluster_identifier"] + else + params[:storage_type] = @config["storage_type"] + params[:port] = @config["port"] if @config["port"] + params[:iops] = @config["iops"] if @config['storage_type'] == "io1" + params[:multi_az] = @config['multi_az_on_create'] + end + + MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { + if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) + MU.log "Creating database instance #{@cloud_id} from snapshot #{@config["snapshot_id"]}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(params) + else + MU.log "Creating pristine database instance #{@cloud_id} (#{@config['name']}) in #{@config['region']}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) + end + } + end + + # creation_style = point_in_time + def create_point_in_time + @config["source"].kitten(@deploy, debug: true) + if !@config["source"].id + MU.log "Database '#{@config['name']}' couldn't resolve cloud id for source database", MU::ERR, details: @config["source"].to_h + raise MuError, "Database '#{@config['name']}' couldn't resolve cloud id for source database" + end + + params = genericParams + params.delete(:db_instance_identifier) + params[:source_db_instance_identifier] = @config["source"].id + params[:target_db_instance_identifier] = @cloud_id + params[:restore_time] = @config['restore_time'] unless @config["restore_time"] == "latest" + params[:use_latest_restorable_time] = true if @config['restore_time'] == "latest" + + MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { + MU.log "Creating database instance #{@cloud_id} based on point in time backup #{@config['restore_time']} of #{@config['source'].id}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_to_point_in_time(params) + } + 
end + + # creation_style = new, existing and read_replica_of is not nil + def create_read_replica + @config["source"].kitten(@deploy, debug: true) + if !@config["source"].id + MU.log "Database '#{@config['name']}' couldn't resolve cloud id for source database", MU::ERR, details: @config["source"].to_h + raise MuError, "Database '#{@config['name']}' couldn't resolve cloud id for source database" + end + + params = { + db_instance_identifier: @cloud_id, + source_db_instance_identifier: @config["source"].id, + db_instance_class: @config["size"], + auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], + publicly_accessible: @config["publicly_accessible"], + tags: allTags, + db_subnet_group_name: @config["subnet_group_name"], + storage_type: @config["storage_type"] + } + if @config["source"].region and @config['region'] != @config["source"].region + params[:source_db_instance_identifier] = MU::Cloud::AWS::Database.getARN(@config["source"].id, "db", "rds", region: @config["source"].region, credentials: @config['credentials']) + end + + params[:port] = @config["port"] if @config["port"] + params[:iops] = @config["iops"] if @config['storage_type'] == "io1" + + on_retry = Proc.new { |e| + if e.class == Aws::RDS::Errors::DBSubnetGroupNotAllowedFault + MU.log "Being forced to use source database's subnet group: #{e.message}", MU::WARN + params.delete(:db_subnet_group_name) + end + } + + MU.retrier([Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotAllowedFault], max: 5, wait: 10, on_retry: on_retry) { + MU.log "Creating read replica database instance #{@cloud_id} for #{@config['source'].id}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(params) + } + end + + # Sit on our hands until we show as available + def wait_until_available + MU.retrier(wait: 10, max: 360, loop_if: Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" }) { |retries, _wait| + 
if retries > 0 and retries % 20 == 0 + MU.log "Waiting for RDS database #{@cloud_id} to be ready...", MU::NOTICE + end + } + end + + def do_naming + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_desc.db_instance_identifier, target: "#{cloud_desc.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) + MU.log "Database #{@config['name']} is at #{cloud_desc.endpoint.address}", MU::SUMMARY + if @config['auth_vault'] + MU.log "knife vault show #{@config['auth_vault']['vault']} #{@config['auth_vault']['item']} for Database #{@config['name']} credentials", MU::SUMMARY + end + end + + # Create a plain database instance or read replica, as described in our + # +@config+. + # @return [String]: The cloud provider's identifier for this database instance. + def createDb + + if @config['creation_style'] == "point_in_time" + create_point_in_time + elsif @config['read_replica_of'] + create_read_replica + else + create_basic + end + + wait_until_available + do_naming + + # If referencing an existing DB, insert this deploy's DB security group so it can access the thing + if @config["creation_style"] == 'existing' + vpc_sg_ids = cloud_desc.vpc_security_groups.map { |sg| sg.vpc_security_group_id } + + localdeploy_rule = @deploy.findLitterMate(type: "firewall_rule", name: "database"+@config['name']) + if localdeploy_rule.nil? 
+ raise MU::MuError, "Database #{@config['name']} failed to find its generic security group 'database#{@config['name']}'" + end + MU.log "Found this deploy's DB security group: #{localdeploy_rule.cloud_id}", MU::DEBUG + vpc_sg_ids << localdeploy_rule.cloud_id + mod_config = Hash.new + mod_config[:vpc_security_group_ids] = vpc_sg_ids + mod_config[:db_instance_identifier] = @cloud_id + + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) + MU.log "Modified database #{@cloud_id} with new security groups: #{mod_config}", MU::NOTICE + end + + # When creating from a snapshot or replicating an existing database, + # some of the create arguments that we'd want to carry over aren't + # applicable- but we can apply them after the fact with a modify. + if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"]) or @config["read_replica_of"] + mod_config = { + db_instance_identifier: @cloud_id, + vpc_security_group_ids: @config["vpc_security_group_ids"], + apply_immediately: true + } + if !@config["read_replica_of"] + mod_config[:preferred_backup_window] = @config["preferred_backup_window"] + mod_config[:backup_retention_period] = @config["backup_retention_period"] + mod_config[:engine_version] = @config["engine_version"] + mod_config[:allow_major_version_upgrade] = @config["allow_major_version_upgrade"] if @config['allow_major_version_upgrade'] + mod_config[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] + mod_config[:master_user_password] = @config['password'] + mod_config[:allocated_storage] = @config["storage"] if @config["storage"] + end + if @config["preferred_maintenance_window"] + mod_config[:preferred_maintenance_window] = @config["preferred_maintenance_window"] + end + + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) + + MU.retrier(wait: 10, max: 240, loop_if: 
Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" }) { |retries, _wait| + if retries > 0 and retries % 10 == 0 + MU.log "Waiting for modifications on RDS database #{@cloud_id}...", MU::NOTICE + end + } + + end + + # Maybe wait for DB instance to be in available state. DB should still be writeable at this state + if @config['allow_major_version_upgrade'] && @config["creation_style"] == "new" + MU.log "Setting major database version upgrade on #{@cloud_id}'" + + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance( + db_instance_identifier: @cloud_id, + apply_immediately: true, + allow_major_version_upgrade: true + ) + end + + MU.log "Database #{@config['name']} (#{@mu_name}) is ready to use" + @cloud_id + end + def run_sql_commands MU.log "Running initial SQL commands on #{@config['name']}", details: @config['run_sql_on_deploy'] From d6a16f4360da4f4df08e1d7ce636406a094603c3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 24 Mar 2020 12:21:29 -0400 Subject: [PATCH 012/124] AWS::Database: initial pass on refactoring the house of horrors that is createDbCluster --- modules/mu/clouds/aws/database.rb | 222 ++++++++++++++---------------- 1 file changed, 103 insertions(+), 119 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index a84fb1bc4..e826a9ef7 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -1,4 +1,4 @@ -# Copyright:: Copyright (c) 2014 eGlobalTech, Inc., all rights reserved +## Copyright:: Copyright (c) 2014 eGlobalTech, Inc., all rights reserved # # Licensed under the BSD-3 license (the "License"); # you may not use this file except in compliance with the License. 
@@ -188,20 +188,49 @@ def getPassword end def genericParams - params = { - db_instance_identifier: @cloud_id, - db_instance_class: @config["size"], - engine: @config["engine"], - auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], - license_model: @config["license_model"], - db_subnet_group_name: @config["subnet_group_name"], - publicly_accessible: @config["publicly_accessible"], - copy_tags_to_snapshot: true, - tags: allTags - } + params = if @config['create_cluster'] + paramhash = { + db_cluster_identifier: @cloud_id, + engine: @config["engine"], + db_subnet_group_name: @config["subnet_group_name"].downcase, + vpc_security_group_ids: @config["vpc_security_group_ids"], + tags: allTags + } + if @config['cloudwatch_logs'] + paramhash[:enable_cloudwatch_logs_exports ] = @config['cloudwatch_logs'] + end + if @config['cluster_mode'] + paramhash[:engine_mode] = @config['cluster_mode'] + if @config['cluster_mode'] == "serverless" + paramhash[:scaling_configuration] = { + :auto_pause => @config['serverless_scaling']['auto_pause'], + :min_capacity => @config['serverless_scaling']['min_capacity'], + :max_capacity => @config['serverless_scaling']['max_capacity'], + :seconds_until_auto_pause => @config['serverless_scaling']['seconds_until_auto_pause'] + } + end + end + paramhash + else + { + db_instance_identifier: @cloud_id, + db_instance_class: @config["size"], + engine: @config["engine"], + auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], + license_model: @config["license_model"], + db_subnet_group_name: @config["subnet_group_name"], + publicly_accessible: @config["publicly_accessible"], + copy_tags_to_snapshot: true, + tags: allTags + } + end if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - params[:db_snapshot_identifier] = @config["snapshot_id"] + if @config['create_cluster'] + params[:snapshot_identifier] = @config["snapshot_id"] + else + params[:db_snapshot_identifier] = @config["snapshot_id"] + end end params 
@@ -210,92 +239,17 @@ def genericParams # Create the database cluster described in this instance # @return [String]: The cloud provider's identifier for this database cluster. def createDbCluster - cluster_config_struct = { - db_cluster_identifier: @config['identifier'], - # downcasing @config["subnet_group_name"] becuase the API is choking on upper case. - db_subnet_group_name: @config["subnet_group_name"].downcase, - vpc_security_group_ids: @config["vpc_security_group_ids"], - tags: allTags - } - cluster_config_struct[:port] = @config["port"] if @config["port"] - - if @config['cluster_mode'] - cluster_config_struct[:engine_mode] = @config['cluster_mode'] - if @config['cluster_mode'] == "serverless" - cluster_config_struct[:scaling_configuration] = { - :auto_pause => @config['serverless_scaling']['auto_pause'], - :min_capacity => @config['serverless_scaling']['min_capacity'], - :max_capacity => @config['serverless_scaling']['max_capacity'], - :seconds_until_auto_pause => @config['serverless_scaling']['seconds_until_auto_pause'] - } - end - end - - if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - cluster_config_struct[:snapshot_identifier] = @config["snapshot_id"] - cluster_config_struct[:engine] = @config["engine"] - cluster_config_struct[:engine_version] = @config["engine_version"] - cluster_config_struct[:database_name] = @cloud_id - end - - if @config["creation_style"] == "new" - cluster_config_struct[:backup_retention_period] = @config["backup_retention_period"] - cluster_config_struct[:database_name] = @cloud_id - cluster_config_struct[:db_cluster_parameter_group_name] = @config["parameter_group_name"] - cluster_config_struct[:engine] = @config["engine"] - cluster_config_struct[:engine_version] = @config["engine_version"] - cluster_config_struct[:master_username] = @config["master_user"] - cluster_config_struct[:master_user_password] = @config["password"] - cluster_config_struct[:preferred_backup_window] = 
@config["preferred_backup_window"] - cluster_config_struct[:preferred_maintenance_window] = @config["preferred_maintenance_window"] - end - - if @config["creation_style"] == "point_in_time" - cluster_config_struct[:source_db_cluster_identifier] = @config["source"].id - cluster_config_struct[:restore_to_time] = @config["restore_time"] unless @config["restore_time"] == "latest" - cluster_config_struct[:use_latest_restorable_time] = true if @config["restore_time"] == "latest" - end - - if @config['cloudwatch_logs'] - cluster_config_struct[:enable_cloudwatch_logs_exports ] = @config['cloudwatch_logs'] - end - - attempts = 0 - begin - if @config["creation_style"] == "new" - MU.log "Creating new database cluster #{@config['identifier']}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster(cluster_config_struct) - elsif %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - MU.log "Creating new database cluster #{@config['identifier']} from snapshot #{@config["snapshot_id"]}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_from_snapshot(cluster_config_struct) - elsif @config["creation_style"] == "point_in_time" - MU.log "Creating new database cluster #{@config['identifier']} from point in time backup #{@config["restore_time"]} of #{@config["source"].id}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_to_point_in_time(cluster_config_struct) - end - rescue Aws::RDS::Errors::InvalidParameterValue => e - if attempts < 5 - MU.log "Got #{e.inspect} while creating database cluster #{@config['identifier']}, will retry a few times in case of transient errors.", MU::WARN, details: cluster_config_struct - attempts += 1 - sleep 10 - retry - else - MU.log "Exhausted retries trying to create database cluster #{@config['identifier']}", MU::ERR, details: e.inspect - raise MuError, "Exhausted retries trying to 
create database cluster #{@config['identifier']}" - end + if @config['creation_style'] == "point_in_time" + create_point_in_time + else + create_basic end - attempts = 0 - loop do - MU.log "Waiting for #{@config['identifier']} to become available", MU::NOTICE if attempts % 5 == 0 - attempts += 1 - cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(@config['identifier'], region: @config['region'], credentials: @config['credentials']) - break unless cluster.status != "available" - sleep 30 - end + wait_until_available if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"]) modify_db_cluster_struct = { - db_cluster_identifier: @config['identifier'], + db_cluster_identifier: @cloud_id, apply_immediately: true, backup_retention_period: @config["backup_retention_period"], db_cluster_parameter_group_name: @config["parameter_group_name"], @@ -306,19 +260,15 @@ def createDbCluster modify_db_cluster_struct[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster(modify_db_cluster_struct) - attempts = 0 - loop do - MU.log "Waiting for #{@config['identifier']} to become available", MU::NOTICE if attempts % 5 == 0 - attempts += 1 - cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(@config['identifier'], region: @config['region'], credentials: @config['credentials']) - break unless cluster.status != "available" - sleep 30 - end + MU.retrier(wait: 10, max: 240, loop_if: Proc.new { cloud_desc(use_cache: false).status != "available" }) { |retries, _wait| + if retries > 0 and retries % 10 == 0 + MU.log "Waiting for modifications on RDS cluster #{@cloud_id}...", MU::NOTICE + end + } end - cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(@config['identifier'], region: @config['region'], credentials: @config['credentials']) - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: 
cluster.db_cluster_identifier, target: "#{cluster.endpoint}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) - return cluster.db_cluster_identifier + do_naming + @cloud_id end # Create a subnet group for a database. @@ -1161,13 +1111,19 @@ def create_basic params[:backup_retention_period] = @config["backup_retention_period"] params[:storage_encrypted] = @config["storage_encrypted"] params[:allocated_storage] = @config["storage"] - params[:db_name] = @config["db_name"] params[:master_username] = @config['master_user'] params[:master_user_password] = @config['password'] params[:vpc_security_group_ids] = @config["vpc_security_group_ids"] params[:engine_version] = @config["engine_version"] params[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] - params[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] + + if @config['create_cluster'] + params[:database_name] = @cloud_id + params[:db_cluster_parameter_group_name] = @config["parameter_group_name"] + else + params[:db_name] = @config["db_name"] + params[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] + end if @config['add_cluster_node'] params[:db_cluster_identifier] = @config["cluster_identifier"] @@ -1180,11 +1136,19 @@ def create_basic MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - MU.log "Creating database instance #{@cloud_id} from snapshot #{@config["snapshot_id"]}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(params) + MU.log "Creating database #{@config['create_cluster'] ? 
"cluster" : "instance" } #{@cloud_id} from snapshot #{@config["snapshot_id"]}" + if @config['create_cluster'] + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_from_snapshot(params) + else + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(params) + end else - MU.log "Creating pristine database instance #{@cloud_id} (#{@config['name']}) in #{@config['region']}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) + MU.log "Creating pristine database #{@config['create_cluster'] ? "cluster" : "instance" } #{@cloud_id} (#{@config['name']}) in #{@config['region']}" + if @config['create_cluster'] + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) + else + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster(params) + end end } end @@ -1199,14 +1163,24 @@ def create_point_in_time params = genericParams params.delete(:db_instance_identifier) - params[:source_db_instance_identifier] = @config["source"].id - params[:target_db_instance_identifier] = @cloud_id + if @config['create_cluster'] + params[:source_db_cluster_identifier] = @config["source"].id + params[:restore_to_time] = @config["restore_time"] unless @config["restore_time"] == "latest" + else + params[:source_db_instance_identifier] = @config["source"].id + params[:target_db_instance_identifier] = @cloud_id + end params[:restore_time] = @config['restore_time'] unless @config["restore_time"] == "latest" params[:use_latest_restorable_time] = true if @config['restore_time'] == "latest" + MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { - MU.log "Creating database instance #{@cloud_id} based on point in time backup #{@config['restore_time']} of #{@config['source'].id}" - MU::Cloud::AWS.rds(region: 
@config['region'], credentials: @config['credentials']).restore_db_instance_to_point_in_time(params) + MU.log "Creating database #{@config['create_cluster'] ? "cluster" : "instance" } #{@cloud_id} based on point in time backup #{@config['restore_time']} of #{@config['source'].id}" + if @config['create_cluster'] + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_to_point_in_time(params) + else + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_to_point_in_time(params) + end } end @@ -1250,16 +1224,26 @@ def create_read_replica # Sit on our hands until we show as available def wait_until_available - MU.retrier(wait: 10, max: 360, loop_if: Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" }) { |retries, _wait| + loop_if = if @config["create_cluster"] + Proc.new { cloud_desc(use_cache: false).status != "available" } + else + Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" } + end + MU.retrier(wait: 10, max: 360, loop_if: loop_if) { |retries, _wait| if retries > 0 and retries % 20 == 0 - MU.log "Waiting for RDS database #{@cloud_id} to be ready...", MU::NOTICE + MU.log "Waiting for RDS #{@config['create_cluster'] ? 
"cluster" : "database" } #{@cloud_id} to be ready...", MU::NOTICE end } end def do_naming - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_desc.db_instance_identifier, target: "#{cloud_desc.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) - MU.log "Database #{@config['name']} is at #{cloud_desc.endpoint.address}", MU::SUMMARY + if @config["create_cluster"] + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_desc.db_cluster_identifier, target: "#{cloud_desc.endpoint}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) + MU.log "Database cluster #{@config['name']} is at #{cloud_desc.endpoint}", MU::SUMMARY + else + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_desc.db_instance_identifier, target: "#{cloud_desc.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) + MU.log "Database #{@config['name']} is at #{cloud_desc.endpoint.address}", MU::SUMMARY + end if @config['auth_vault'] MU.log "knife vault show #{@config['auth_vault']['vault']} #{@config['auth_vault']['item']} for Database #{@config['name']} credentials", MU::SUMMARY end From 4b707c13cdd9c46cbec55411f5eb67bdc59625a2 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 24 Mar 2020 15:55:30 -0400 Subject: [PATCH 013/124] AWS::Database: cluster builds in working order --- modules/mu/clouds/aws/database.rb | 27 ++++++++++++++++----------- modules/mu/config/database.rb | 1 + 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index e826a9ef7..facbed4ec 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -239,6 +239,8 @@ def genericParams # Create the database cluster described in this instance # @return [String]: The cloud provider's identifier for this database cluster. 
def createDbCluster + @config["cluster_identifier"] ||= @cloud_id + if @config['creation_style'] == "point_in_time" create_point_in_time else @@ -1107,31 +1109,34 @@ def add_cluster_node # creation_style = new, existing, new_snapshot, existing_snapshot def create_basic params = genericParams - params[:preferred_backup_window] = @config["preferred_backup_window"] - params[:backup_retention_period] = @config["backup_retention_period"] params[:storage_encrypted] = @config["storage_encrypted"] - params[:allocated_storage] = @config["storage"] - params[:master_username] = @config['master_user'] params[:master_user_password] = @config['password'] params[:vpc_security_group_ids] = @config["vpc_security_group_ids"] params[:engine_version] = @config["engine_version"] params[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] if @config['create_cluster'] - params[:database_name] = @cloud_id - params[:db_cluster_parameter_group_name] = @config["parameter_group_name"] + params[:database_name] = @config["db_name"] + params[:db_cluster_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] else - params[:db_name] = @config["db_name"] + params[:db_name] = @config["db_name"] if !@config['add_cluster_node'] params[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] end - if @config['add_cluster_node'] + if @config['create_cluster'] or @config['add_cluster_node'] params[:db_cluster_identifier] = @config["cluster_identifier"] else params[:storage_type] = @config["storage_type"] + params[:allocated_storage] = @config["storage"] + params[:multi_az] = @config['multi_az_on_create'] + end + + if !@config['add_cluster_node'] + params[:backup_retention_period] = @config["backup_retention_period"] + params[:preferred_backup_window] = @config["preferred_backup_window"] + params[:master_username] = @config['master_user'] params[:port] = @config["port"] 
if @config["port"] params[:iops] = @config["iops"] if @config['storage_type'] == "io1" - params[:multi_az] = @config['multi_az_on_create'] end MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { @@ -1145,9 +1150,9 @@ def create_basic else MU.log "Creating pristine database #{@config['create_cluster'] ? "cluster" : "instance" } #{@cloud_id} (#{@config['name']}) in #{@config['region']}" if @config['create_cluster'] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) - else MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster(params) + else + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) end end } diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index a1f8ab0e7..548aee6af 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -358,6 +358,7 @@ def self.validate(db, configurator) # duplicating the declaration of the master as a new first-class # resource and tweaking it. 
if db["create_cluster"] and db['cluster_mode'] != "serverless" + db["add_cluster_node"] = false (1..db["cluster_node_count"]).each{ |num| node = Marshal.load(Marshal.dump(db)) node["name"] = "#{db['name']}-#{num}" From 3f88398d8a9c99cbe9b3b8df188c252d7c7caf42 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 24 Mar 2020 22:53:26 -0400 Subject: [PATCH 014/124] AWS::Database: refactor .cleanup some more --- modules/mu/clouds/aws/database.rb | 224 ++++++++++++++---------------- modules/tests/rds.yaml | 12 +- 2 files changed, 109 insertions(+), 127 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index facbed4ec..33f31a764 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -634,6 +634,28 @@ def self.quality MU::Cloud::RELEASE end + # @return [Array] + def self.threaded_resource_purge(describe_method, list_method, id_method, arn_type, region, credentials, ignoremaster, deletion_proc) + deletia = [] + resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).send(describe_method) + resp.send(list_method).each { |resource| + arn = MU::Cloud::AWS::Database.getARN(resource.send(id_method), arn_type, "rds", region: region, credentials: credentials) + tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list + + if should_delete?(tags, ignoremaster) + deletia << resource.send(id_method) + end + } + + threads = [] + deletia.each { |id| + threads << Thread.new(id) { |resource_id| + deletion_proc.call(resource_id) + } + } + + threads + end # Called by {MU::Cleanup}. Locates resources that were created by the # currently-loaded deployment, and purges them. 
@@ -643,82 +665,35 @@ def self.quality # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU.curRegion, flags: {}) - resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_instances - - threads = [] - resp.db_instances.each { |db| - arn = MU::Cloud::AWS::Database.getARN(db.db_instance_identifier, "db", "rds", region: region, credentials: credentials) - tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - - if should_delete?(tags, ignoremaster) - threads << Thread.new(db) { |mydb| - terminate_rds_instance(mydb, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: db.db_instance_identifier, mu_name: db.db_instance_identifier.upcase, credentials: credentials) - } - end + delete_dbs = Proc.new { |id| + terminate_rds_instance(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials) } - threads.each { |t| + + threaded_resource_purge(:describe_db_instances, :db_instances, :db_instance_identifier, "db", region, credentials, ignoremaster, delete_dbs).each { |t| t.join } - # Cleanup database clusters - threads = [] - resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_clusters - resp.db_clusters.each { |cluster| - cluster_id = cluster.db_cluster_identifier - arn = MU::Cloud::AWS::Database.getARN(cluster_id, "cluster", "rds", region: region, credentials: credentials) - tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - - if should_delete?(tags, ignoremaster) - threads << Thread.new(cluster) { |mydbcluster| - terminate_rds_cluster(mydbcluster, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: cluster_id, mu_name: cluster_id.upcase, credentials: 
credentials) - } - end + delete_clusters = Proc.new { |id| + terminate_rds_cluster(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials) } - - # Wait for all of the database clusters to finish cleanup before proceeding - threads.each { |t| + threaded_resource_purge(:describe_db_clusters, :db_clusters, :db_cluster_identifier, "cluster", region, credentials, ignoremaster, delete_clusters).each { |t| t.join } - threads = [] - # Cleanup database subnet group - MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_subnet_groups.db_subnet_groups.each { |sub_group| - sub_group_id = sub_group.db_subnet_group_name - arn = MU::Cloud::AWS::Database.getARN(sub_group_id, "subgrp", "rds", region: region, credentials: credentials) - tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - - if should_delete?(tags, ignoremaster) - threads << Thread.new(sub_group_id) { |mysubgroup| - delete_subnet_group(mysubgroup, region: region) unless noop - } - end + delete_subnet_groups = Proc.new { |id| + delete_subnet_group(id, region: region) unless noop } - - # Cleanup database parameter group - MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_parameter_groups.db_parameter_groups.each { |param_group| - param_group_id = param_group.db_parameter_group_name - arn = MU::Cloud::AWS::Database.getARN(param_group_id, "pg", "rds", region: region, credentials: credentials) - tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list + threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster, delete_subnet_groups) - if should_delete?(tags, ignoremaster) - threads << Thread.new(param_group_id) { |myparamgroup| - 
delete_db_parameter_group(myparamgroup, region: region) unless noop - } - end + delete_parameter_groups = Proc.new { |id| + delete_db_parameter_group(id, region: region) unless noop } - - # Cleanup database cluster parameter group - MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_cluster_parameter_groups.db_cluster_parameter_groups.each { |param_group| - param_group_id = param_group.db_cluster_parameter_group_name - arn = MU::Cloud::AWS::Database.getARN(param_group_id, "cluster-pg", "rds", region: region, credentials: credentials) - tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list + threads.concat threaded_resource_purge(:describe_db_parameter_groups, :db_parameter_groups, :db_parameter_group_name, "pg", region, credentials, ignoremaster, delete_parameter_groups) - if should_delete?(tags, ignoremaster) - threads << Thread.new(param_group_id) { |myparamgroup| - delete_db_cluster_parameter_group(myparamgroup, region: region) unless noop - } - end + delete_cluster_parameter_groups = Proc.new { |id| + delete_db_cluster_parameter_group(id, region: region) unless noop } + threads.concat threaded_resource_purge(:describe_db_cluster_parameter_groups, :db_cluster_parameter_groups, :db_cluster_parameter_group_name, "pg", region, credentials, ignoremaster, delete_cluster_parameter_groups) # Wait for all of the databases subnet/parameter groups to finish cleanup before proceeding threads.each { |t| @@ -1430,18 +1405,22 @@ def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, ma # @param db [OpenStruct]: The cloud provider's description of the database artifact # @return [void] def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil) - - db ||= MU::MommaCat.findStray( + db ||= MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region, 
credentials: credentials) if cloud_id + db_obj ||= MU::MommaCat.findStray( "AWS", "database", region: region, deploy_id: deploy_id, cloud_id: cloud_id, - mu_name: mu_name + mu_name: mu_name, + dummy_ok: true ).first - cloud_id ||= db.cloud_id + if db_obj + cloud_id ||= db_obj.cloud_id + db ||= db_obj.cloud_desc + end - raise MuError, "terminate_rds_instance requires a non-nil database descriptor" if db.nil? + raise MuError, "terminate_rds_instance requires a non-nil database descriptor (#{cloud_id})" if db.nil? rdssecgroups = [] begin @@ -1459,7 +1438,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M } end - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: db.endpoint.address, cloudclass: MU::Cloud::Database, delete: true) + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: db.endpoint.address, cloudclass: MU::Cloud::Database, delete: true) if !noop if %w{deleting deleted}.include?(db.db_instance_status) MU.log "#{cloud_id} has already been terminated", MU::WARN @@ -1486,13 +1465,13 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists], wait: 60, max: 20, on_retry: on_retry) { MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params) } + MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound]) { + del_db = MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region) + break if del_db.nil? or del_db.db_instance_status == "deleted" + } end end - MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound]) { - del_db = MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region) - break if del_db.nil? 
or del_db.db_instance_status == "deleted" - } # RDS security groups can depend on EC2 security groups, do these last begin @@ -1505,15 +1484,15 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M # Cleanup the database vault groomer = - if db and db.respond_to?(:config) and db.config - db.config.has_key?("groomer") ? db.config["groomer"] : MU::Config.defaultGroomer + if db_obj and db_obj.respond_to?(:config) and db_obj.config + db_obj.config.has_key?("groomer") ? db_obj.config["groomer"] : MU::Config.defaultGroomer else MU::Config.defaultGroomer end groomclass = MU::Groomer.loadGroomer(groomer) groomclass.deleteSecret(vault: cloud_id.upcase) if !noop - MU.log "#{cloud_id} has been terminated" + MU.log "#{cloud_id} has been terminated" if !noop end private_class_method :terminate_rds_instance @@ -1521,75 +1500,80 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M # @param cluster [OpenStruct]: The cloud provider's description of the database artifact # @return [void] def self.terminate_rds_cluster(cluster, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil) - raise MuError, "terminate_rds_cluster requires a non-nil database cluster descriptor" if cluster.nil? - cluster_id = cluster.db_cluster_identifier - cluster_obj = MU::MommaCat.findStray( + cluster ||= MU::Cloud::AWS::Database.getDatabaseClusterById(cloud_id, region: region, credentials: credentials) if cloud_id + cluster_obj ||= MU::MommaCat.findStray( "AWS", "database", region: region, deploy_id: deploy_id, cloud_id: cloud_id, - credentials: credentials, - mu_name: mu_name + mu_name: mu_name, + dummy_ok: true ).first + if cluster_obj + cloud_id ||= cluster_obj.cloud_id + cluster ||= cluster_obj.cloud_desc + end + + raise MuError, "terminate_rds_cluster requires a non-nil database cluster descriptor" if cluster.nil? # We can use an AWS waiter for this. 
unless cluster.status == "available" loop do - MU.log "Waiting for #{cluster_id} to be in a removable state...", MU::NOTICE - cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(cluster_id, region: region, credentials: credentials) + MU.log "Waiting for #{cloud_id} to be in a removable state...", MU::NOTICE + cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(cloud_id, region: region, credentials: credentials) break unless %w{creating modifying backing-up}.include?(cluster.status) sleep 60 end end - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cluster_id, target: cluster.endpoint, cloudclass: MU::Cloud::Database, delete: true) + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: cluster.endpoint, cloudclass: MU::Cloud::Database, delete: true) if %w{deleting deleted}.include?(cluster.status) - MU.log "#{cluster_id} has already been terminated", MU::WARN + MU.log "#{cloud_id} has already been terminated", MU::WARN else - unless noop - def self.clusterSkipSnap(cluster_id, region, credentials) - # We're calling this several times so lets declare it once - MU.log "Terminating #{cluster_id}. Not saving final snapshot" - MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(db_cluster_identifier: cluster_id, skip_final_snapshot: true) - end + clusterSkipSnap = Proc.new { + MU.log "Terminating #{cloud_id}. Not saving final snapshot" + MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(db_cluster_identifier: cloud_id, skip_final_snapshot: true) if !noop + } - def self.clusterCreateSnap(cluster_id, region, credentials) - MU.log "Terminating #{cluster_id}. Saving final snapshot: #{cluster_id}-mufinal" - MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(db_cluster_identifier: cluster_id, skip_final_snapshot: false, final_db_snapshot_identifier: "#{cluster_id}-mufinal") + clusterCreateSnap = Proc.new { + MU.log "Terminating #{cloud_id}. 
Saving final snapshot: #{cloud_id}-mufinal" + MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(db_cluster_identifier: cloud_id, skip_final_snapshot: false, final_db_snapshot_identifier: "#{cloud_id}-mufinal") if !noop + } + + retries = 0 + begin + skipsnapshots ? clusterSkipSnap.call : clusterCreateSnap.call + rescue Aws::RDS::Errors::InvalidDBClusterStateFault => e + if retries < 5 + MU.log "#{cloud_id} is not in a removable state, retrying several times", MU::WARN + retries += 1 + sleep 30 + retry + else + MU.log "#{cloud_id} is not in a removable state after several retries, giving up. #{e.inspect}", MU::ERR end + rescue Aws::RDS::Errors::DBClusterSnapshotAlreadyExistsFault + clusterSkipSnap.call + MU.log "Snapshot of #{cloud_id} already exists", MU::WARN + rescue Aws::RDS::Errors::DBClusterQuotaExceeded + clusterSkipSnap.call + MU.log "Snapshot quota exceeded while deleting #{cloud_id}", MU::ERR + end - retries = 0 - begin - skipsnapshots ? clusterSkipSnap(cluster_id, region, credentials) : clusterCreateSnap(cluster_id, region, credentials) - rescue Aws::RDS::Errors::InvalidDBClusterStateFault => e - if retries < 5 - MU.log "#{cluster_id} is not in a removable state, retrying several times", MU::WARN - retries += 1 - sleep 30 - retry - else - MU.log "#{cluster_id} is not in a removable state after several retries, giving up. 
#{e.inspect}", MU::ERR - end - rescue Aws::RDS::Errors::DBClusterSnapshotAlreadyExistsFault - clusterSkipSnap(cluster_id, region, credentials) - MU.log "Snapshot of #{cluster_id} already exists", MU::WARN - rescue Aws::RDS::Errors::DBClusterQuotaExceeded - clusterSkipSnap(cluster_id, region, credentials) - MU.log "Snapshot quota exceeded while deleting #{cluster_id}", MU::ERR + unless noop + loop do + MU.log "Waiting for #{cloud_id} to terminate", MU::NOTICE + cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(cloud_id, region: region, credentials: credentials) + break unless cluster + sleep 30 end end end # We're wating until getDatabaseClusterById returns nil. This assumes the database cluster object doesn't linger around in "deleted" state for a while. - loop do - MU.log "Waiting for #{cluster_id} to terminate", MU::NOTICE - cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(cluster_id, region: region, credentials: credentials) - break unless cluster - sleep 30 - end # Cleanup the cluster vault groomer = @@ -1600,9 +1584,9 @@ def self.clusterCreateSnap(cluster_id, region, credentials) end groomclass = MU::Groomer.loadGroomer(groomer) - groomclass.deleteSecret(vault: cluster_id.upcase) if !noop + groomclass.deleteSecret(vault: cloud_id.upcase) if !noop - MU.log "#{cluster_id} has been terminated" + MU.log "#{cloud_id} has been terminated" if !noop end private_class_method :terminate_rds_cluster diff --git a/modules/tests/rds.yaml b/modules/tests/rds.yaml index d901f6ed5..58e499bae 100644 --- a/modules/tests/rds.yaml +++ b/modules/tests/rds.yaml @@ -5,10 +5,8 @@ vpcs: - name: rdstests databases: - name: pgcluster - size: db.m5.large + size: db.t3.medium engine: postgres - engine_version: 9.6.6 - add_cluster_node: true allow_major_version_upgrade: true auto_minor_version_upgrade: false backup_retention_period: 10 @@ -17,8 +15,8 @@ databases: vpc: name: rdstests master_user: Bob -- name: mysql-w-replica - size: db.t2.micro +- name: mysql-base + size: 
db.t2.small engine: mysql storage: 5 vpc: @@ -26,12 +24,12 @@ databases: create_read_replica: true multi_az_on_create: true master_user: Bob -- name: point-in-time +- name: mysql-point-in-time creation_style: point_in_time size: db.t2.micro engine: mysql storage: 5 source: - name: mysql-w-replica + name: mysql-base vpc: name: rdstests From 5ba9a2506c9f597720e13bc691d1270a24f89655 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 24 Mar 2020 23:08:00 -0400 Subject: [PATCH 015/124] AWS::Database: fork out engine-specific bits of run_sql_commands --- modules/mu/clouds/aws/database.rb | 67 ++++++++++++++++++------------- 1 file changed, 40 insertions(+), 27 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 33f31a764..aa9497243 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -1343,33 +1343,10 @@ def run_sql_commands end # Running SQL on deploy - if @config['engine'] == "postgres" - autoload :PG, 'pg' - begin - conn = PG::Connection.new( - :host => address, - :port => port, - :user => @config['master_user'], - :dbname => cloud_desc.db_name, - :password => @config['password'] - ) - @config['run_sql_on_deploy'].each { |cmd| - MU.log "Running #{cmd} on database #{@config['name']}" - conn.exec(cmd) - } - conn.finish - rescue PG::Error => e - MU.log "Failed to run initial SQL commands on #{@config['name']} via #{address}:#{port}: #{e.inspect}", MU::WARN, details: conn - end - elsif @config['engine'] == "mysql" - autoload :Mysql, 'mysql' - MU.log "Initiating mysql connection to #{address}:#{port} as #{@config['master_user']}" - conn = Mysql.new(address, @config['master_user'], @config['password'], "mysql", port) - @config['run_sql_on_deploy'].each { |cmd| - MU.log "Running #{cmd} on database #{@config['name']}" - conn.query(cmd) - } - conn.close + if @config['engine'].match(/postgres/) + MU::Cloud::AWS::Database.run_sql_postgres(address, port, @config['master_user'], 
@config['password'], cloud_desc.db_name, @config['run_sql_on_deploy'], @config['name']) + elsif @config['engine'].match(/mysql|maria/) + MU::Cloud::AWS::Database.run_sql_mysql(address, port, @config['master_user'], @config['password'], cloud_desc.db_name, @config['run_sql_on_deploy'], @config['name']) end # close the SQL on deploy sessions @@ -1382,6 +1359,42 @@ def run_sql_commands end end + def self.run_sql_postgres(address, port, user, password, db, cmds = [], identifier = nil) + identifier ||= address + MU.log "Initiating postgres connection to #{address}:#{port} as #{user}" + autoload :PG, 'pg' + begin + conn = PG::Connection.new( + :host => address, + :port => port, + :user => user, + :password => password, + :dbname => db + ) + cmds.each { |cmd| + MU.log "Running #{cmd} on database #{identifier}" + conn.exec(cmd) + } + conn.finish + rescue PG::Error => e + MU.log "Failed to run initial SQL commands on #{identifier} via #{address}:#{port}: #{e.inspect}", MU::WARN, details: conn + end + end + private_class_method :run_sql_postgres + + def self.run_sql_mysql(address, port, user, password, db, cmds = [], identifier = nil) + identifier ||= address + autoload :Mysql, 'mysql' + MU.log "Initiating mysql connection to #{address}:#{port} as #{user}" + conn = Mysql.new(address, user, password, db, port) + cmds.each { |cmd| + MU.log "Running #{cmd} on database #{identifier}" + conn.query(cmd) + } + conn.close + end + private_class_method :run_sql_mysql + def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, master_ip = MU.mu_public_ip) found_muid = false found_master = false From 556cd017a199dba5e688360b80ddeb394f801472 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 24 Mar 2020 23:23:55 -0400 Subject: [PATCH 016/124] AWS::Database: rip out some more overlong cleanup code --- modules/mu/clouds/aws/database.rb | 88 +++++++------------------------ 1 file changed, 18 insertions(+), 70 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb 
b/modules/mu/clouds/aws/database.rb index aa9497243..4c23bd612 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -681,17 +681,32 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. } delete_subnet_groups = Proc.new { |id| - delete_subnet_group(id, region: region) unless noop + MU.log "Deleting RDS subnet group #{id}" + if !noop + MU.retrier([Aws::RDS::Errors::InvalidDBSubnetGroupStateFault], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBSubnetGroupNotFoundFault]) { + MU::Cloud::AWS.rds(region: region).delete_db_subnet_group(db_subnet_group_name: id) + } + end } threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster, delete_subnet_groups) delete_parameter_groups = Proc.new { |id| - delete_db_parameter_group(id, region: region) unless noop + MU.log "Deleting RDS database parameter group #{id}" + if !noop + MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) { + MU::Cloud::AWS.rds(region: region).delete_db_parameter_group(db_parameter_group_name: id) + } + end } threads.concat threaded_resource_purge(:describe_db_parameter_groups, :db_parameter_groups, :db_parameter_group_name, "pg", region, credentials, ignoremaster, delete_parameter_groups) delete_cluster_parameter_groups = Proc.new { |id| - delete_db_cluster_parameter_group(id, region: region) unless noop + MU.log "Deleting RDS cluster parameter group #{id}" + if !noop + MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) { + MU::Cloud::AWS.rds(region: region).delete_db_cluster_parameter_group(db_cluster_parameter_group_name: id) + } + end } threads.concat threaded_resource_purge(:describe_db_cluster_parameter_groups, :db_cluster_parameter_groups, :db_cluster_parameter_group_name, "pg", 
region, credentials, ignoremaster, delete_cluster_parameter_groups) @@ -1603,73 +1618,6 @@ def self.terminate_rds_cluster(cluster, noop: false, skipsnapshots: false, regio end private_class_method :terminate_rds_cluster - # Remove a database subnet group. - # @param subnet_group_id [string]: The cloud provider's ID of the database subnet group. - # @param region [String]: The cloud provider's region in which to operate. - # @return [void] - def self.delete_subnet_group(subnet_group_id, region: MU.curRegion) - retries ||= 0 - MU.log "Deleting DB subnet group #{subnet_group_id}" - MU::Cloud::AWS.rds(region: region).delete_db_subnet_group(db_subnet_group_name: subnet_group_id) - rescue Aws::RDS::Errors::DBSubnetGroupNotFoundFault => e - MU.log "DB subnet group #{subnet_group_id} disappeared before we could remove it", MU::WARN - rescue Aws::RDS::Errors::InvalidDBSubnetGroupStateFault=> e - if retries < 5 - MU.log "DB subnet group #{subnet_group_id} is not in a removable state, retrying", MU::WARN - retries += 1 - sleep 30 - retry - else - MU.log "#{subnet_group_id} is not in a removable state after several retries, giving up. #{e.inspect}", MU::ERR - end - end - private_class_method :delete_subnet_group - - # Remove a database parameter group. - # @param parameter_group_id [string]: The cloud provider's ID of the database parameter group. - # @param region [String]: The cloud provider's region in which to operate. 
- # @return [void] - def self.delete_db_parameter_group(parameter_group_id, region: MU.curRegion) - retries ||= 0 - MU.log "Deleting DB parameter group #{parameter_group_id}" - MU::Cloud::AWS.rds(region: region).delete_db_parameter_group(db_parameter_group_name: parameter_group_id) - rescue Aws::RDS::Errors::DBParameterGroupNotFound - MU.log "DB parameter group #{parameter_group_id} disappeared before we could remove it", MU::WARN - rescue Aws::RDS::Errors::InvalidDBParameterGroupState => e - if retries < 5 - MU.log "DB parameter group #{parameter_group_id} is not in a removable state, retrying", MU::WARN - retries += 1 - sleep 30 - retry - else - MU.log "DB parameter group #{parameter_group_id} is not in a removable state after several retries, giving up. #{e.inspect}", MU::ERR - end - end - private_class_method :delete_db_parameter_group - - # Remove a database cluster parameter group. - # @param parameter_group_id [string]: The cloud provider's ID of the database cluster parameter group. - # @param region [String]: The cloud provider's region in which to operate. - # @return [void] - def self.delete_db_cluster_parameter_group(parameter_group_id, region: MU.curRegion) - retries ||= 0 - MU.log "Deleting cluster parameter group #{parameter_group_id}" - MU::Cloud::AWS.rds(region: region).delete_db_cluster_parameter_group(db_cluster_parameter_group_name: parameter_group_id) - # AWS API sucks. instead of returning the documented error DBClusterParameterGroupNotFoundFault it errors out with DBParameterGroupNotFound. 
- rescue Aws::RDS::Errors::DBParameterGroupNotFound - MU.log "Cluster parameter group #{parameter_group_id} disappeared before we could remove it", MU::WARN - rescue Aws::RDS::Errors::InvalidDBParameterGroupState => e - if retries < 5 - MU.log "Cluster parameter group #{parameter_group_id} is not in a removable state, retrying", MU::WARN - retries += 1 - sleep 30 - retry - else - MU.log "Cluster parameter group #{parameter_group_id} is not in a removable state after several retries, giving up. #{e.inspect}", MU::ERR - end - end - private_class_method :delete_db_cluster_parameter_group - end #class end #class end From 7a61beee25284fba5fe0030ce996554497ab4575 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 25 Mar 2020 19:29:05 -0400 Subject: [PATCH 017/124] AWS::Database: some (some) factoring in validateConfig --- modules/mu/clouds/aws/database.rb | 250 +++++++++++++++--------------- modules/mu/clouds/aws/vpc.rb | 27 ++++ 2 files changed, 152 insertions(+), 125 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 4c23bd612..462ec9860 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -20,6 +20,31 @@ class AWS # A database as configured in {MU::Config::BasketofKittens::databases} class Database < MU::Cloud::Database + STORAGE_RANGES = { + "io1" => { + "postgres" => 100..6144, + "mysql" => 100..6144, + "oracle-se1" => 100..6144, + "oracle-se" => 100..6144, + "oracle-ee" => 100..6144, + "sqlserver-ex" => 100..4096, + "sqlserver-web" => 100..4096, + "sqlserver-ee" => 200..4096, + "sqlserver-se" => 200..4096 + }, + "standard" => { + "postgres" => 5..6144, + "mysql" => 5..6144, + "oracle-se1" => 10..6144, + "oracle-se" => 10..6144, + "oracle-ee" => 10..6144, + "sqlserver-ex" => 20..4096, + "sqlserver-web" => 20..4096, + "sqlserver-ee" => 200..4096, + "sqlserver-se" => 200..4096 + } + }.freeze + # Initialize this cloud resource object. 
Calling +super+ will invoke the initializer defined under {MU::Cloud}, which should set the attribtues listed in {MU::Cloud::PUBLIC_ATTRS} as well as applicable dependency shortcuts, like +@vpc+, for us. # @param args [Hash]: Hash of named arguments passed via Ruby's double-splat def initialize(**args) @@ -829,37 +854,37 @@ def self.schema(_config) [toplevel_required, schema] end - # Cloud-specific pre-processing of {MU::Config::BasketofKittens::databases}, bare and unvalidated. - # @param db [Hash]: The resource to process and validate - # @param _configurator [MU::Config]: The overall deployment configurator of which this resource is a member - # @return [Boolean]: True if validation succeeded, False otherwise - def self.validateConfig(db, _configurator) - ok = true - + @@engine_cache= {} + def self.get_supported_engines(region = MU.myRegion, credentials = nil) + @@engine_cache ||= {} + @@engine_cache[credentials] ||= {} + @@engine_cache[credentials][region] ||= {} - if db['creation_style'] == "existing_snapshot" and - !db['create_cluster'] and - db['source'] and db["source"]["id"] and db['source']["id"].match(/:cluster-snapshot:/) - MU.log "Database #{db['name']}: Existing snapshot #{db["source"]["id"]} looks like a cluster snapshot, but create_cluster is not set. Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR - ok = false + if !@@engine_cache[credentials][region].empty? 
+ return @@engine_cache[credentials][region] end - pgroup_families = [] engines = {} - marker = nil begin - resp = MU::Cloud::AWS.rds(credentials: db['credentials'], region: db['region']).describe_db_engine_versions(marker: marker) + resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_engine_versions(marker: marker) marker = resp.marker if resp and resp.db_engine_versions resp.db_engine_versions.each { |version| engines[version.engine] ||= { "versions" => [], - "families" => [] + "families" => [], + "features" => {} } engines[version.engine]['versions'] << version.engine_version engines[version.engine]['families'] << version.db_parameter_group_family + [:supports_read_replica, :supports_log_exports_to_cloudwatch_logs].each { |feature| + if version.respond_to?(feature) and version.send(feature) == true + engines[version.engine]['features'][version.engine_version] ||= [] + engines[version.engine]['features'][version.engine_version] << feature + end + } } engines.keys.each { |engine| @@ -871,8 +896,27 @@ def self.validateConfig(db, _configurator) MU.log "Failed to get list of valid RDS engine versions in #{db['region']}, proceeding without proper validation", MU::WARN end end while !marker.nil? + @@engine_cache[credentials][region] = engines + engines + end + private_class_method :get_supported_engines + + # Cloud-specific pre-processing of {MU::Config::BasketofKittens::databases}, bare and unvalidated. 
+ # @param db [Hash]: The resource to process and validate + # @param _configurator [MU::Config]: The overall deployment configurator of which this resource is a member + # @return [Boolean]: True if validation succeeded, False otherwise + def self.validateConfig(db, _configurator) + ok = true + + if db['creation_style'] == "existing_snapshot" and + !db['create_cluster'] and + db['source'] and db["source"]["id"] and db['source']["id"].match(/:cluster-snapshot:/) + MU.log "Database #{db['name']}: Existing snapshot #{db["source"]["id"]} looks like a cluster snapshot, but create_cluster is not set. Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR + ok = false + end + - if db['create_cluster'] or db['engine'] == "aurora" or db["member_of_cluster"] + if db['create_cluster'] or (db['engine'] and db['engine'].match(/aurora/)) or db["member_of_cluster"] case db['engine'] when "mysql", "aurora", "aurora-mysql" if db["engine_version"].match(/^5\.6/) or db["cluster_mode"] == "serverless" @@ -888,39 +932,7 @@ def self.validateConfig(db, _configurator) end end - if db['engine'] == "aurora-postgresql" - db.delete('cloudwatch_logs') - end - - if db['engine'].match(/^aurora/) and !db['create_cluster'] and !db['add_cluster_node'] - MU.log "Database #{db['name']}: #{db['engine']} looks like a cluster engine, but create_cluster is not set. 
Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR - ok = false - end - - if engines.size > 0 - if !engines[db['engine']] - MU.log "RDS engine #{db['engine']} is not supported in #{db['region']}", MU::ERR, details: engines.keys.sort - ok = false - else - if db["engine_version"] and - engines[db['engine']]['versions'].size > 0 and - !engines[db['engine']]['versions'].include?(db['engine_version']) and - !engines[db['engine']]['versions'].grep(/^#{Regexp.quote(db["engine_version"])}.+/) - MU.log "RDS engine '#{db['engine']}' version '#{db['engine_version']}' is not supported in #{db['region']}", MU::ERR, details: { "Known-good versions:" => engines[db['engine']]['versions'].uniq.sort } - ok = false - end - if db["parameter_group_family"] and - engines[db['engine']]['families'].size > 0 and - !engines[db['engine']]['families'].include?(db['parameter_group_family']) - MU.log "RDS engine '#{db['engine']}' parameter group family '#{db['parameter_group_family']}' is not supported in #{db['region']}", MU::ERR, details: { "Valid parameter families:" => engines[db['engine']]['families'].uniq.sort } - ok = false - end - end - end - - if db['parameter_group_family'] and pgroup_families.size > 0 and - !pgroup_families.include?(db['parameter_group_family']) - end + ok = false if !validate_engine(db) db["license_model"] ||= if ["postgres", "postgresql", "aurora-postgresql"].include?(db["engine"]) @@ -931,13 +943,6 @@ def self.validateConfig(db, _configurator) "license-included" end - if db["create_read_replica"] or db['read_replica_of'] - if !["postgres", "postgresql", "mysql", "aurora-mysql", "aurora-postgresql", "mariadb"].include?(db["engine"]) - MU.log "Read replica(s) database instances not supported for #{db["engine"]}.", MU::ERR - ok = false - end - end - if db["creation_style"] == "existing" begin MU::Cloud::AWS.rds(region: db['region']).describe_db_instances( @@ -957,79 +962,29 @@ def self.validateConfig(db, _configurator) MU.log "Both of 
multi_az_on_create and multi_az_on_deploy cannot be true", MU::ERR ok = false end - if db.has_key?("db_parameter_group_parameters") || db.has_key?("cluster_parameter_group_parameters") - if db["parameter_group_family"].nil? - MU.log "parameter_group_family must be set when setting db_parameter_group_parameters", MU::ERR - ok = false - end + + if (db["db_parameter_group_parameters"] or db["cluster_parameter_group_parameters"]) and db["parameter_group_family"].nil? + MU.log "parameter_group_family must be set when setting db_parameter_group_parameters", MU::ERR + ok = false end + # Adding rules for Database instance storage. This varies depending on storage type and database type. - if !db["storage"].nil? and (db["storage_type"] == "standard" or db["storage_type"] == "gp2") - if db["engine"] == "postgres" or db["engine"] == "mysql" - if !(5..6144).include? db["storage"] - MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes between 5 to 6144 GB for #{db["storage_type"]} volume types", MU::ERR - ok = false - end - elsif %w{oracle-se1 oracle-se oracle-ee}.include? db["engine"] - if !(10..6144).include? db["storage"] - MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes between 10 to 6144 GB for #{db["storage_type"]} volume types", MU::ERR - ok = false - end - elsif %w{sqlserver-ex sqlserver-web}.include? db["engine"] - if !(20..4096).include? db["storage"] - MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes between 20 to 4096 GB for #{db["storage_type"]} volume types", MU::ERR - ok = false - end - elsif %w{sqlserver-ee sqlserver-se}.include? db["engine"] - if !(200..4096).include? db["storage"] - MU.log "Database storage size is set to #{db["storage"]}. 
#{db["engine"]} only supports storage sizes between 200 to 4096 GB for #{db["storage_type"]} volume types", MU::ERR - ok = false - end - end - elsif db["storage_type"] == "io1" - if %w{postgres mysql oracle-se1 oracle-se oracle-ee}.include? db["engine"] - if !(100..6144).include? db["storage"] - MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes between 100 to 6144 GB for #{db["storage_type"]} volume types", MU::ERR - ok = false - end - elsif %w{sqlserver-ex sqlserver-web}.include? db["engine"] - if !(100..4096).include? db["storage"] - MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes between 100 to 4096 GB for #{db["storage_type"]} volume types", MU::ERR - ok = false - end - elsif %w{sqlserver-ee sqlserver-se}.include? db["engine"] - if !(200..4096).include? db["storage"] - MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes between 200 to 4096 GB #{db["storage_type"]} volume types", MU::ERR - ok = false - end + if !db["storage"].nil? and !db["create_cluster"] and !db["add_cluster_node"] + if db["storage_type"] == "io1" and !STORAGE_RANGES["io1"][db['engine']].include?(db["storage"]) + MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes from #{STORAGE_RANGES["io1"][db['engine']]} GB for #{db["storage_type"]} volumes.", MU::ERR + elsif !STORAGE_RANGES["standard"][db['engine']].include?(db["storage"]) + MU.log "Database storage size is set to #{db["storage"]}. 
#{db["engine"]} only supports storage sizes from #{STORAGE_RANGES["standard"][db['engine']]} GB for #{db["storage_type"]} volumes.", MU::ERR + ok = false end end - if !db["vpc"] - MU::Cloud::AWS.ec2(region: db['region'], credentials: db['credentials']).describe_vpcs.vpcs.each { |vpc| - if vpc.is_default - db["publicly_accessible"] = true - db['vpc'] = { - "id" => vpc.vpc_id, - "cloud" => "AWS", - "region" => db['region'], - "credentials" => db['credentials'] - } - db['vpc']['subnets'] = MU::Cloud::AWS.ec2(region: db['region'], credentials: db['credentials']).describe_subnets( - filters: [ - { - name: "vpc-id", - values: [vpc.vpc_id] - } - ] - ).subnets.map { |s| { "subnet_id" => s.subnet_id } } - MU.log "Using default VPC for database #{db['name']}" - break - end - } - end - - if db["vpc"] + if !db['vpc'] + db["vpc"] = MU::Cloud::AWS::VPC.defaultVpc(db['region'], db['credentials']) + if db['vpc'] + MU.log "Using default VPC for database '#{db['name']}; this sets 'publicly_accessible' to true.", MU::WARN + db['publicly_accessible'] = true + end + else if db["vpc"]["subnet_pref"] == "all_public" and !db['publicly_accessible'] and (db["vpc"]['subnets'].nil? or db["vpc"]['subnets'].empty?) MU.log "Setting publicly_accessible to true on database '#{db['name']}', since deploying into public subnets.", MU::WARN db['publicly_accessible'] = true @@ -1044,6 +999,51 @@ def self.validateConfig(db, _configurator) private + def self.validate_engine(db) + ok = true + + engines = get_supported_engines(db['region'], db['credentials']) + return if engines.nil? or engines.empty? + engine_cfg = engines[db['engine']] + + if !engine_cfg or engine_cfg['versions'].empty? or engine_cfg['families'].empty? 
+ MU.log "RDS engine #{db['engine']} has no supported versions in #{db['region']}", MU::ERR, details: engines.keys.sort + return false + end + + if db['engine'].match(/^aurora/) and !db['create_cluster'] and !db['add_cluster_node'] + MU.log "Database #{db['name']}: #{db['engine']} looks like a cluster engine, but create_cluster is not set. Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR + ok = false + end + + db['engine_version'] ||= engine_cfg['versions'].sort.last + if !engine_cfg['versions'].grep(/^#{Regexp.quote(db["engine_version"])}.+/) + MU.log "RDS engine '#{db['engine']}' version '#{db['engine_version']}' is not supported in #{db['region']}", MU::ERR, details: { "Known-good versions:" => engine_cfg['versions'].uniq.sort } + ok = false + end + + if db["parameter_group_family"] and + !engine_cfg['families'].include?(db['parameter_group_family']) + MU.log "RDS engine '#{db['engine']}' parameter group family '#{db['parameter_group_family']}' is not supported in #{db['region']}", MU::ERR, details: { "Valid parameter families:" => engine_cfg['families'].uniq.sort } + ok = false + end + + features_cfg = engine_cfg['features'][db['engine_version']] + + if (db['create_read_replica'] or db['read_replica_of']) and + (!features_cfg or !features_cfg.include?(:supports_read_replica)) + MU.log "Engine #{db['engine']} #{db['engine_version']} does not appear to support read replicas", MU::ERR + ok = false + end + if db['cloudwatch_logs'] and (!features_cfg or !features_cfg.include?(:supports_log_exports_to_cloudwatch_logs)) +# MU.log "Engine #{db['engine']} #{db['engine_version']} does not support CloudWatch Logs exports, disabling", MU::WARN + db.delete('cloudwatch_logs') + end + + ok + end + private_class_method :validate_engine + def add_basic getPassword diff --git a/modules/mu/clouds/aws/vpc.rb b/modules/mu/clouds/aws/vpc.rb index 961bb8196..3d3f9673a 100644 --- a/modules/mu/clouds/aws/vpc.rb +++ b/modules/mu/clouds/aws/vpc.rb @@ -1265,6 
+1265,33 @@ def self.getDefaultSg(vpc_id, region: MU.curRegion, credentials: nil) nil end + # Try to locate the default VPC for a region, and return a BoK-style + # config fragment for something that might want to live in it. + def defaultVpc(region, credentials) + cfg_fragment = nil + MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_vpcs.vpcs.each { |vpc| + if vpc.is_default + cfg_fragment = { + "id" => vpc.vpc_id, + "cloud" => "AWS", + "region" => region, + "credentials" => credentials + } + cfg_fragment['subnets'] = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_subnets( + filters: [ + { + name: "vpc-id", + values: [vpc.vpc_id] + } + ] + ).subnets.map { |s| { "subnet_id" => s.subnet_id } } + break + end + } + + cfg_fragment + end + private def peerWith(peer) From c03ccb6d7721ee9ef2a21ef9f7ae2c8deba3c39a Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 25 Mar 2020 19:43:55 -0400 Subject: [PATCH 018/124] AWS::Database: some CodeClimate trivia --- modules/mu/clouds/aws/database.rb | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 462ec9860..759cd277a 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -332,7 +332,7 @@ def createSubnetGroup if @dependencies.has_key?('firewall_rule') @config["vpc_security_group_ids"] = [] - @dependencies['firewall_rule'].values.each { |sg| + @dependencies['firewall_rule'].each_value { |sg| @config["vpc_security_group_ids"] << sg.cloud_id } end @@ -344,7 +344,7 @@ def createSubnetGroup MU.log "Using NAT Gateway, not modifying security groups" else _nat_name, _nat_conf, nat_deploydata = @nat.describe - @deploy.kittens['firewall_rules'].values.each { |acl| + @deploy.kittens['firewall_rules'].each_value { |acl| # XXX if a user doesn't set up dependencies correctly, this can die horribly on a NAT that's still in mid-creation. 
Fix this... possibly in the config parser. if acl.config["admin"] acl.addRule([nat_deploydata["private_ip_address"]], proto: "tcp") @@ -476,26 +476,26 @@ def groom def self.getName(basename, type: 'dbname', config: nil) if type == 'dbname' # Apply engine-specific db name constraints - if config["engine"].match(/^oracle/) + if config["engine"] =~ /^oracle/ (MU.seed.downcase+config["name"])[0..7] - elsif config["engine"].match(/^sqlserver/) + elsif config["engine"] =~ /^sqlserver/ nil - elsif config["engine"].match(/^mysql/) + elsif config["engine"] =~ /^mysql/ basename[0..63] - elsif config["engine"].match(/^aurora/) + elsif config["engine"] =~ /^aurora/ (MU.seed.downcase+config["name"])[0..7] else basename end elsif type == 'dbuser' # Apply engine-specific master username constraints - if config["engine"].match(/^oracle/) + if config["engine"] =~ /^oracle/ basename[0..29].gsub(/[^a-z0-9]/i, "") - elsif config["engine"].match(/^sqlserver/) + elsif config["engine"] =~ /^sqlserver/ basename[0..127].gsub(/[^a-z0-9]/i, "") - elsif config["engine"].match(/^(mysql|maria)/) + elsif config["engine"] =~ /^(mysql|maria)/ basename[0..15].gsub(/[^a-z0-9]/i, "") - elsif config["engine"].match(/^aurora/) + elsif config["engine"] =~ /^aurora/ basename[0..15].gsub(/[^a-z0-9]/i, "") else basename.gsub(/[^a-z0-9]/i, "") @@ -887,7 +887,7 @@ def self.get_supported_engines(region = MU.myRegion, credentials = nil) } } - engines.keys.each { |engine| + engines.each_key { |engine| engines[engine]["versions"].uniq! engines[engine]["families"].uniq! 
} @@ -1358,9 +1358,9 @@ def run_sql_commands end # Running SQL on deploy - if @config['engine'].match(/postgres/) + if @config['engine'] =~ /postgres/ MU::Cloud::AWS::Database.run_sql_postgres(address, port, @config['master_user'], @config['password'], cloud_desc.db_name, @config['run_sql_on_deploy'], @config['name']) - elsif @config['engine'].match(/mysql|maria/) + elsif @config['engine'] =~ /mysql|maria/ MU::Cloud::AWS::Database.run_sql_mysql(address, port, @config['master_user'], @config['password'], cloud_desc.db_name, @config['run_sql_on_deploy'], @config['name']) end From 16f0b763deff12db26ce55463728dc2ab32fb978 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 25 Mar 2020 20:15:17 -0400 Subject: [PATCH 019/124] AWS::Database: munge parameter group creation for clusters and instances together --- modules/mu/clouds/aws/database.rb | 63 ++++++++++++------------------- 1 file changed, 24 insertions(+), 39 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 759cd277a..8a4009112 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -107,7 +107,7 @@ def create if @config.has_key?("parameter_group_family") @config["parameter_group_name"] = @mu_name - createDBClusterParameterGroup + createDBParameterGroup(true) end createDbCluster @@ -356,52 +356,37 @@ def createSubnetGroup end end - # Create a database cluster parameter group. 
- def createDBClusterParameterGroup - MU.log "Creating a cluster parameter group #{@config["parameter_group_name"]}" - - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster_parameter_group( - db_cluster_parameter_group_name: @config["parameter_group_name"], - db_parameter_group_family: @config["parameter_group_family"], - description: "Parameter group for #{@config["parameter_group_family"]}", - tags: allTags - ) - - if @config["cluster_parameter_group_parameters"] && !@config["cluster_parameter_group_parameters"].empty? - params = [] - @config["cluster_parameter_group_parameters"].each { |item| - params << {parameter_name: item['name'], parameter_value: item['value'], apply_method: item['apply_method']} - } - - MU.log "Modifiying cluster parameter group #{@config["parameter_group_name"]}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster_parameter_group( - db_cluster_parameter_group_name: @config["parameter_group_name"], - parameters: params - ) - end - end - # Create a database parameter group. - def createDBParameterGroup - MU.log "Creating a database parameter group #{@config["parameter_group_name"]}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_parameter_group( - db_parameter_group_name: @config["parameter_group_name"], + def createDBParameterGroup(cluster = false) + params = { db_parameter_group_family: @config["parameter_group_family"], - description: "Parameter group for #{@config["parameter_group_family"]}", + description: "Parameter group for #{@mu_name}", tags: allTags - ) + } + params[cluster ? :db_cluster_parameter_group_name : :db_parameter_group_name] = @config["parameter_group_name"] + MU.log "Creating a #{cluster ? "cluster" : "database" } parameter group #{@config["parameter_group_name"]}" + + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send(cluster ? 
:create_db_cluster_parameter_group : :create_db_parameter_group, params) + fieldname = cluster ? "cluster_parameter_group_parameters" : "db_parameter_group_parameters" - if @config["db_parameter_group_parameters"] && !@config["db_parameter_group_parameters"].empty? + if @config[fieldname] && !@config[fieldname].empty? params = [] - @config["db_parameter_group_parameters"].each { |item| + @config[fieldname].each { |item| params << {parameter_name: item['name'], parameter_value: item['value'], apply_method: item['apply_method']} } - MU.log "Modifiying database parameter group #{@config["parameter_group_name"]}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_parameter_group( - db_parameter_group_name: @config["parameter_group_name"], - parameters: params - ) + MU.log "Modifiying parameter group #{@config["parameter_group_name"]}" + if cluster + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster_parameter_group( + db_cluster_parameter_group_name: @config["parameter_group_name"], + parameters: params + ) + else + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_parameter_group( + db_parameter_group_name: @config["parameter_group_name"], + parameters: params + ) + end end end From 4eabcc7c9b7441b23a741c1da88a836ab5573f9d Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 25 Mar 2020 22:18:39 -0400 Subject: [PATCH 020/124] AWS::Database: more refinement in cleanup --- modules/mu/clouds/aws/database.rb | 35 +++++++++++++------------------ 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 8a4009112..578da1cf3 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -331,9 +331,9 @@ def createSubnetGroup @config["subnet_group_name"] = resp.db_subnet_group.db_subnet_group_name if 
@dependencies.has_key?('firewall_rule') - @config["vpc_security_group_ids"] = [] - @dependencies['firewall_rule'].each_value { |sg| - @config["vpc_security_group_ids"] << sg.cloud_id + @config["vpc_security_group_ids"] = [] + @dependencies['firewall_rule'].each_value { |sg| + @config["vpc_security_group_ids"] << sg.cloud_id } end end @@ -509,7 +509,7 @@ def allowHost(cidr) # Otherwise go get our generic EC2 ruleset and punch a hole in it if @dependencies.has_key?('firewall_rule') - @dependencies['firewall_rule'].values.each { |sg| + @dependencies['firewall_rule'].each_value { |sg| sg.addRule([cidr], proto: "tcp", port: cloud_desc.endpoint.port) break } @@ -645,7 +645,7 @@ def self.quality end # @return [Array] - def self.threaded_resource_purge(describe_method, list_method, id_method, arn_type, region, credentials, ignoremaster, deletion_proc) + def self.threaded_resource_purge(describe_method, list_method, id_method, arn_type, region, credentials, ignoremaster) deletia = [] resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).send(describe_method) resp.send(list_method).each { |resource| @@ -660,7 +660,7 @@ def self.threaded_resource_purge(describe_method, list_method, id_method, arn_ty threads = [] deletia.each { |id| threads << Thread.new(id) { |resource_id| - deletion_proc.call(resource_id) + yield(resource_id) } } @@ -675,22 +675,20 @@ def self.threaded_resource_purge(describe_method, list_method, id_method, arn_ty # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU.curRegion, flags: {}) - delete_dbs = Proc.new { |id| + threaded_resource_purge(:describe_db_instances, :db_instances, :db_instance_identifier, "db", region, credentials, ignoremaster) { |id| terminate_rds_instance(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials) - } - threaded_resource_purge(:describe_db_instances, :db_instances, 
:db_instance_identifier, "db", region, credentials, ignoremaster, delete_dbs).each { |t| + }.each { |t| t.join } - delete_clusters = Proc.new { |id| + threaded_resource_purge(:describe_db_clusters, :db_clusters, :db_cluster_identifier, "cluster", region, credentials, ignoremaster) { |id| terminate_rds_cluster(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials) - } - threaded_resource_purge(:describe_db_clusters, :db_clusters, :db_cluster_identifier, "cluster", region, credentials, ignoremaster, delete_clusters).each { |t| + }.each { |t| t.join } - delete_subnet_groups = Proc.new { |id| + threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster) { |id| MU.log "Deleting RDS subnet group #{id}" if !noop MU.retrier([Aws::RDS::Errors::InvalidDBSubnetGroupStateFault], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBSubnetGroupNotFoundFault]) { @@ -698,9 +696,8 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. } end } - threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster, delete_subnet_groups) - delete_parameter_groups = Proc.new { |id| + threads.concat threaded_resource_purge(:describe_db_parameter_groups, :db_parameter_groups, :db_parameter_group_name, "pg", region, credentials, ignoremaster) { |id| MU.log "Deleting RDS database parameter group #{id}" if !noop MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) { @@ -708,9 +705,8 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. 
} end } - threads.concat threaded_resource_purge(:describe_db_parameter_groups, :db_parameter_groups, :db_parameter_group_name, "pg", region, credentials, ignoremaster, delete_parameter_groups) - delete_cluster_parameter_groups = Proc.new { |id| + threads.concat threaded_resource_purge(:describe_db_cluster_parameter_groups, :db_cluster_parameter_groups, :db_cluster_parameter_group_name, "pg", region, credentials, ignoremaster) { |id| MU.log "Deleting RDS cluster parameter group #{id}" if !noop MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) { @@ -718,7 +714,6 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. } end } - threads.concat threaded_resource_purge(:describe_db_cluster_parameter_groups, :db_cluster_parameter_groups, :db_cluster_parameter_group_name, "pg", region, credentials, ignoremaster, delete_cluster_parameter_groups) # Wait for all of the databases subnet/parameter groups to finish cleanup before proceeding threads.each { |t| @@ -874,6 +869,7 @@ def self.get_supported_engines(region = MU.myRegion, credentials = nil) } engines.each_key { |engine| engines[engine]["versions"].uniq! + engines[engine]["versions"].sort! { |a, b| MU.version_sort(a, b) } engines[engine]["families"].uniq! 
} @@ -900,7 +896,6 @@ def self.validateConfig(db, _configurator) ok = false end - if db['create_cluster'] or (db['engine'] and db['engine'].match(/aurora/)) or db["member_of_cluster"] case db['engine'] when "mysql", "aurora", "aurora-mysql" @@ -1001,7 +996,7 @@ def self.validate_engine(db) ok = false end - db['engine_version'] ||= engine_cfg['versions'].sort.last + db['engine_version'] ||= engine_cfg['versions'].last if !engine_cfg['versions'].grep(/^#{Regexp.quote(db["engine_version"])}.+/) MU.log "RDS engine '#{db['engine']}' version '#{db['engine_version']}' is not supported in #{db['region']}", MU::ERR, details: { "Known-good versions:" => engine_cfg['versions'].uniq.sort } ok = false From f6a5a7c87facbdc8130301d8bdbccf69bca4c35a Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 26 Mar 2020 13:43:33 -0400 Subject: [PATCH 021/124] AWS::Database: validate the minutiae of Cloudwatch Logs, and default engine versions better --- modules/mu/clouds/aws/database.rb | 73 ++++++++++++++++++++++--------- 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 578da1cf3..8799b530a 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -767,10 +767,9 @@ def self.schema(_config) }, "cloudwatch_logs" => { "type" => "array", - "default" => ["error"], "items" => { "type" => "string", - "enum" => ["error", "general", "audit", "slow_query"], + "enum" => ["audit", "error", "general", "slowquery", "profiler", "postgresql", "alert", "listener", "trace", "upgrade", "agent"] } }, "serverless_scaling" => { @@ -835,17 +834,18 @@ def self.schema(_config) end @@engine_cache= {} - def self.get_supported_engines(region = MU.myRegion, credentials = nil) + def self.get_supported_engines(region = MU.myRegion, credentials = nil, engine: nil) @@engine_cache ||= {} @@engine_cache[credentials] ||= {} @@engine_cache[credentials][region] ||= {} if 
!@@engine_cache[credentials][region].empty? - return @@engine_cache[credentials][region] + return engine ? @@engine_cache[credentials][region][engine] : @@engine_cache[credentials][region] end engines = {} marker = nil + begin resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_engine_versions(marker: marker) marker = resp.marker @@ -855,10 +855,12 @@ def self.get_supported_engines(region = MU.myRegion, credentials = nil) engines[version.engine] ||= { "versions" => [], "families" => [], - "features" => {} + "features" => {}, + "raw" => {} } engines[version.engine]['versions'] << version.engine_version engines[version.engine]['families'] << version.db_parameter_group_family + engines[version.engine]['raw'][version.engine_version] = version [:supports_read_replica, :supports_log_exports_to_cloudwatch_logs].each { |feature| if version.respond_to?(feature) and version.send(feature) == true engines[version.engine]['features'][version.engine_version] ||= [] @@ -867,18 +869,19 @@ def self.get_supported_engines(region = MU.myRegion, credentials = nil) } } - engines.each_key { |engine| - engines[engine]["versions"].uniq! - engines[engine]["versions"].sort! { |a, b| MU.version_sort(a, b) } - engines[engine]["families"].uniq! + engines.each_key { |e| + engines[e]["versions"].uniq! + engines[e]["versions"].sort! { |a, b| MU.version_sort(a, b) } + engines[e]["families"].uniq! } else MU.log "Failed to get list of valid RDS engine versions in #{db['region']}, proceeding without proper validation", MU::WARN end end while !marker.nil? + @@engine_cache[credentials][region] = engines - engines + return engine ? @@engine_cache[credentials][region][engine] : @@engine_cache[credentials][region] end private_class_method :get_supported_engines @@ -979,12 +982,39 @@ def self.validateConfig(db, _configurator) private + def self.can_read_replica?(db) + engine = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) + if engine.nil? 
or !engine['features'] or !engine['features'][db['engine_version']] + return true # we can't be sure, so let the API sort it out later + end + engine['features'][db['engine_version']].include?(:supports_read_replica) + end + private_class_method :can_read_replica? + + def self.valid_cloudwatch_logs?(db) + return true if !db['cloudwatch_logs'] + engine = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) + if engine.nil? or !engine['features'] or !engine['features'][db['engine_version']] or !engine['features'][db['engine_version']].include?(:supports_read_replica) + MU.log "CloudWatch Logs not supported for #{db['engine']} #{db['engine_version']}", MU::ERR + return false + end + + ok = true + db['cloudwatch_logs'].each { |logtype| + if !engine['raw'][db['engine_version']].exportable_log_types.include?(logtype) + ok = false + MU.log "CloudWatch Log type #{logtype} is not valid for #{db['engine']} #{db['engine_version']}. List of valid types:", MU::ERR, details: engine['raw'][db['engine_version']].exportable_log_types + end + } + + ok + end + private_class_method :valid_cloudwatch_logs? + def self.validate_engine(db) ok = true - engines = get_supported_engines(db['region'], db['credentials']) - return if engines.nil? or engines.empty? - engine_cfg = engines[db['engine']] + engine_cfg = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) if !engine_cfg or engine_cfg['versions'].empty? or engine_cfg['families'].empty? 
MU.log "RDS engine #{db['engine']} has no supported versions in #{db['region']}", MU::ERR, details: engines.keys.sort @@ -996,8 +1026,12 @@ def self.validate_engine(db) ok = false end + # Resolve or default our engine version to something reasonable db['engine_version'] ||= engine_cfg['versions'].last - if !engine_cfg['versions'].grep(/^#{Regexp.quote(db["engine_version"])}.+/) + if !engine_cfg['versions'].include?(db["engine_version"]) + db['engine_version'] = engine_cfg['versions'].grep(/^#{Regexp.quote(db["engine_version"])}/).last + end + if !engine_cfg['versions'].include?(db["engine_version"]) MU.log "RDS engine '#{db['engine']}' version '#{db['engine_version']}' is not supported in #{db['region']}", MU::ERR, details: { "Known-good versions:" => engine_cfg['versions'].uniq.sort } ok = false end @@ -1008,16 +1042,13 @@ def self.validate_engine(db) ok = false end - features_cfg = engine_cfg['features'][db['engine_version']] - - if (db['create_read_replica'] or db['read_replica_of']) and - (!features_cfg or !features_cfg.include?(:supports_read_replica)) + if (db['create_read_replica'] or db['read_replica_of']) and !can_read_replica?(db) MU.log "Engine #{db['engine']} #{db['engine_version']} does not appear to support read replicas", MU::ERR ok = false end - if db['cloudwatch_logs'] and (!features_cfg or !features_cfg.include?(:supports_log_exports_to_cloudwatch_logs)) -# MU.log "Engine #{db['engine']} #{db['engine_version']} does not support CloudWatch Logs exports, disabling", MU::WARN - db.delete('cloudwatch_logs') + + if db['cloudwatch_logs'] and !valid_cloudwatch_logs?(db) + ok = false end ok From e3788023b506dde396f4c95a9d7b7027113425f0 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 27 Mar 2020 12:51:43 -0400 Subject: [PATCH 022/124] AWS::Database: get storage limits correcter --- modules/mu/clouds/aws/database.rb | 68 +++++++++++++++++++------------ modules/mu/config/database.rb | 7 +--- modules/tests/rds.yaml | 8 +++- 3 files changed, 50 
insertions(+), 33 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 8799b530a..3ed905cda 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -22,26 +22,43 @@ class Database < MU::Cloud::Database STORAGE_RANGES = { "io1" => { - "postgres" => 100..6144, - "mysql" => 100..6144, - "oracle-se1" => 100..6144, - "oracle-se" => 100..6144, - "oracle-ee" => 100..6144, - "sqlserver-ex" => 100..4096, - "sqlserver-web" => 100..4096, - "sqlserver-ee" => 200..4096, - "sqlserver-se" => 200..4096 + "postgres" => 100..65536, + "mysql" => 100..65536, + "mariadb" => 100..65536, + "oracle-se1" => 100..65536, + "oracle-se2" => 100..65536, + "oracle-se" => 100..65536, + "oracle-ee" => 100..65536, + "sqlserver-ex" => 100..16384, + "sqlserver-web" => 100..16384, + "sqlserver-ee" => 200..16384, + "sqlserver-se" => 200..16384 + }, + "gp2" => { + "postgres" => 20..65536, + "mysql" => 20..65536, + "mariadb" => 20..65536, + "oracle-se1" => 20..65536, + "oracle-se2" => 20..65536, + "oracle-se" => 20..65536, + "oracle-ee" => 20..65536, + "sqlserver-ex" => 20..16384, + "sqlserver-web" => 20..16384, + "sqlserver-ee" => 200..16384, + "sqlserver-se" => 200..16384 }, "standard" => { - "postgres" => 5..6144, - "mysql" => 5..6144, - "oracle-se1" => 10..6144, - "oracle-se" => 10..6144, - "oracle-ee" => 10..6144, - "sqlserver-ex" => 20..4096, - "sqlserver-web" => 20..4096, - "sqlserver-ee" => 200..4096, - "sqlserver-se" => 200..4096 + "postgres" => 5..3072, + "mysql" => 5..3072, + "mariadb" => 5..3072, + "oracle-se1" => 10..3072, + "oracle-se2" => 10..3072, + "oracle-se" => 10..3072, + "oracle-ee" => 10..3072, + "sqlserver-ex" => 20..1024, # ??? + "sqlserver-web" => 20..1024, # ??? + "sqlserver-ee" => 200..4096, # ??? + "sqlserver-se" => 200..4096 # ??? 
} }.freeze @@ -765,6 +782,11 @@ def self.schema(_config) "enum" => ["provisioned", "serverless", "parallelquery", "global"], "default" => "provisioned" }, + "storage_type" => { + "enum" => ["standard", "gp2", "io1"], + "type" => "string", + "default" => "gp2" + }, "cloudwatch_logs" => { "type" => "array", "items" => { @@ -952,13 +974,9 @@ def self.validateConfig(db, _configurator) end # Adding rules for Database instance storage. This varies depending on storage type and database type. - if !db["storage"].nil? and !db["create_cluster"] and !db["add_cluster_node"] - if db["storage_type"] == "io1" and !STORAGE_RANGES["io1"][db['engine']].include?(db["storage"]) - MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes from #{STORAGE_RANGES["io1"][db['engine']]} GB for #{db["storage_type"]} volumes.", MU::ERR - elsif !STORAGE_RANGES["standard"][db['engine']].include?(db["storage"]) - MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes from #{STORAGE_RANGES["standard"][db['engine']]} GB for #{db["storage_type"]} volumes.", MU::ERR - ok = false - end + if !db["storage"].nil? and !db["create_cluster"] and !db["add_cluster_node"] and !STORAGE_RANGES[db["storage_type"]][db['engine']].include?(db["storage"]) + MU.log "Database storage size is set to #{db["storage"]}. 
#{db["engine"]} only supports storage sizes from #{STORAGE_RANGES[db["storage_type"]][db['engine']]} GB for #{db["storage_type"]} volumes.", MU::ERR + ok = false end if !db['vpc'] diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index 548aee6af..5060a994d 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -79,12 +79,7 @@ def self.schema "storage" => { "type" => "integer", "description" => "Storage space for this database instance (GB).", - "default" => 5 - }, - "storage_type" => { - "enum" => ["standard", "gp2", "io1"], - "type" => "string", - "default" => "gp2" + "default" => 20 }, "run_sql_on_deploy" => { "type" => "array", diff --git a/modules/tests/rds.yaml b/modules/tests/rds.yaml index 58e499bae..b30943ecf 100644 --- a/modules/tests/rds.yaml +++ b/modules/tests/rds.yaml @@ -7,6 +7,7 @@ databases: - name: pgcluster size: db.t3.medium engine: postgres + engine_version: "10" allow_major_version_upgrade: true auto_minor_version_upgrade: false backup_retention_period: 10 @@ -18,17 +19,20 @@ databases: - name: mysql-base size: db.t2.small engine: mysql - storage: 5 vpc: name: rdstests create_read_replica: true + cloudwatch_logs: + - slowquery multi_az_on_create: true master_user: Bob - name: mysql-point-in-time creation_style: point_in_time size: db.t2.micro engine: mysql - storage: 5 + cloudwatch_logs: + - error + - general source: name: mysql-base vpc: From 557b51574315a20fb6b4fb4d4fa30091deb7c11a Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 27 Mar 2020 13:38:31 -0400 Subject: [PATCH 023/124] bring smoke tests in line with RDS API requirements --- modules/mu/config/database.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/mu/config/database.yml b/modules/mu/config/database.yml index 84a939193..4bdadae0a 100644 --- a/modules/mu/config/database.yml +++ b/modules/mu/config/database.yml @@ -4,7 +4,7 @@ name: database-complex size: db.r4.large engine: postgres 
engine_version: 9.6.6 -storage: 5 +storage: 21 add_cluster_node: true allow_major_version_upgrade: true auto_minor_version_upgrade: false @@ -24,6 +24,5 @@ vpc: name: <%= vpc_name %> size: <%= db_size %> engine: mariadb -storage: 5 <% end %> From 35a1cb5057a2f571ffa6dee34db438ffb407de90 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 27 Mar 2020 14:27:40 -0400 Subject: [PATCH 024/124] AWS::Database: break out more bits of validation, correct new_snapshot behavior somewhat, validate passwords more validly --- modules/mu/clouds/aws/database.rb | 150 ++++++++++++++++++------------ 1 file changed, 92 insertions(+), 58 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 3ed905cda..15ceb84c4 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -581,48 +581,35 @@ def cloud_desc(use_cache: true) # @return [String]: The cloud provider's identifier for the snapshot. def createNewSnapshot snap_id = @deploy.getResourceName(@config["name"]) + Time.new.strftime("%M%S").to_s + src_ref = MU::Config::Ref.get(@config["source"]) - attempts = 0 - begin + MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 10) { if @config["create_cluster"] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster_snapshot( db_cluster_snapshot_identifier: snap_id, - db_cluster_identifier: @mu_name, + db_cluster_identifier: src_ref.id, tags: allTags ) else MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_snapshot( db_snapshot_identifier: snap_id, - db_instance_identifier: @mu_name, + db_instance_identifier: src_ref.id, tags: allTags ) end - rescue Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidDBClusterStateFault => e - raise MuError, e.inspect if attempts >= 10 - attempts += 1 - sleep 60 - retry - end - - attempts = 0 - loop do - MU.log 
"Waiting for RDS snapshot of #{@mu_name} to be ready...", MU::NOTICE if attempts % 20 == 0 - MU.log "Waiting for RDS snapshot of #{@mu_name} to be ready...", MU::DEBUG - snapshot_resp = - if @config["create_cluster"] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_cluster_snapshots(db_cluster_snapshot_identifier: snap_id) - else - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_snapshots(db_snapshot_identifier: snap_id) - end + } + loop_if = Proc.new { if @config["create_cluster"] - break unless snapshot_resp.db_cluster_snapshots.first.status != "available" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_cluster_snapshots(db_cluster_snapshot_identifier: snap_id).db_cluster_snapshots.first.status != "available" else - break unless snapshot_resp.db_snapshots.first.status != "available" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_snapshots(db_snapshot_identifier: snap_id).db_snapshots.first.status != "available" end - attempts += 1 - sleep 15 - end + } + + MU.retrier(wait: 15, loop_if: loop_if) { |retries, _wait| + MU.log "Waiting for RDS snapshot of #{src_ref.id} to be ready...", MU::NOTICE if retries % 20 == 0 + } return snap_id end @@ -907,11 +894,9 @@ def self.get_supported_engines(region = MU.myRegion, credentials = nil, engine: end private_class_method :get_supported_engines - # Cloud-specific pre-processing of {MU::Config::BasketofKittens::databases}, bare and unvalidated. - # @param db [Hash]: The resource to process and validate - # @param _configurator [MU::Config]: The overall deployment configurator of which this resource is a member - # @return [Boolean]: True if validation succeeded, False otherwise - def self.validateConfig(db, _configurator) + # Make sure any source database/cluster/snapshot we've asked for exists + # and is valid. 
+ def self.validate_source_data(db) ok = true if db['creation_style'] == "existing_snapshot" and @@ -919,24 +904,69 @@ def self.validateConfig(db, _configurator) db['source'] and db["source"]["id"] and db['source']["id"].match(/:cluster-snapshot:/) MU.log "Database #{db['name']}: Existing snapshot #{db["source"]["id"]} looks like a cluster snapshot, but create_cluster is not set. Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR ok = false + elsif db["creation_style"] == "existing" or db["creation_style"] == "new_snapshot" + begin + MU::Cloud::AWS.rds(region: db['region']).describe_db_instances( + db_instance_identifier: db['source']['id'] + ) + rescue Aws::RDS::Errors::DBInstanceNotFound + MU.log "Source database was specified for #{db['name']}, but no such database exists in #{db['region']}", MU::ERR, db['source'] + ok = false + end end - if db['create_cluster'] or (db['engine'] and db['engine'].match(/aurora/)) or db["member_of_cluster"] - case db['engine'] - when "mysql", "aurora", "aurora-mysql" - if db["engine_version"].match(/^5\.6/) or db["cluster_mode"] == "serverless" - db["engine"] = "aurora" - else - db["engine"] = "aurora-mysql" - end - when "postgres", "postgresql", "postgresql-mysql" - db["engine"] = "aurora-postgresql" + ok + end + private_class_method :validate_source_data + + def self.validate_master_password(db) + maxlen = case db['engine'] + when "mariadb", "mysql" + 41 + when "postgresql" + 41 + when /oracle/ + 30 + when /sqlserver/ + 128 else - ok = false - MU.log "Database #{db['name']}: Requested a clustered database, but engine #{db['engine']} is not supported for clustering", MU::ERR - end + return true end + pw = if !db['password'].nil? + db['password'] + elsif db['auth_vault'] and !db['auth_vault'].empty? 
+ groomclass = MU::Groomer.loadGroomer(db['groomer']) + pw = groomclass.getSecret( + vault: db['auth_vault']['vault'], + item: db['auth_vault']['item'], + field: db['auth_vault']['password_field'] + ) + return true if pw.nil? + pw + else + return true + end + + if pw.length < 8 or pw.match(/[\/\\@\s]/) or pw > maxlen + MU.log "Database password specified in 'password' or 'auth_vault' doesn't meet RDS requirements. Must be between 8 and #{maxlen.to_s} chars and have only ASCII characters other than /, @, \", or [space].", MU::ERR + return false + end + + true + end + private_class_method :validate_master_password + + # Cloud-specific pre-processing of {MU::Config::BasketofKittens::databases}, bare and unvalidated. + # @param db [Hash]: The resource to process and validate + # @param _configurator [MU::Config]: The overall deployment configurator of which this resource is a member + # @return [Boolean]: True if validation succeeded, False otherwise + def self.validateConfig(db, _configurator) + ok = true + + ok = false if !validate_source_data(db) + + ok = false if !validate_engine(db) db["license_model"] ||= @@ -948,21 +978,9 @@ def self.validateConfig(db, _configurator) "license-included" end - if db["creation_style"] == "existing" - begin - MU::Cloud::AWS.rds(region: db['region']).describe_db_instances( - db_instance_identifier: db['source']['id'] - ) - rescue Aws::RDS::Errors::DBInstanceNotFound - MU.log "Source database was specified for #{db['name']}, but no such database exists in #{db['region']}", MU::ERR, db['source'] - ok = false - end - end + ok = false if !validate_master_password(db) + - if !db['password'].nil? and (db['password'].length < 8 or db['password'].match(/[\/\\@\s]/)) - MU.log "Database password '#{db['password']}' doesn't meet RDS requirements. 
Must be > 8 chars and have only ASCII characters other than /, @, \", or [space].", MU::ERR - ok = false - end if db["multi_az_on_create"] and db["multi_az_on_deploy"] MU.log "Both of multi_az_on_create and multi_az_on_deploy cannot be true", MU::ERR ok = false @@ -1032,6 +1050,22 @@ def self.valid_cloudwatch_logs?(db) def self.validate_engine(db) ok = true + if db['create_cluster'] or (db['engine'] and db['engine'].match(/aurora/)) or db["member_of_cluster"] + case db['engine'] + when "mysql", "aurora", "aurora-mysql" + if db["engine_version"].match(/^5\.6/) or db["cluster_mode"] == "serverless" + db["engine"] = "aurora" + else + db["engine"] = "aurora-mysql" + end + when "postgres", "postgresql", "postgresql-mysql" + db["engine"] = "aurora-postgresql" + else + ok = false + MU.log "Database #{db['name']}: Requested a clustered database, but engine #{db['engine']} is not supported for clustering", MU::ERR + end + end + engine_cfg = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) if !engine_cfg or engine_cfg['versions'].empty? or engine_cfg['families'].empty? From f850329dc1ef15e5733782ec76bc4e6540a01cbd Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 27 Mar 2020 15:36:03 -0400 Subject: [PATCH 025/124] try doing tags a different way --- modules/mu/clouds/aws/database.rb | 37 ++++++++----------------------- 1 file changed, 9 insertions(+), 28 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 15ceb84c4..879b6160b 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -187,24 +187,7 @@ def self.getARN(resource, resource_type, client_type, region: MU.curRegion, acco # Construct all our tags. # @return [Array]: All our standard tags and any custom tags. 
def allTags - tags = [] - MU::MommaCat.listStandardTags.each_pair { |name, value| - tags << {key: name, value: value} - } - - if @config['optional_tags'] - MU::MommaCat.listOptionalTags.each_pair { |name, value| - tags << {key: name, value: value} - } - end - - if @config['tags'] - @config['tags'].each { |tag| - tags << {key: tag['key'], value: tag['value']} - } - end - - return tags + @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } end # Getting the password for the master user, and saving it in a database / cluster specif vault @@ -236,7 +219,7 @@ def genericParams engine: @config["engine"], db_subnet_group_name: @config["subnet_group_name"].downcase, vpc_security_group_ids: @config["vpc_security_group_ids"], - tags: allTags + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } } if @config['cloudwatch_logs'] paramhash[:enable_cloudwatch_logs_exports ] = @config['cloudwatch_logs'] @@ -263,7 +246,7 @@ def genericParams db_subnet_group_name: @config["subnet_group_name"], publicly_accessible: @config["publicly_accessible"], copy_tags_to_snapshot: true, - tags: allTags + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } } end @@ -343,7 +326,7 @@ def createSubnetGroup db_subnet_group_name: @config["subnet_group_name"], db_subnet_group_description: @config["subnet_group_name"], subnet_ids: subnet_ids, - tags: allTags + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } ) @config["subnet_group_name"] = resp.db_subnet_group.db_subnet_group_name @@ -378,7 +361,7 @@ def createDBParameterGroup(cluster = false) params = { db_parameter_group_family: @config["parameter_group_family"], description: "Parameter group for #{@mu_name}", - tags: allTags + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } } params[cluster ? :db_cluster_parameter_group_name : :db_parameter_group_name] = @config["parameter_group_name"] MU.log "Creating a #{cluster ? 
"cluster" : "database" } parameter group #{@config["parameter_group_name"]}" @@ -588,13 +571,13 @@ def createNewSnapshot MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster_snapshot( db_cluster_snapshot_identifier: snap_id, db_cluster_identifier: src_ref.id, - tags: allTags + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } ) else MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_snapshot( db_snapshot_identifier: snap_id, db_instance_identifier: src_ref.id, - tags: allTags + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } ) end } @@ -944,11 +927,9 @@ def self.validate_master_password(db) ) return true if pw.nil? pw - else - return true end - if pw.length < 8 or pw.match(/[\/\\@\s]/) or pw > maxlen + if pw and (pw.length < 8 or pw.match(/[\/\\@\s]/) or pw > maxlen) MU.log "Database password specified in 'password' or 'auth_vault' doesn't meet RDS requirements. Must be between 8 and #{maxlen.to_s} chars and have only ASCII characters other than /, @, \", or [space].", MU::ERR return false end @@ -1256,7 +1237,7 @@ def create_read_replica db_instance_class: @config["size"], auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], publicly_accessible: @config["publicly_accessible"], - tags: allTags, + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } }, db_subnet_group_name: @config["subnet_group_name"], storage_type: @config["storage_type"] } From 6a7a68e7b76ef1cedbe1c98e0dac8e05d18335b3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 27 Mar 2020 16:47:33 -0400 Subject: [PATCH 026/124] AWS::Database: fold extraneous fetch methods into .find --- modules/mu/clouds/aws/database.rb | 84 +++++++++++-------------------- 1 file changed, 29 insertions(+), 55 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 879b6160b..56ca04854 100644 --- 
a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -147,8 +147,16 @@ def self.find(**args) found = {} if args[:cloud_id] - resp = MU::Cloud::AWS::Database.getDatabaseById(args[:cloud_id], region: args[:region], credentials: args[:credentials]) - found[args[:cloud_id]] = resp if resp + begin + resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_instances(db_instance_identifier: args[:cloud_id]).db_instances.first + return { args[:cloud_id] => resp } if resp + rescue Aws::RDS::Errors::DBInstanceNotFoundFault + end + begin + resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_clusters(db_cluster_identifier: args[:cloud_id]).db_clusters.first + rescue Aws::RDS::Errors::DBClusterNotFoundFault + end + return { args[:cloud_id] => resp } if resp elsif args[:tag_value] MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).describe_db_instances.db_instances.each { |db| resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).list_tags_for_resource( @@ -516,27 +524,6 @@ def allowHost(cidr) end end - # Retrieve the complete cloud provider description of a database instance. - # @param db_id [String]: The cloud provider's identifier for this database. - # @param region [String]: The cloud provider region - # @return [OpenStruct] - def self.getDatabaseById(db_id, region: MU.curRegion, credentials: nil) - raise MuError, "You must provide a db_id" if db_id.nil? - MU::Cloud::AWS.rds(region: region, credentials: credentials).describe_db_instances(db_instance_identifier: db_id).db_instances.first - rescue Aws::RDS::Errors::DBInstanceNotFound - # We're fine with this returning nil when searching for a database instance the doesn't exist. - end - - # Retrieve the complete cloud provider description of a database cluster. - # @param db_cluster_id [String]: The cloud provider's identifier for this database cluster. 
- # @param region [String]: The cloud provider region - # @return [OpenStruct] - def self.getDatabaseClusterById(db_cluster_id, region: MU.curRegion, credentials: nil) - MU::Cloud::AWS.rds(region: region, credentials: credentials).describe_db_clusters(db_cluster_identifier: db_cluster_id).db_clusters.first - rescue Aws::RDS::Errors::DBClusterNotFoundFault - # We're fine with this returning nil when searching for a database cluster the doesn't exist. - end - # Return the metadata for this ContainerCluster # @return [Hash] def notify @@ -547,19 +534,6 @@ def notify deploy_struct end - # Return the cloud descriptor for this database cluster or instance - def cloud_desc(use_cache: true) - return @cloud_desc_cache if @cloud_desc_cache and use_cache - - @cloud_desc_cache = if @config['create_cluster'] - MU::Cloud::AWS::Database.getDatabaseClusterById(@cloud_id, region: @config['region'], credentials: @credentials) - else - MU::Cloud::AWS::Database.getDatabaseById(@cloud_id, region: @config['region'], credentials: @credentials) - end - - @cloud_desc_cache - end - # Generate a snapshot from the database described in this instance. # @return [String]: The cloud provider's identifier for the snapshot. def createNewSnapshot @@ -1004,7 +978,12 @@ def self.can_read_replica?(db) if engine.nil? or !engine['features'] or !engine['features'][db['engine_version']] return true # we can't be sure, so let the API sort it out later end - engine['features'][db['engine_version']].include?(:supports_read_replica) + + if !engine['features'][db['engine_version']].include?(:supports_read_replica) + MU.log "Engine #{db['engine']} #{db['engine_version']} does not appear to support read replicas", MU::ERR + return false + end + true end private_class_method :can_read_replica? 
@@ -1031,7 +1010,7 @@ def self.valid_cloudwatch_logs?(db) def self.validate_engine(db) ok = true - if db['create_cluster'] or (db['engine'] and db['engine'].match(/aurora/)) or db["member_of_cluster"] + if db['create_cluster'] or db["member_of_cluster"] or (db['engine'] and db['engine'].match(/aurora/)) case db['engine'] when "mysql", "aurora", "aurora-mysql" if db["engine_version"].match(/^5\.6/) or db["cluster_mode"] == "serverless" @@ -1039,18 +1018,18 @@ def self.validate_engine(db) else db["engine"] = "aurora-mysql" end - when "postgres", "postgresql", "postgresql-mysql" + when /postgres/ db["engine"] = "aurora-postgresql" else ok = false - MU.log "Database #{db['name']}: Requested a clustered database, but engine #{db['engine']} is not supported for clustering", MU::ERR + MU.log "#{db['engine']} is not supported for clustering", MU::ERR end end engine_cfg = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) if !engine_cfg or engine_cfg['versions'].empty? or engine_cfg['families'].empty? 
- MU.log "RDS engine #{db['engine']} has no supported versions in #{db['region']}", MU::ERR, details: engines.keys.sort + MU.log "RDS engine #{db['engine']} reports no supported versions in #{db['region']}", MU::ERR, details: engines.keys.sort return false end @@ -1076,7 +1055,6 @@ def self.validate_engine(db) end if (db['create_read_replica'] or db['read_replica_of']) and !can_read_replica?(db) - MU.log "Engine #{db['engine']} #{db['engine_version']} does not appear to support read replicas", MU::ERR ok = false end @@ -1477,7 +1455,7 @@ def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, ma # @param db [OpenStruct]: The cloud provider's description of the database artifact # @return [void] def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil) - db ||= MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region, credentials: credentials) if cloud_id + db ||= MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first if cloud_id db_obj ||= MU::MommaCat.findStray( "AWS", "database", @@ -1503,12 +1481,10 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M end - if db.db_instance_status != "available" - MU.retrier([], wait: 60, loop_if: Proc.new { %w{creating modifying backing-up}.include?(db.db_instance_status) }) { - db = MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region, credentials: credentials) - return if db.nil? - } - end + MU.retrier([], wait: 60, loop_if: Proc.new { %w{creating modifying backing-up}.include?(db.db_instance_status) }) { + db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first + return if db.nil? 
+ } MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: db.endpoint.address, cloudclass: MU::Cloud::Database, delete: true) if !noop @@ -1538,7 +1514,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params) } MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound]) { - del_db = MU::Cloud::AWS::Database.getDatabaseById(cloud_id, region: region) + del_db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region).values.first break if del_db.nil? or del_db.db_instance_status == "deleted" } end @@ -1573,7 +1549,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M # @return [void] def self.terminate_rds_cluster(cluster, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil) - cluster ||= MU::Cloud::AWS::Database.getDatabaseClusterById(cloud_id, region: region, credentials: credentials) if cloud_id + cluster ||= MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first if cloud_id cluster_obj ||= MU::MommaCat.findStray( "AWS", "database", @@ -1594,7 +1570,7 @@ def self.terminate_rds_cluster(cluster, noop: false, skipsnapshots: false, regio unless cluster.status == "available" loop do MU.log "Waiting for #{cloud_id} to be in a removable state...", MU::NOTICE - cluster = MU::Cloud::AWS::Database.getDatabaseClusterById(cloud_id, region: region, credentials: credentials) + cluster = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first break unless %w{creating modifying backing-up}.include?(cluster.status) sleep 60 end @@ -1638,15 +1614,13 @@ def self.terminate_rds_cluster(cluster, noop: false, skipsnapshots: false, regio unless noop loop do MU.log "Waiting for #{cloud_id} to terminate", MU::NOTICE - cluster = 
MU::Cloud::AWS::Database.getDatabaseClusterById(cloud_id, region: region, credentials: credentials) + cluster = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first break unless cluster sleep 30 end end end - # We're wating until getDatabaseClusterById returns nil. This assumes the database cluster object doesn't linger around in "deleted" state for a while. - # Cleanup the cluster vault groomer = if cluster_obj From 597ecf6e965a1a423cb67dceb563d0a040581c0d Mon Sep 17 00:00:00 2001 From: John Stange Date: Sun, 29 Mar 2020 21:23:22 -0400 Subject: [PATCH 027/124] AWS::Database: fix snapshot builds; add oracle tests --- modules/mu/clouds/aws/database.rb | 96 +++++++++++++++---------------- modules/mu/config/database.rb | 2 +- modules/tests/rds.yaml | 29 ++++++++++ 3 files changed, 75 insertions(+), 52 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 56ca04854..dd2f9c2a1 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -127,7 +127,32 @@ def create createDBParameterGroup(true) end - createDbCluster + @config["cluster_identifier"] ||= @cloud_id + + if @config['creation_style'] == "point_in_time" + create_point_in_time + else + create_basic + end + + wait_until_available + + if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"]) + modify_db_cluster_struct = { + db_cluster_identifier: @cloud_id, + apply_immediately: true, + backup_retention_period: @config["backup_retention_period"], + db_cluster_parameter_group_name: @config["parameter_group_name"], + master_user_password: @config["password"], + preferred_backup_window: @config["preferred_backup_window"] + } + + modify_db_cluster_struct[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] + MU::Cloud::AWS.rds(region: @config['region'], credentials: 
@config['credentials']).modify_db_cluster(modify_db_cluster_struct) + wait_until_available + end + + do_naming elsif @config["add_cluster_node"] add_cluster_node else @@ -147,10 +172,12 @@ def self.find(**args) found = {} if args[:cloud_id] - begin - resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_instances(db_instance_identifier: args[:cloud_id]).db_instances.first - return { args[:cloud_id] => resp } if resp - rescue Aws::RDS::Errors::DBInstanceNotFoundFault + if !args[:cluster] + begin + resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_instances(db_instance_identifier: args[:cloud_id]).db_instances.first + return { args[:cloud_id] => resp } if resp + rescue Aws::RDS::Errors::DBInstanceNotFound + end end begin resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_clusters(db_cluster_identifier: args[:cloud_id]).db_clusters.first @@ -269,43 +296,6 @@ def genericParams params end - # Create the database cluster described in this instance - # @return [String]: The cloud provider's identifier for this database cluster. 
- def createDbCluster - @config["cluster_identifier"] ||= @cloud_id - - if @config['creation_style'] == "point_in_time" - create_point_in_time - else - create_basic - end - - wait_until_available - - if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"]) - modify_db_cluster_struct = { - db_cluster_identifier: @cloud_id, - apply_immediately: true, - backup_retention_period: @config["backup_retention_period"], - db_cluster_parameter_group_name: @config["parameter_group_name"], - master_user_password: @config["password"], - preferred_backup_window: @config["preferred_backup_window"] - } - - modify_db_cluster_struct[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster(modify_db_cluster_struct) - - MU.retrier(wait: 10, max: 240, loop_if: Proc.new { cloud_desc(use_cache: false).status != "available" }) { |retries, _wait| - if retries > 0 and retries % 10 == 0 - MU.log "Waiting for modifications on RDS cluster #{@cloud_id}...", MU::NOTICE - end - } - end - - do_naming - @cloud_id - end - # Create a subnet group for a database. 
def createSubnetGroup # Finding subnets, creating security groups/adding holes, create subnet group @@ -539,6 +529,11 @@ def notify def createNewSnapshot snap_id = @deploy.getResourceName(@config["name"]) + Time.new.strftime("%M%S").to_s src_ref = MU::Config::Ref.get(@config["source"]) + src_ref.kitten + if !src_ref.id + MU.log "Failed to get an id from reference for creating a snapshot", MU::ERR, details: @config['source'] + raise "Failed to get an id from reference for creating a snapshot" + end MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 10) { if @config["create_cluster"] @@ -1026,6 +1021,8 @@ def self.validate_engine(db) end end + db["engine"] = "oracle-se2" if db["engine"] == "oracle" + engine_cfg = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) if !engine_cfg or engine_cfg['versions'].empty? or engine_cfg['families'].empty? @@ -1123,8 +1120,8 @@ def create_basic params = genericParams params[:storage_encrypted] = @config["storage_encrypted"] params[:master_user_password] = @config['password'] - params[:vpc_security_group_ids] = @config["vpc_security_group_ids"] params[:engine_version] = @config["engine_version"] + params[:vpc_security_group_ids] = @config["vpc_security_group_ids"] params[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] if @config['create_cluster'] @@ -1153,7 +1150,9 @@ def create_basic MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) + [:storage_encrypted, :master_user_password, :engine_version, :allocated_storage, :backup_retention_period, :preferred_backup_window, :master_username, :db_name, :database_name].each { |p| params.delete(p) } MU.log "Creating database #{@config['create_cluster'] ? 
"cluster" : "instance" } #{@cloud_id} from snapshot #{@config["snapshot_id"]}" + pp params if @config['create_cluster'] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_from_snapshot(params) else @@ -1191,7 +1190,7 @@ def create_point_in_time params[:use_latest_restorable_time] = true if @config['restore_time'] == "latest" - MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { + MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 6, wait: 20) { MU.log "Creating database #{@config['create_cluster'] ? "cluster" : "instance" } #{@cloud_id} based on point in time backup #{@config['restore_time']} of #{@config['source'].id}" if @config['create_cluster'] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_to_point_in_time(params) @@ -1233,7 +1232,7 @@ def create_read_replica end } - MU.retrier([Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotAllowedFault], max: 5, wait: 10, on_retry: on_retry) { + MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotAllowedFault], max: 10, wait: 30, on_retry: on_retry) { MU.log "Creating read replica database instance #{@cloud_id} for #{@config['source'].id}" MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(params) } @@ -1323,12 +1322,7 @@ def createDb end MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) - - MU.retrier(wait: 10, max: 240, loop_if: Proc.new { cloud_desc(use_cache: false).db_instance_status != "available" }) { |retries, _wait| - if retries > 0 and retries % 10 == 0 - MU.log "Waiting for modifications on RDS database #{@cloud_id}...", MU::NOTICE - end - } + wait_until_available end diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index 
5060a994d..27d92198b 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -53,7 +53,7 @@ def self.schema }, "engine_version" => {"type" => "string"}, "engine" => { - "enum" => ["mysql", "postgres", "oracle-se1", "oracle-se2", "oracle-se", "oracle-ee", "sqlserver-ee", "sqlserver-se", "sqlserver-ex", "sqlserver-web", "aurora", "mariadb"], + "enum" => ["mysql", "postgres", "oracle", "oracle-se1", "oracle-se2", "oracle-se", "oracle-ee", "sqlserver-ee", "sqlserver-se", "sqlserver-ex", "sqlserver-web", "aurora", "mariadb"], "type" => "string" }, "add_cluster_node" => { diff --git a/modules/tests/rds.yaml b/modules/tests/rds.yaml index b30943ecf..e14ccec4d 100644 --- a/modules/tests/rds.yaml +++ b/modules/tests/rds.yaml @@ -26,6 +26,14 @@ databases: - slowquery multi_az_on_create: true master_user: Bob +- name: mysql-from-snap + size: db.t2.small + engine: mysql + vpc: + name: rdstests + creation_style: new_snapshot + source: + name: mysql-base - name: mysql-point-in-time creation_style: point_in_time size: db.t2.micro @@ -37,3 +45,24 @@ databases: name: mysql-base vpc: name: rdstests +- name: oracle-base + size: db.m5.large + engine: oracle + vpc: + name: rdstests +- name: oracle-from-snap + size: db.m5.large + engine: oracle + vpc: + name: rdstests + creation_style: new_snapshot + source: + name: oracle-base +- name: oracle-point-in-time + size: db.m5.large + engine: oracle + vpc: + name: rdstests + creation_style: point_in_time + source: + name: oracle-base From 33a0a6f65d7ac68809a741da5c66af588ccabbc3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Mon, 30 Mar 2020 13:49:47 -0400 Subject: [PATCH 028/124] AWS::Database: mix in SQL Server, and address some validation problems that it introduces; add cluster_mode multimaster --- modules/mu/clouds/aws/database.rb | 44 +++++++++++++--------------- modules/tests/rds.yaml | 48 +++++++++++++++++++++++++------ 2 files changed, 60 insertions(+), 32 deletions(-) diff --git 
a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index dd2f9c2a1..468a6aa2d 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -187,9 +187,9 @@ def self.find(**args) elsif args[:tag_value] MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).describe_db_instances.db_instances.each { |db| resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).list_tags_for_resource( - resource_name: MU::Cloud::AWS::Database.getARN(db.db_instance_identifier, "db", "rds", region: args[:region], credentials: args[:credentials]) + resource_name: MU::Cloud::AWS::Database.getARN(db.db_instance_identifier, "db", "rds", region: args[:region], credentials: args[:credentials]) ) - if resp && resp.tag_list && !resp.tag_list.empty? + if resp and resp.tag_list resp.tag_list.each { |tag| found[db.db_instance_identifier] = db if tag.key == args[:tag_key] and tag.value == args[:tag_value] } @@ -646,29 +646,18 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. 
threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster) { |id| MU.log "Deleting RDS subnet group #{id}" - if !noop - MU.retrier([Aws::RDS::Errors::InvalidDBSubnetGroupStateFault], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBSubnetGroupNotFoundFault]) { - MU::Cloud::AWS.rds(region: region).delete_db_subnet_group(db_subnet_group_name: id) - } - end - } - - threads.concat threaded_resource_purge(:describe_db_parameter_groups, :db_parameter_groups, :db_parameter_group_name, "pg", region, credentials, ignoremaster) { |id| - MU.log "Deleting RDS database parameter group #{id}" - if !noop - MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) { - MU::Cloud::AWS.rds(region: region).delete_db_parameter_group(db_parameter_group_name: id) - } - end + MU.retrier([Aws::RDS::Errors::InvalidDBSubnetGroupStateFault], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBSubnetGroupNotFoundFault]) { + MU::Cloud::AWS.rds(region: region).delete_db_subnet_group(db_subnet_group_name: id) if !noop + } } - threads.concat threaded_resource_purge(:describe_db_cluster_parameter_groups, :db_cluster_parameter_groups, :db_cluster_parameter_group_name, "pg", region, credentials, ignoremaster) { |id| - MU.log "Deleting RDS cluster parameter group #{id}" - if !noop + ["db", "db_cluster"].each { |type| + threads.concat threaded_resource_purge("describe_#{type}_parameter_groups".to_sym, "#{type}_parameter_groups".to_sym, "#{type}_parameter_group_name".to_sym, "pg", region, credentials, ignoremaster) { |id| + MU.log "Deleting RDS #{type} parameter group #{id}" MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) { - MU::Cloud::AWS.rds(region: region).delete_db_cluster_parameter_group(db_cluster_parameter_group_name: id) + 
MU::Cloud::AWS.rds(region: region).send("delete_#{type}_parameter_group", { "#{type}_parameter_group_name".to_sym => id }) if !noop } - end + } } # Wait for all of the databases subnet/parameter groups to finish cleanup before proceeding @@ -718,7 +707,7 @@ def self.schema(_config) "cluster_mode" => { "type" => "string", "description" => "The DB engine mode of the DB cluster", - "enum" => ["provisioned", "serverless", "parallelquery", "global"], + "enum" => ["provisioned", "serverless", "parallelquery", "global", "multimaster"], "default" => "provisioned" }, "storage_type" => { @@ -949,7 +938,7 @@ def self.validateConfig(db, _configurator) if !db['vpc'] db["vpc"] = MU::Cloud::AWS::VPC.defaultVpc(db['region'], db['credentials']) - if db['vpc'] + if db['vpc'] and !(db['engine'].match(/sqlserver/) and db['create_read_replica']) MU.log "Using default VPC for database '#{db['name']}; this sets 'publicly_accessible' to true.", MU::WARN db['publicly_accessible'] = true end @@ -961,6 +950,10 @@ def self.validateConfig(db, _configurator) MU.log "Setting publicly_accessible to false on database '#{db['name']}', since deploying into private subnets.", MU::NOTICE db['publicly_accessible'] = false end + if db['engine'].match(/sqlserver/) and db['create_read_replica'] + MU.log "SQL Server does not support read replicas in VPC deployments", MU::ERR + ok = false + end end ok @@ -1008,8 +1001,10 @@ def self.validate_engine(db) if db['create_cluster'] or db["member_of_cluster"] or (db['engine'] and db['engine'].match(/aurora/)) case db['engine'] when "mysql", "aurora", "aurora-mysql" - if db["engine_version"].match(/^5\.6/) or db["cluster_mode"] == "serverless" + if (db['engine_version'] and db["engine_version"].match(/^5\.6/)) or db["cluster_mode"] == "serverless" db["engine"] = "aurora" + db["engine_version"] = "5.6" + db['publicly_accessible'] = false else db["engine"] = "aurora-mysql" end @@ -1022,6 +1017,7 @@ def self.validate_engine(db) end db["engine"] = "oracle-se2" if 
db["engine"] == "oracle" + db["engine"] = "sqlserver-ex" if db["engine"] == "sqlserver" engine_cfg = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) diff --git a/modules/tests/rds.yaml b/modules/tests/rds.yaml index e14ccec4d..b3c0296ec 100644 --- a/modules/tests/rds.yaml +++ b/modules/tests/rds.yaml @@ -16,9 +16,18 @@ databases: vpc: name: rdstests master_user: Bob -- name: mysql-base + +#- name: mysqlcluster +# size: db.t3.medium +# engine: aurora +# cluster_mode: serverless +# create_cluster: true +# vpc: +# name: rdstests + +- name: maria-base size: db.t2.small - engine: mysql + engine: mariadb vpc: name: rdstests create_read_replica: true @@ -26,25 +35,26 @@ databases: - slowquery multi_az_on_create: true master_user: Bob -- name: mysql-from-snap +- name: maria-from-snap size: db.t2.small - engine: mysql + engine: mariadb vpc: name: rdstests creation_style: new_snapshot source: - name: mysql-base -- name: mysql-point-in-time + name: maria-base +- name: maria-point-in-time creation_style: point_in_time size: db.t2.micro - engine: mysql + engine: mariadb cloudwatch_logs: - error - general source: - name: mysql-base + name: maria-base vpc: name: rdstests + - name: oracle-base size: db.m5.large engine: oracle @@ -66,3 +76,25 @@ databases: creation_style: point_in_time source: name: oracle-base + +- name: sqlserver-base + size: db.t2.small + engine: sqlserver-ex + vpc: + name: rdstests +- name: sqlserver-from-snap + size: db.t2.small + engine: sqlserver-ex + vpc: + name: rdstests + creation_style: new_snapshot + source: + name: sqlserver-base +- name: sqlserver-point-in-time + size: db.t2.small + engine: sqlserver-ex + vpc: + name: rdstests + creation_style: point_in_time + source: + name: sqlserver-base From 1e53eacca9949d89b321cc15af8853cba4eddea0 Mon Sep 17 00:00:00 2001 From: John Stange Date: Mon, 30 Mar 2020 18:10:01 -0400 Subject: [PATCH 029/124] AWS::Database: correct cross-region replication; incorporate parameter groups into 
testing --- modules/mu/clouds/aws/database.rb | 29 +++++++++++++++++------------ modules/mu/clouds/aws/vpc.rb | 2 +- modules/mu/config/database.rb | 6 +++++- modules/tests/rds.yaml | 10 +++++++++- 4 files changed, 32 insertions(+), 15 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 468a6aa2d..b78770ca7 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -116,7 +116,7 @@ def create createNewSnapshot end - @config["subnet_group_name"] = @mu_name + @config["subnet_group_name"] = @mu_name if @vpc if @config["create_cluster"] getPassword @@ -252,10 +252,10 @@ def genericParams paramhash = { db_cluster_identifier: @cloud_id, engine: @config["engine"], - db_subnet_group_name: @config["subnet_group_name"].downcase, vpc_security_group_ids: @config["vpc_security_group_ids"], tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } } + paramhash[:db_subnet_group_name] = @config["subnet_group_name"].downcase if @vpc if @config['cloudwatch_logs'] paramhash[:enable_cloudwatch_logs_exports ] = @config['cloudwatch_logs'] end @@ -603,10 +603,16 @@ def self.quality # @return [Array] def self.threaded_resource_purge(describe_method, list_method, id_method, arn_type, region, credentials, ignoremaster) deletia = [] + resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).send(describe_method) resp.send(list_method).each { |resource| - arn = MU::Cloud::AWS::Database.getARN(resource.send(id_method), arn_type, "rds", region: region, credentials: credentials) - tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list + begin + arn = MU::Cloud::AWS::Database.getARN(resource.send(id_method), arn_type, "rds", region: region, credentials: credentials) + tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list + rescue Aws::RDS::Errors::InvalidParameterValue 
=> e + MU.log "Failed to fetch ARN or tags of resource via #{id_method.to_s}", MU::WARN, details: resource + next + end if should_delete?(tags, ignoremaster) deletia << resource.send(id_method) @@ -905,7 +911,6 @@ def self.validateConfig(db, _configurator) ok = false if !validate_source_data(db) - ok = false if !validate_engine(db) db["license_model"] ||= @@ -919,15 +924,14 @@ def self.validateConfig(db, _configurator) ok = false if !validate_master_password(db) - if db["multi_az_on_create"] and db["multi_az_on_deploy"] MU.log "Both of multi_az_on_create and multi_az_on_deploy cannot be true", MU::ERR ok = false end if (db["db_parameter_group_parameters"] or db["cluster_parameter_group_parameters"]) and db["parameter_group_family"].nil? - MU.log "parameter_group_family must be set when setting db_parameter_group_parameters", MU::ERR - ok = false + engine = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) + db["parameter_group_family"] = engine['raw'][db['engine_version']].db_parameter_group_family end # Adding rules for Database instance storage. This varies depending on storage type and database type. @@ -1063,7 +1067,7 @@ def add_basic getPassword if @config['source'].nil? or @config['region'] != @config['source'].region - createSubnetGroup + createSubnetGroup if @vpc else MU.log "Note: Read Replicas automatically reside in the same subnet group as the source database, if they're both in the same region. This replica may not land in the VPC you intended.", MU::WARN end @@ -1100,7 +1104,7 @@ def add_cluster_node raise MuError, "Couldn't resolve cluster node reference to a unique live Database in #{@mu_name}" if cluster.nil? || cluster.cloud_id.nil? 
@config['cluster_identifier'] = cluster.cloud_id.downcase # We're overriding @config["subnet_group_name"] because we need each cluster member to use the cluster's subnet group instead of a unique subnet group - @config["subnet_group_name"] = @config['cluster_identifier'] + @config["subnet_group_name"] = @config['cluster_identifier'] if @vpc @config["creation_style"] = "new" if @config["creation_style"] != "new" if @config.has_key?("parameter_group_family") @@ -1230,6 +1234,7 @@ def create_read_replica MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotAllowedFault], max: 10, wait: 30, on_retry: on_retry) { MU.log "Creating read replica database instance #{@cloud_id} for #{@config['source'].id}" +pp params MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(params) } end @@ -1503,9 +1508,9 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists], wait: 60, max: 20, on_retry: on_retry) { MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params) } - MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound]) { + del_db = nil + MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound], loop_if: Proc.new { del_db and del_db.db_instance_status != "deleted" }) { del_db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region).values.first - break if del_db.nil? 
or del_db.db_instance_status == "deleted" } end end diff --git a/modules/mu/clouds/aws/vpc.rb b/modules/mu/clouds/aws/vpc.rb index 3d3f9673a..f5f037a22 100644 --- a/modules/mu/clouds/aws/vpc.rb +++ b/modules/mu/clouds/aws/vpc.rb @@ -1267,7 +1267,7 @@ def self.getDefaultSg(vpc_id, region: MU.curRegion, credentials: nil) # Try to locate the default VPC for a region, and return a BoK-style # config fragment for something that might want to live in it. - def defaultVpc(region, credentials) + def self.defaultVpc(region, credentials) cfg_fragment = nil MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_vpcs.vpcs.each { |vpc| if vpc.is_default diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index 27d92198b..4ae5a4d55 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -334,11 +334,15 @@ def self.validate(db, configurator) replica["credentials"] = db["credentials"] replica['create_read_replica'] = false replica["create_cluster"] = false + replica["region"] = db['read_replica_region'] + if db['region'] != replica['region'] + replica.delete("vpc") + end replica['read_replica_of'] = { "name" => db['name'], "cloud" => db['cloud'], + "region" => db['region'], "credentials" => db['credentials'], - "region" => db['read_replica_region'] || db['region'] } replica['dependencies'] << { "type" => "database", diff --git a/modules/tests/rds.yaml b/modules/tests/rds.yaml index b3c0296ec..498ed4f1b 100644 --- a/modules/tests/rds.yaml +++ b/modules/tests/rds.yaml @@ -1,4 +1,4 @@ -# clouds: AWS + clouds: AWS --- appname: smoketest vpcs: @@ -13,6 +13,9 @@ databases: backup_retention_period: 10 cluster_node_count: 2 create_cluster: true + cluster_parameter_group_parameters: + - name: log_disconnections + value: "0" vpc: name: rdstests master_user: Bob @@ -28,9 +31,14 @@ databases: - name: maria-base size: db.t2.small engine: mariadb + db_parameter_group_parameters: + - name: autocommit + value: "0" vpc: name: rdstests 
+ region: us-east-1 create_read_replica: true + read_replica_region: us-east-2 cloudwatch_logs: - slowquery multi_az_on_create: true From 95c7eacf877215da559d9797d73ed68ceee4a3a3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Mon, 30 Mar 2020 23:35:55 -0400 Subject: [PATCH 030/124] AWS::Database: fix test typo, catch some security group stupidity in cross-region replicas --- modules/mu/clouds/aws/database.rb | 17 +++++++++++++---- modules/tests/rds.yaml | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index b78770ca7..93aa7e196 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -1152,7 +1152,6 @@ def create_basic if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) [:storage_encrypted, :master_user_password, :engine_version, :allocated_storage, :backup_retention_period, :preferred_backup_window, :master_username, :db_name, :database_name].each { |p| params.delete(p) } MU.log "Creating database #{@config['create_cluster'] ? 
"cluster" : "instance" } #{@cloud_id} from snapshot #{@config["snapshot_id"]}" - pp params if @config['create_cluster'] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_from_snapshot(params) else @@ -1234,7 +1233,6 @@ def create_read_replica MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotAllowedFault], max: 10, wait: 30, on_retry: on_retry) { MU.log "Creating read replica database instance #{@cloud_id} for #{@config['source'].id}" -pp params MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(params) } end @@ -1306,9 +1304,13 @@ def createDb if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"]) or @config["read_replica_of"] mod_config = { db_instance_identifier: @cloud_id, - vpc_security_group_ids: @config["vpc_security_group_ids"], apply_immediately: true } + if !@config["read_replica_of"] or @config['region'] == @config['source'].region + mod_config[:vpc_security_group_ids] = @config["vpc_security_group_ids"] + end + + if !@config["read_replica_of"] mod_config[:preferred_backup_window] = @config["preferred_backup_window"] mod_config[:backup_retention_period] = @config["backup_retention_period"] @@ -1322,7 +1324,14 @@ def createDb mod_config[:preferred_maintenance_window] = @config["preferred_maintenance_window"] end - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) + begin + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) + rescue Aws::RDS::Errors::InvalidParameterValue => e + if e.message.match(/Invalid security group/) + MU.log e.message+" modifying "+@cloud_id, MU::ERR, details: mod_config + end + raise e + end wait_until_available end diff --git a/modules/tests/rds.yaml b/modules/tests/rds.yaml index 
498ed4f1b..23c6bd02a 100644 --- a/modules/tests/rds.yaml +++ b/modules/tests/rds.yaml @@ -1,4 +1,4 @@ - clouds: AWS +# clouds: AWS --- appname: smoketest vpcs: From 2341e9247f4dbc34c52dd162d68b873883539e4e Mon Sep 17 00:00:00 2001 From: Mu Administrator Date: Tue, 31 Mar 2020 18:18:56 -0400 Subject: [PATCH 031/124] mu-tools, MommaCat, AWS::Server: fix a bunch of dumb problems that boil down to Chef-based disk creation not working --- cookbooks/mu-tools/libraries/helper.rb | 2 +- cookbooks/mu-tools/recipes/apply_security.rb | 28 ++++++++++---------- modules/mu/clouds/aws/server.rb | 2 +- modules/mu/mommacat/daemon.rb | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/cookbooks/mu-tools/libraries/helper.rb b/cookbooks/mu-tools/libraries/helper.rb index 58dc12eae..08257f8c4 100644 --- a/cookbooks/mu-tools/libraries/helper.rb +++ b/cookbooks/mu-tools/libraries/helper.rb @@ -236,7 +236,7 @@ def mommacat_request(action, arg) response = nil begin secret = get_deploy_secret - if secret.nil? + if secret.nil? or secret.empty? 
raise "Failed to fetch deploy secret, and I can't communicate with Momma Cat without it" end diff --git a/cookbooks/mu-tools/recipes/apply_security.rb b/cookbooks/mu-tools/recipes/apply_security.rb index 33157c1ab..1fa60db1c 100644 --- a/cookbooks/mu-tools/recipes/apply_security.rb +++ b/cookbooks/mu-tools/recipes/apply_security.rb @@ -252,21 +252,21 @@ # end # 6.3 Configure PAM # 6.3.2 Set Password Creation Requirement Parameters Using pam_cracklib - template "/etc/pam.d/password-auth-local" do - source "etc_pamd_password-auth.erb" - mode 0644 - end - link "/etc/pam.d/password-auth" do - to "/etc/pam.d/password-auth-local" - end +# template "/etc/pam.d/password-auth-local" do +# source "etc_pamd_password-auth.erb" +# mode 0644 +# end +# link "/etc/pam.d/password-auth" do +# to "/etc/pam.d/password-auth-local" +# end #6.3.3 Set Lockout for Failed Password Attempts - template "/etc/pam.d/system-auth-local" do - source "etc_pamd_system-auth.erb" - mode 0644 - end - link "/etc/pam.d/system-auth" do - to "/etc/pam.d/system-auth-local" - end +# template "/etc/pam.d/system-auth-local" do +# source "etc_pamd_system-auth.erb" +# mode 0644 +# end +# link "/etc/pam.d/system-auth" do +# to "/etc/pam.d/system-auth-local" +# end #SV-50303r1_rule/SV-50304r1_rule execute "chown root:root /etc/shadow" diff --git a/modules/mu/clouds/aws/server.rb b/modules/mu/clouds/aws/server.rb index 7c5dd6a8c..8c5b09f84 100644 --- a/modules/mu/clouds/aws/server.rb +++ b/modules/mu/clouds/aws/server.rb @@ -1318,7 +1318,7 @@ def addVolume(dev, size, type: "gp2", delete_on_termination: false) if @deploy MU::Cloud::AWS.createStandardTags( - resource_id, + creation.volume_id, region: @config['region'], credentials: @config['credentials'], optional: @config['optional_tags'], diff --git a/modules/mu/mommacat/daemon.rb b/modules/mu/mommacat/daemon.rb index dd7885f76..0d3cb9046 100644 --- a/modules/mu/mommacat/daemon.rb +++ b/modules/mu/mommacat/daemon.rb @@ -33,7 +33,7 @@ def authKey(ciphertext) my_key 
= OpenSSL::PKey::RSA.new(@private_key) begin - if my_key.private_decrypt(ciphertext).force_encoding("UTF-8") == @deploy_secret.force_encoding("UTF-8") + if my_key.private_decrypt(ciphertext).force_encoding("UTF-8").chomp == @deploy_secret.force_encoding("UTF-8").chomp MU.log "Matched ciphertext for #{MU.deploy_id}", MU::INFO return true else From a658df5a5b83ab8381cf3cfb91d0d4f751504c9b Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Tue, 31 Mar 2020 18:29:25 -0400 Subject: [PATCH 032/124] AWS::Database: typo in password length validation --- modules/Gemfile.lock | 8 ++++---- modules/mu/clouds/aws/database.rb | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index 601421425..775badc02 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -54,7 +54,7 @@ GEM public_suffix (>= 2.0.2, < 4.0) ast (2.4.0) aws-eventstream (1.0.3) - aws-sdk-core (2.11.471) + aws-sdk-core (2.11.480) aws-sigv4 (~> 1.0) jmespath (~> 1.0) aws-sigv4 (1.1.1) @@ -691,7 +691,7 @@ GEM mini_portile2 (~> 2.4.0) nori (2.6.0) numerizer (0.1.1) - octokit (4.17.0) + octokit (4.18.0) faraday (>= 0.9) sawyer (~> 0.8.0, >= 0.5.3) ohai (14.14.0) @@ -708,7 +708,7 @@ GEM wmi-lite (~> 1.0) openssl-oaep (0.1.0) optimist (3.0.0) - os (1.0.1) + os (1.1.0) paint (1.0.1) parallel (1.19.1) parser (2.7.0.5) @@ -776,7 +776,7 @@ GEM rspec-its specinfra (~> 2.72) sfl (2.3) - signet (0.13.0) + signet (0.13.2) addressable (~> 2.3) faraday (>= 0.17.3, < 2.0) jwt (>= 1.5, < 3.0) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 93aa7e196..a71b9c797 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -893,7 +893,7 @@ def self.validate_master_password(db) pw end - if pw and (pw.length < 8 or pw.match(/[\/\\@\s]/) or pw > maxlen) + if pw and (pw.length < 8 or pw.match(/[\/\\@\s]/) or pw.length > maxlen) MU.log "Database password specified in 'password' or 'auth_vault' 
doesn't meet RDS requirements. Must be between 8 and #{maxlen.to_s} chars and have only ASCII characters other than /, @, \", or [space].", MU::ERR return false end From 1637e12c5569057b7cdbf8492171159244b6bd70 Mon Sep 17 00:00:00 2001 From: Mu Administrator Date: Wed, 1 Apr 2020 11:57:49 -0400 Subject: [PATCH 033/124] Config: fix some subtly incorrect merging of cloud-specific config schema --- modules/mu/config/schema_helpers.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/mu/config/schema_helpers.rb b/modules/mu/config/schema_helpers.rb index 9a3b874a5..05564db28 100644 --- a/modules/mu/config/schema_helpers.rb +++ b/modules/mu/config/schema_helpers.rb @@ -302,7 +302,7 @@ def applySchemaDefaults(conf_chunk = config, schema_chunk = schema, depth = 0, s _toplevel_required, cloudschema = cloudclass.schema(self) newschema = schema_chunk["items"].dup - newschema["properties"].merge!(cloudschema) + MU::Config.schemaMerge(newschema["properties"], cloudschema, item["cloud"]) newschema else schema_chunk["items"].dup From 6f0f03eef544c076d6feed1b1f4f9111a9c6d934 Mon Sep 17 00:00:00 2001 From: Mu Administrator Date: Wed, 1 Apr 2020 13:44:51 -0400 Subject: [PATCH 034/124] AWS: fresh, public CentOS 6 base images sans marketplace pollution --- cookbooks/mu-tools/recipes/aws_api.rb | 9 ++++++++ modules/mu/defaults/AWS.yaml | 32 +++++++++++++-------------- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/cookbooks/mu-tools/recipes/aws_api.rb b/cookbooks/mu-tools/recipes/aws_api.rb index 5f44464b0..d21452125 100644 --- a/cookbooks/mu-tools/recipes/aws_api.rb +++ b/cookbooks/mu-tools/recipes/aws_api.rb @@ -21,3 +21,12 @@ version "2.11.24" action :install end + +if platform_family?("rhel") or platform_family?("amazon") + if node['platform_version'].to_i == 6 + package "python34-pip" + execute "/usr/bin/pip3 install awscli" do + not_if "test -x /usr/bin/aws" + end + end +end diff --git a/modules/mu/defaults/AWS.yaml 
b/modules/mu/defaults/AWS.yaml index f6215c90e..189de1cb0 100644 --- a/modules/mu/defaults/AWS.yaml +++ b/modules/mu/defaults/AWS.yaml @@ -17,22 +17,22 @@ rhel71: &4 us-west-1: ami-04898e596c06e802b us-west-2: ami-02db5457189a8a8c2 centos6: &3 - us-east-1: ami-06b6c01abc6998348 - ap-northeast-1: ami-0c5da73fde2cb6437 - ap-northeast-2: ami-0134fce6dc00eb00d - ap-south-1: ami-0e59a612e7c84836b - ap-southeast-1: ami-0e16974f528ae0dae - ap-southeast-2: ami-0e2feddf3dbf4d539 - ca-central-1: ami-089236a344dadad5f - eu-central-1: ami-0c4eed3fe046c3917 - eu-north-1: ami-05f636e89d0362c14 - eu-west-1: ami-00c50b11d713f90d3 - eu-west-2: ami-06cc78c32eed7f944 - eu-west-3: ami-0ba626236ad786c54 - sa-east-1: ami-07c3b2a5a41e92376 - us-east-2: ami-01129e636778acfbc - us-west-1: ami-0632e646cd5089ffc - us-west-2: ami-0ce4c9f2e1037de53 + us-east-1: ami-0ccdc671f12147a1d + us-east-2: ami-00d0e8bc2f05ab949 + ap-northeast-1: ami-0726801ceef87f5f8 + ap-northeast-2: ami-05fa4afc4a0493b0a + ap-south-1: ami-0d6e4f3b6592b3139 + ap-southeast-1: ami-0c988e3dc80b14653 + ap-southeast-2: ami-02ac856fd094675ef + ca-central-1: ami-0ce7e343953af2292 + eu-central-1: ami-0ce8317423cea27b8 + eu-north-1: ami-0a923b493d5fc9743 + eu-west-1: ami-06e0f02328921c865 + eu-west-2: ami-07ae118c8814df140 + eu-west-3: ami-03c1017cd1ccc6e9d + sa-east-1: ami-05212ae133b9c3ba1 + us-west-1: ami-0b05ec54412b9f8b0 + us-west-2: ami-0447e036b102b2ca0 centos7: us-east-1: ami-07e6f661e71ad964b ap-northeast-1: ami-0988180d74897c639 From 63a673b8836c5d6fbe2c66863a9532f29f7e427d Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Wed, 1 Apr 2020 15:42:06 -0400 Subject: [PATCH 035/124] LoadBalancer: don't skip healthchecks when inferring target_groups from listeners --- modules/mu/config/loadbalancer.rb | 9 +++++---- modules/mu/config/vpc.rb | 6 +++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/modules/mu/config/loadbalancer.rb b/modules/mu/config/loadbalancer.rb index 94de86288..46e80109a 100644 
--- a/modules/mu/config/loadbalancer.rb +++ b/modules/mu/config/loadbalancer.rb @@ -405,13 +405,14 @@ def self.validate(lb, _configurator) "proto" => l["instance_protocol"], "port" => l["instance_port"] } - if lb["healthcheck"] - hc_target = lb['healthcheck']['target'].match(/^([^:]+):(\d+)(.*)/) - tg["healthcheck"] = lb['healthcheck'].dup + l['healthcheck'] ||= lb['healthcheck'] if lb['healthcheck'] + if l["healthcheck"] + hc_target = l['healthcheck']['target'].match(/^([^:]+):(\d+)(.*)/) + tg["healthcheck"] = l['healthcheck'].dup proto = ["HTTP", "HTTPS"].include?(hc_target[1]) ? hc_target[1] : l["instance_protocol"] tg['healthcheck']['target'] = "#{proto}:#{hc_target[2]}#{hc_target[3]}" tg['healthcheck']["httpcode"] = "200,301,302" - MU.log "Converting classic-style ELB health check target #{lb['healthcheck']['target']} to ALB style for target group #{tgname} (#{l["instance_protocol"]}:#{l["instance_port"]}).", details: tg['healthcheck'] + MU.log "Converting classic-style ELB health check target #{l['healthcheck']['target']} to ALB style for target group #{tgname} (#{l["instance_protocol"]}:#{l["instance_port"]}).", details: tg['healthcheck'] end lb["targetgroups"] << tg } diff --git a/modules/mu/config/vpc.rb b/modules/mu/config/vpc.rb index bfacbb5eb..800e58d8e 100644 --- a/modules/mu/config/vpc.rb +++ b/modules/mu/config/vpc.rb @@ -419,7 +419,7 @@ def self.validate(vpc, configurator) if configurator.updating and configurator.existing_deploy and configurator.existing_deploy.original_config['vpcs'] configurator.existing_deploy.original_config['vpcs'].each { |v| - if v['name'] == vpc['name'] + if v['name'].to_s == vpc['name'].to_s vpc['ip_block'] = v['ip_block'] vpc['peers'] ||= [] vpc['peers'].concat(v['peers']) @@ -431,6 +431,10 @@ def self.validate(vpc, configurator) break end } + if !vpc['ip_block'] + MU.log "Loading existing deploy but can't find IP block of VPC #{vpc['name']}", MU::ERR + ok = false + end else using_default_cidr = true vpc['ip_block'] = 
"10.0.0.0/16" From ee64b0c50c0c6af8fa49a29e56a21ab23c70fb45 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 1 Apr 2020 17:12:04 -0400 Subject: [PATCH 036/124] ContainerCluster: default Kubernetes engine to 1.14 so GKE will still work --- modules/mu/config/container_cluster.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/mu/config/container_cluster.rb b/modules/mu/config/container_cluster.rb index 9aa97bcb3..aa1cefed7 100644 --- a/modules/mu/config/container_cluster.rb +++ b/modules/mu/config/container_cluster.rb @@ -48,7 +48,7 @@ def self.schema "properties" => { "version" => { "type" => "string", - "default" => "1.13", + "default" => "1.14", "description" => "Version of Kubernetes control plane to deploy", }, "max_pods" => { From 9e11edb5404856561eb3ae5969b92fcf721ce277 Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Wed, 1 Apr 2020 17:20:40 -0400 Subject: [PATCH 037/124] AWS::Server: re(?)insert database vault secret access magic --- modules/mu/clouds/aws/server.rb | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/modules/mu/clouds/aws/server.rb b/modules/mu/clouds/aws/server.rb index 8c5b09f84..3d116e78f 100644 --- a/modules/mu/clouds/aws/server.rb +++ b/modules/mu/clouds/aws/server.rb @@ -883,6 +883,17 @@ def groom begin getIAMProfile + + dbs = @deploy.findLitterMate(type: "database", return_all: true) + if dbs + dbs.each_pair { |sib_name, sib| + @groomer.groomer_class.grantSecretAccess(@mu_name, sib_name, "database_credentials") + if sib.config and sib.config['auth_vault'] + @groomer.groomer_class.grantSecretAccess(@mu_name, sib.config['auth_vault']['vault'], sib.config['auth_vault']['item']) + end + } + end + if @config['groom'].nil? or @config['groom'] @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: (windows? 
and @config['groomer'] != "Ansible"), timeout: @config['groomer_timeout']) end From 9e71bf78b97ba4a472e472c7a54482e8fdcf58bc Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 1 Apr 2020 19:05:26 -0400 Subject: [PATCH 038/124] gem version bumps --- modules/Gemfile.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index 775badc02..0314919c6 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -54,7 +54,7 @@ GEM public_suffix (>= 2.0.2, < 4.0) ast (2.4.0) aws-eventstream (1.0.3) - aws-sdk-core (2.11.480) + aws-sdk-core (2.11.481) aws-sigv4 (~> 1.0) jmespath (~> 1.0) aws-sigv4 (1.1.1) @@ -751,14 +751,14 @@ GEM rspec_junit_formatter (0.2.3) builder (< 4) rspec-core (>= 2, < 4, != 2.12.0) - rubocop (0.80.1) + rubocop (0.81.0) jaro_winkler (~> 1.5.1) parallel (~> 1.10) parser (>= 2.7.0.1) rainbow (>= 2.2.2, < 4.0) rexml ruby-progressbar (~> 1.7) - unicode-display_width (>= 1.4.0, < 1.7) + unicode-display_width (>= 1.4.0, < 2.0) ruby-graphviz (1.2.5) rexml ruby-progressbar (1.10.1) @@ -786,7 +786,7 @@ GEM solve (4.0.3) molinillo (~> 0.6) semverse (>= 1.1, < 4.0) - specinfra (2.82.12) + specinfra (2.82.13) net-scp net-ssh (>= 2.7) net-telnet (= 0.1.1) @@ -809,7 +809,7 @@ GEM unf (0.1.4) unf_ext unf_ext (0.0.7.2) - unicode-display_width (1.6.1) + unicode-display_width (1.7.0) uuidtools (2.1.5) winrm (2.3.4) builder (>= 2.1.2) From 5d7ccca9412a6240918847ae5a1a17c6dbbf347e Mon Sep 17 00:00:00 2001 From: Mu Administrator Date: Thu, 2 Apr 2020 13:29:30 -0400 Subject: [PATCH 039/124] AWS: new CentOS 7 base images, sans marketplace codes and filesystem corruption --- extras/generate-stock-images | 1 + modules/mu/clouds/aws/server.rb | 2 ++ modules/mu/defaults/AWS.yaml | 32 ++++++++++++++++---------------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/extras/generate-stock-images b/extras/generate-stock-images index 3bb117083..d000f048c 100644 --- 
a/extras/generate-stock-images +++ b/extras/generate-stock-images @@ -91,6 +91,7 @@ $opts[:clouds].each { |cloud| end next if !needed end + MU.log "Loading "+bok_dir+"/"+cloud+"/"+platform+".yaml" conf_engine = MU::Config.new( bok_dir+"/"+cloud+"/"+platform+".yaml", default_credentials: $opts[(cloud.downcase+"_creds").to_sym] diff --git a/modules/mu/clouds/aws/server.rb b/modules/mu/clouds/aws/server.rb index 3d116e78f..a60826c1f 100644 --- a/modules/mu/clouds/aws/server.rb +++ b/modules/mu/clouds/aws/server.rb @@ -898,8 +898,10 @@ def groom @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: (windows? and @config['groomer'] != "Ansible"), timeout: @config['groomer_timeout']) end rescue MU::Groomer::RunError => e + raise e if !@config['create_image'].nil? and !@config['image_created'] MU.log "Proceeding after failed initial Groomer run, but #{@mu_name} may not behave as expected!", MU::WARN, details: e.message rescue StandardError => e + raise e if !@config['create_image'].nil? 
and !@config['image_created'] MU.log "Caught #{e.inspect} on #{@mu_name} in an unexpected place (after @groomer.run on Full Initial Run)", MU::ERR end diff --git a/modules/mu/defaults/AWS.yaml b/modules/mu/defaults/AWS.yaml index 189de1cb0..e0cb2316a 100644 --- a/modules/mu/defaults/AWS.yaml +++ b/modules/mu/defaults/AWS.yaml @@ -34,22 +34,22 @@ centos6: &3 us-west-1: ami-0b05ec54412b9f8b0 us-west-2: ami-0447e036b102b2ca0 centos7: - us-east-1: ami-07e6f661e71ad964b - ap-northeast-1: ami-0988180d74897c639 - ap-northeast-2: ami-0e77cd1c7024b8ae0 - ap-south-1: ami-02bd479122041000a - ap-southeast-1: ami-017767778ef9db671 - ap-southeast-2: ami-05b09a58c3964d67d - ca-central-1: ami-0a59a176d810fcc5f - eu-central-1: ami-0b48a421fb05d96af - eu-north-1: ami-02337601ea5dc4a5d - eu-west-1: ami-0b0a55b7423eeac07 - eu-west-2: ami-060518b40b25b9eb4 - eu-west-3: ami-060957bb3adacd831 - sa-east-1: ami-0c706132b35071de6 - us-east-2: ami-0db4c266ed0bb958b - us-west-1: ami-0980f6eb52c998793 - us-west-2: ami-07f2ed4755c01c05c + us-east-1: ami-067256ca1497c924d + ap-northeast-1: ami-07c1e51354fdfd362 + ap-northeast-2: ami-042b761c93d6df2f1 + ap-south-1: ami-02e879f52322e7c98 + ap-southeast-1: ami-0487e9f84d0ffde89 + ap-southeast-2: ami-0e854dab39fd6a427 + ca-central-1: ami-05a27d311b585a70b + eu-central-1: ami-0e396d00c787b4f47 + eu-north-1: ami-087763a2ba60b2bfe + eu-west-1: ami-04e3bd9335a14e635 + eu-west-2: ami-0efd34a8d1fc2b104 + eu-west-3: ami-08d0bcbc780448cf8 + sa-east-1: ami-0284f4a0968263cf0 + us-east-2: ami-0292786917d1e3015 + us-west-1: ami-0ba622529dcdff2bb + us-west-2: ami-079a309ca6261d7f6 ubuntu16: &2 us-east-1: ami-bcdc16c6 us-west-1: ami-1b17257b From eb4cc6c7ea300e05a890f0641afdc7447a913a88 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 2 Apr 2020 20:31:52 -0400 Subject: [PATCH 040/124] AWS::Database: Factor some more CodeClimate complaints, meld cluster and instance termination into one method --- modules/mu/cloud.rb | 25 +++ modules/mu/clouds/aws/database.rb 
| 214 +++++-------------- modules/mu/clouds/cloudformation/database.rb | 13 +- 3 files changed, 81 insertions(+), 171 deletions(-) diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb index e56259c40..d91856d6c 100644 --- a/modules/mu/cloud.rb +++ b/modules/mu/cloud.rb @@ -1763,6 +1763,31 @@ def self.find(*flags) } allfound end + + if shortname == "Database" + + # Getting the password for a database's master user, and saving it in a database / cluster specific vault + def getPassword + if @config['password'].nil? + if @config['auth_vault'] && !@config['auth_vault'].empty? + @config['password'] = @groomclass.getSecret( + vault: @config['auth_vault']['vault'], + item: @config['auth_vault']['item'], + field: @config['auth_vault']['password_field'] + ) + else + # Should we use random instead? + @config['password'] = Password.pronounceable(10..12) + end + end + + creds = { + "username" => @config["master_user"], + "password" => @config["password"] + } + @groomclass.saveSecret(vault: @mu_name, item: "database_credentials", data: creds) + end + end if shortname == "DNSZone" def self.genericMuDNSEntry(*flags) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index a71b9c797..127114e51 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -225,28 +225,6 @@ def allTags @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } end - # Getting the password for the master user, and saving it in a database / cluster specif vault - def getPassword - if @config['password'].nil? - if @config['auth_vault'] && !@config['auth_vault'].empty? - @config['password'] = @groomclass.getSecret( - vault: @config['auth_vault']['vault'], - item: @config['auth_vault']['item'], - field: @config['auth_vault']['password_field'] - ) - else - # Should we use random instead? 
- @config['password'] = Password.pronounceable(10..12) - end - end - - creds = { - "username" => @config["master_user"], - "password" => @config["password"] - } - @groomclass.saveSecret(vault: @mu_name, item: "database_credentials", data: creds) - end - def genericParams params = if @config['create_cluster'] paramhash = { @@ -609,8 +587,8 @@ def self.threaded_resource_purge(describe_method, list_method, id_method, arn_ty begin arn = MU::Cloud::AWS::Database.getARN(resource.send(id_method), arn_type, "rds", region: region, credentials: credentials) tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - rescue Aws::RDS::Errors::InvalidParameterValue => e - MU.log "Failed to fetch ARN or tags of resource via #{id_method.to_s}", MU::WARN, details: resource + rescue Aws::RDS::Errors::InvalidParameterValue + MU.log "Failed to fetch ARN of type #{arn_type} or tags of resource via #{id_method.to_s}", MU::WARN, details: [resource, arn] next end @@ -637,17 +615,13 @@ def self.threaded_resource_purge(describe_method, list_method, id_method, arn_ty # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU.curRegion, flags: {}) - threaded_resource_purge(:describe_db_instances, :db_instances, :db_instance_identifier, "db", region, credentials, ignoremaster) { |id| - terminate_rds_instance(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials) - - }.each { |t| - t.join - } - - threaded_resource_purge(:describe_db_clusters, :db_clusters, :db_cluster_identifier, "cluster", region, credentials, ignoremaster) { |id| - terminate_rds_cluster(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials) - }.each { |t| - t.join + ["instance", "cluster"].each { |type| + 
threaded_resource_purge("describe_db_#{type}s".to_sym, "db_#{type}s".to_sym, "db_#{type}_identifier".to_sym, (type == "instance" ? "db" : "cluster"), region, credentials, ignoremaster) { |id| + terminate_rds_instance(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials, cluster: (type == "cluster")) + + }.each { |t| + t.join + } } threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster) { |id| @@ -658,7 +632,7 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. } ["db", "db_cluster"].each { |type| - threads.concat threaded_resource_purge("describe_#{type}_parameter_groups".to_sym, "#{type}_parameter_groups".to_sym, "#{type}_parameter_group_name".to_sym, "pg", region, credentials, ignoremaster) { |id| + threads.concat threaded_resource_purge("describe_#{type}_parameter_groups".to_sym, "#{type}_parameter_groups".to_sym, "#{type}_parameter_group_name".to_sym, (type == "db" ? 
"pg" : "cluster-pg"), region, credentials, ignoremaster) { |id| MU.log "Deleting RDS #{type} parameter group #{id}" MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) { MU::Cloud::AWS.rds(region: region).send("delete_#{type}_parameter_group", { "#{type}_parameter_group_name".to_sym => id }) if !noop @@ -1327,7 +1301,7 @@ def createDb begin MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) rescue Aws::RDS::Errors::InvalidParameterValue => e - if e.message.match(/Invalid security group/) + if e.message =~ /Invalid security group/ MU.log e.message+" modifying "+@cloud_id, MU::ERR, details: mod_config end raise e @@ -1458,8 +1432,8 @@ def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, ma # Remove an RDS database and associated artifacts # @param db [OpenStruct]: The cloud provider's description of the database artifact # @return [void] - def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil) - db ||= MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first if cloud_id + def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil, cluster: false) + db ||= MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials, cluster: cluster).values.first if cloud_id db_obj ||= MU::MommaCat.findStray( "AWS", "database", @@ -1474,66 +1448,61 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M db ||= db_obj.cloud_desc end - raise MuError, "terminate_rds_instance requires a non-nil database descriptor (#{cloud_id})" if db.nil? 
- - rdssecgroups = [] - begin - secgroup = MU::Cloud::AWS.rds(region: region).describe_db_security_groups(db_security_group_name: cloud_id) - rdssecgroups << cloud_id if !secgroup.nil? - rescue Aws::RDS::Errors::DBSecurityGroupNotFound - # this is normal in VPC world - end - + raise MuError, "terminate_rds_instance requires a non-nil database descriptor (#{cloud_id})" if db.nil? or cloud_id.nil? - MU.retrier([], wait: 60, loop_if: Proc.new { %w{creating modifying backing-up}.include?(db.db_instance_status) }) { - db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first + MU.retrier([], wait: 60, loop_if: Proc.new { %w{creating modifying backing-up}.include?(cluster ? db.status : db.db_instance_status) }) { + db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials, cluster: cluster).values.first return if db.nil? } MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: db.endpoint.address, cloudclass: MU::Cloud::Database, delete: true) if !noop - if %w{deleting deleted}.include?(db.db_instance_status) + if %w{deleting deleted}.include?(cluster ? db.status : db.db_instance_status) MU.log "#{cloud_id} has already been terminated", MU::WARN else - params = { - db_instance_identifier: cloud_id - } - if skipsnapshots or db.db_cluster_identifier or db.read_replica_source_db_instance_identifier - MU.log "Terminating #{cloud_id} (not saving final snapshot)" + params = cluster ? { :db_cluster_identifier => cloud_id } : { :db_instance_identifier => cloud_id } + + if skipsnapshots or (!cluster and (db.db_cluster_identifier or db.read_replica_source_db_instance_identifier)) + MU.log "Terminating #{cluster ? "cluster" : "database" } #{cloud_id} (not saving final snapshot)" params[:skip_final_snapshot] = true else - MU.log "Terminating #{cloud_id} (final snapshot: #{cloud_id}-mufinal)" + MU.log "Terminating #{cluster ? 
"cluster" : "database" } #{cloud_id} (final snapshot: #{cloud_id}-mufinal)" params[:skip_final_snapshot] = false params[:final_db_snapshot_identifier] = "#{cloud_id}-mufinal" end if !noop on_retry = Proc.new { |e| - if e.class == Aws::RDS::Errors::DBSnapshotAlreadyExists - MU.log "Snapshot of #{cloud_id} already exists", MU::WARN + if [Aws::RDS::Errors::DBSnapshotAlreadyExists, Aws::RDS::Errors::DBClusterSnapshotAlreadyExistsFault, Aws::RDS::Errors::DBClusterQuotaExceeded].include?(e.class) + MU.log e.message, MU::WARN params[:skip_final_snapshot] = true + params.delete(:final_db_snapshot_identifier) end } - MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists], wait: 60, max: 20, on_retry: on_retry) { - MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params) + MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 20, on_retry: on_retry) { + MU.log "Terminating #{cloud_id}#{params[:skip_final_snapshot] ? " with final snapshot named #{cloud_id}-mufinal" : ""}", MU::NOTICE, details: params + if !noop + cluster ? 
MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(params) : MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params) + end } del_db = nil MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound], loop_if: Proc.new { del_db and del_db.db_instance_status != "deleted" }) { - del_db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region).values.first + del_db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, cluster: cluster).values.first +MU.log cloud_id, MU::NOTICE, details: del_db if cluster } end end + purge_rds_sgs(cloud_id, region, credentials, noop) - # RDS security groups can depend on EC2 security groups, do these last - begin - rdssecgroups.each { |sg| - MU.log "Removing RDS Security Group #{sg}" - MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_security_group(db_security_group_name: sg) if !noop - } - rescue Aws::RDS::Errors::DBSecurityGroupNotFound - end + purge_groomer_artifacts(db_obj, cloud_id, noop) + + MU.log "#{cloud_id} has been terminated" if !noop + end + private_class_method :terminate_rds_instance + def self.purge_groomer_artifacts(db_obj, cloud_id, noop) + return if !db_obj # Cleanup the database vault groomer = if db_obj and db_obj.respond_to?(:config) and db_obj.config @@ -1544,101 +1513,28 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M groomclass = MU::Groomer.loadGroomer(groomer) groomclass.deleteSecret(vault: cloud_id.upcase) if !noop - MU.log "#{cloud_id} has been terminated" if !noop end - private_class_method :terminate_rds_instance - - # Remove an RDS database cluster and associated artifacts - # @param cluster [OpenStruct]: The cloud provider's description of the database artifact - # @return [void] - def self.terminate_rds_cluster(cluster, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil) - - 
cluster ||= MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first if cloud_id - cluster_obj ||= MU::MommaCat.findStray( - "AWS", - "database", - region: region, - deploy_id: deploy_id, - cloud_id: cloud_id, - mu_name: mu_name, - dummy_ok: true - ).first - if cluster_obj - cloud_id ||= cluster_obj.cloud_id - cluster ||= cluster_obj.cloud_desc - end - - raise MuError, "terminate_rds_cluster requires a non-nil database cluster descriptor" if cluster.nil? + private_class_method :purge_groomer_artifacts - # We can use an AWS waiter for this. - unless cluster.status == "available" - loop do - MU.log "Waiting for #{cloud_id} to be in a removable state...", MU::NOTICE - cluster = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first - break unless %w{creating modifying backing-up}.include?(cluster.status) - sleep 60 - end + def self.purge_rds_sgs(cloud_id, region, credentials, noop) + rdssecgroups = [] + begin + secgroup = MU::Cloud::AWS.rds(region: region, credentials: credentials).describe_db_security_groups(db_security_group_name: cloud_id) + rdssecgroups << cloud_id if !secgroup.nil? + rescue Aws::RDS::Errors::DBSecurityGroupNotFound + # this is normal in VPC world end - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: cluster.endpoint, cloudclass: MU::Cloud::Database, delete: true) - - if %w{deleting deleted}.include?(cluster.status) - MU.log "#{cloud_id} has already been terminated", MU::WARN - else - clusterSkipSnap = Proc.new { - MU.log "Terminating #{cloud_id}. Not saving final snapshot" - MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(db_cluster_identifier: cloud_id, skip_final_snapshot: true) if !noop - } - - clusterCreateSnap = Proc.new { - MU.log "Terminating #{cloud_id}. 
Saving final snapshot: #{cloud_id}-mufinal" - MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(db_cluster_identifier: cloud_id, skip_final_snapshot: false, final_db_snapshot_identifier: "#{cloud_id}-mufinal") if !noop + # RDS security groups can depend on EC2 security groups, do these last + begin + rdssecgroups.each { |sg| + MU.log "Removing RDS Security Group #{sg}" + MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_security_group(db_security_group_name: sg) if !noop } - - retries = 0 - begin - skipsnapshots ? clusterSkipSnap.call : clusterCreateSnap.call - rescue Aws::RDS::Errors::InvalidDBClusterStateFault => e - if retries < 5 - MU.log "#{cloud_id} is not in a removable state, retrying several times", MU::WARN - retries += 1 - sleep 30 - retry - else - MU.log "#{cloud_id} is not in a removable state after several retries, giving up. #{e.inspect}", MU::ERR - end - rescue Aws::RDS::Errors::DBClusterSnapshotAlreadyExistsFault - clusterSkipSnap.call - MU.log "Snapshot of #{cloud_id} already exists", MU::WARN - rescue Aws::RDS::Errors::DBClusterQuotaExceeded - clusterSkipSnap.call - MU.log "Snapshot quota exceeded while deleting #{cloud_id}", MU::ERR - end - - unless noop - loop do - MU.log "Waiting for #{cloud_id} to terminate", MU::NOTICE - cluster = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials).values.first - break unless cluster - sleep 30 - end - end + rescue Aws::RDS::Errors::DBSecurityGroupNotFound end - - # Cleanup the cluster vault - groomer = - if cluster_obj - cluster_obj.config.has_key?("groomer") ? 
cluster_obj.config["groomer"] : MU::Config.defaultGroomer - else - MU::Config.defaultGroomer - end - - groomclass = MU::Groomer.loadGroomer(groomer) - groomclass.deleteSecret(vault: cloud_id.upcase) if !noop - - MU.log "#{cloud_id} has been terminated" if !noop end - private_class_method :terminate_rds_cluster + private_class_method :purge_rds_sgs end #class end #class diff --git a/modules/mu/clouds/cloudformation/database.rb b/modules/mu/clouds/cloudformation/database.rb index cf48d616f..2daa2f844 100644 --- a/modules/mu/clouds/cloudformation/database.rb +++ b/modules/mu/clouds/cloudformation/database.rb @@ -214,18 +214,7 @@ def create elsif @config['db_name'] MU::Cloud::CloudFormation.setCloudFormationProp(@cfm_template[@cfm_name], "DBName", @config['db_name']) end - if @config['password'].nil? - if @config['auth_vault'] && !@config['auth_vault'].empty? - @config['password'] = @groomclass.getSecret( - vault: @config['auth_vault']['vault'], - item: @config['auth_vault']['item'], - field: @config['auth_vault']['password_field'] - ) - else - # Should we use random instead? 
- @config['password'] = Password.pronounceable(10..12) - end - end + getPassword MU::Cloud::CloudFormation.setCloudFormationProp(@cfm_template[@cfm_name], "MasterUserPassword", @config['password']) end end From 21117b47ea6d91a6a29ebf21a9f6d6b5553d135d Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 2 Apr 2020 21:20:23 -0400 Subject: [PATCH 041/124] AWS::Database: minor nits in new cleanup behavior --- modules/mu/clouds/aws/database.rb | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 127114e51..386132cf5 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -1454,8 +1454,8 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials, cluster: cluster).values.first return if db.nil? } - - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: db.endpoint.address, cloudclass: MU::Cloud::Database, delete: true) if !noop +pp db + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: (cluster ? db.endpoint : db.endpoint.address), cloudclass: MU::Cloud::Database, delete: true) if !noop if %w{deleting deleted}.include?(cluster ? db.status : db.db_instance_status) MU.log "#{cloud_id} has already been terminated", MU::WARN @@ -1480,7 +1480,6 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M end } MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 20, on_retry: on_retry) { - MU.log "Terminating #{cloud_id}#{params[:skip_final_snapshot] ? " with final snapshot named #{cloud_id}-mufinal" : ""}", MU::NOTICE, details: params if !noop cluster ? 
MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(params) : MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params) end From decab88ac7b6b80f6f7ffe58823e2b6f857efd96 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 2 Apr 2020 23:02:34 -0400 Subject: [PATCH 042/124] AWS::Database: drop some unused methods --- modules/mu/clouds/aws/database.rb | 47 ++++++------------------------- 1 file changed, 8 insertions(+), 39 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 386132cf5..8219ac730 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -366,38 +366,6 @@ def createDBParameterGroup(cluster = false) end end - # Retrieve a complete description of a database cluster parameter group. - # @param param_group_id [String]: The cloud provider's identifier for this parameter group. - # @param region [String]: The cloud provider region - # @return [OpenStruct] - def self.getDBClusterParameterGroup(param_group_id, region: MU.curRegion) - MU::Cloud::AWS.rds(region: region).describe_db_cluster_parameter_groups(db_cluster_parameter_group_name: param_group_id).db_cluster_parameter_groups.first - # rescue DBClusterParameterGroupNotFound => e - # Of course the API will return DBParameterGroupNotFound instead of the documented DBClusterParameterGroupNotFound error. - rescue Aws::RDS::Errors::DBParameterGroupNotFound - #we're fine returning nil - end - - # Retrieve a complete description of a database parameter group. - # @param param_group_id [String]: The cloud provider's identifier for this parameter group. 
- # @param region [String]: The cloud provider region - # @return [OpenStruct] - def self.getDBParameterGroup(param_group_id, region: MU.curRegion) - MU::Cloud::AWS.rds(region: region).describe_db_parameter_groups(db_parameter_group_name: param_group_id).db_parameter_groups.first - rescue Aws::RDS::Errors::DBParameterGroupNotFound - #we're fine returning nil - end - - # Retrieve a complete description of a database subnet group. - # @param subnet_id [String]: The cloud provider's identifier for this subnet group. - # @param region [String]: The cloud provider region - # @return [OpenStruct] - def self.getSubnetGroup(subnet_id, region: MU.curRegion) - MU::Cloud::AWS.rds(region: region).describe_db_subnet_groups(db_subnet_group_name: subnet_id).db_subnet_groups.first - rescue Aws::RDS::Errors::DBSubnetGroupNotFoundFault - #we're fine returning nil - end - # Called automatically by {MU::Deploy#createResources} def groom if @config["create_cluster"] @@ -588,7 +556,7 @@ def self.threaded_resource_purge(describe_method, list_method, id_method, arn_ty arn = MU::Cloud::AWS::Database.getARN(resource.send(id_method), arn_type, "rds", region: region, credentials: credentials) tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list rescue Aws::RDS::Errors::InvalidParameterValue - MU.log "Failed to fetch ARN of type #{arn_type} or tags of resource via #{id_method.to_s}", MU::WARN, details: [resource, arn] + MU.log "Failed to fetch ARN of type #{arn_type} or tags of resource via #{id_method}", MU::WARN, details: [resource, arn] next end @@ -1525,13 +1493,14 @@ def self.purge_rds_sgs(cloud_id, region, credentials, noop) end # RDS security groups can depend on EC2 security groups, do these last - begin - rdssecgroups.each { |sg| - MU.log "Removing RDS Security Group #{sg}" + rdssecgroups.each { |sg| + MU.log "Removing RDS Security Group #{sg}" + begin MU::Cloud::AWS.rds(region: region, credentials: 
credentials).delete_db_security_group(db_security_group_name: sg) if !noop - } - rescue Aws::RDS::Errors::DBSecurityGroupNotFound - end + rescue Aws::RDS::Errors::DBSecurityGroupNotFound + MU.log "RDS Security Group #{sg} disappeared before I could remove it", MU::NOTICE + end + } end private_class_method :purge_rds_sgs From a3dd10745e9a19975aee3c49718df56ec0eb74e3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 11:49:14 -0400 Subject: [PATCH 043/124] AWS: factor some firewall-related commonalities --- modules/mu/cloud.rb | 25 ++++++++++- modules/mu/clouds/aws/cache_cluster.rb | 40 +----------------- modules/mu/clouds/aws/database.rb | 38 +---------------- modules/mu/clouds/aws/firewall_rule.rb | 25 +++++++++++ modules/mu/clouds/aws/loadbalancer.rb | 21 +--------- modules/mu/clouds/aws/server.rb | 58 ++------------------------ modules/mu/clouds/aws/server_pool.rb | 21 +--------- 7 files changed, 59 insertions(+), 169 deletions(-) diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb index d91856d6c..b92c3de80 100644 --- a/modules/mu/cloud.rb +++ b/modules/mu/cloud.rb @@ -1697,6 +1697,28 @@ def myFirewallRules rules end + # If applicable, allow this resource's NAT host blanket access via + # rules in its associated +admin+ firewall rule set. 
+ def allowBastionAccess + return nil if !@nat or !@nat.is_a?(MU::Cloud::Server) + + myFirewallRules.each { |acl| + if acl.config["admin"] + acl.addRule([@nat.canonicalIP], proto: "tcp") + acl.addRule([@nat.canonicalIP], proto: "udp") + acl.addRule([@nat.canonicalIP], proto: "icmp") + # XXX this is an AWS-specific hack; we need to force Server + # implementations to expose a method that lists all of their + # internal IPs, akin to #canonicalIP + if @nat.cloud_desc and @nat.cloud_desc.respond_to?(:private_ip_address) + acl.addRule([@nat.cloud_desc.private_ip_address], proto: "tcp") + acl.addRule([@nat.cloud_desc.private_ip_address], proto: "udp") + acl.addRule([@nat.cloud_desc.private_ip_address], proto: "icmp") + end + end + } + end + # Defaults any resources that don't declare their release-readiness to # ALPHA. That'll learn 'em. def self.quality @@ -1763,7 +1785,8 @@ def self.find(*flags) } allfound end - + + if shortname == "Database" # Getting the password for a database's master user, and saving it in a database / cluster specific vault diff --git a/modules/mu/clouds/aws/cache_cluster.rb b/modules/mu/clouds/aws/cache_cluster.rb index 67eccfb60..2a82f506b 100644 --- a/modules/mu/clouds/aws/cache_cluster.rb +++ b/modules/mu/clouds/aws/cache_cluster.rb @@ -333,24 +333,7 @@ def createSubnetGroup subnet_ids: subnet_ids ) - # Find NAT and create holes in security groups. - # Adding just for consistency, but do we really need this for cache clusters? I guess Nagios and such.. 
- if @config["vpc"]["nat_host_name"] || @config["vpc"]["nat_host_id"] || @config["vpc"]["nat_host_tag"] || @config["vpc"]["nat_host_ip"] - nat = @nat - if nat.is_a?(Struct) && nat.nat_gateway_id && nat.nat_gateway_id.start_with?("nat-") - MU.log "Using NAT Gateway, not modifying security groups" - else - _nat_name, _nat_conf, nat_deploydata = @nat.describe - @deploy.kittens['firewall_rules'].values.each { |acl| - # XXX if a user doesn't set up dependencies correctly, this can die horribly on a NAT that's still in mid-creation. Fix this... possibly in the config parser. - if acl.config["admin"] - acl.addRule([nat_deploydata["private_ip_address"]], proto: "tcp") - acl.addRule([nat_deploydata["private_ip_address"]], proto: "udp") - break - end - } - end - end + allowBastionAccess if @dependencies.has_key?('firewall_rule') @config["security_group_ids"] = [] @@ -703,26 +686,7 @@ def self.schema(_config) "type" => "boolean", "description" => "Create a replication group; will be set automatically if +engine+ is +redis+ and +node_count+ is greated than one." }, - "ingress_rules" => { - "items" => { - "properties" => { - "sgs" => { - "type" => "array", - "items" => { - "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic", - "type" => "string" - } - }, - "lbs" => { - "type" => "array", - "items" => { - "description" => "AWS Load Balancers which will have this rule applied to their traffic", - "type" => "string" - } - } - } - } - } + "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema } [toplevel_required, schema] end diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 8219ac730..5ce23e3da 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -314,22 +314,7 @@ def createSubnetGroup end end - # Find NAT and create holes in security groups. 
- if @nat - if @nat.is_a?(Struct) and @nat.respond_to?(:nat_gateway_id) and @nat.nat_gateway_id.start_with?("nat-") - MU.log "Using NAT Gateway, not modifying security groups" - else - _nat_name, _nat_conf, nat_deploydata = @nat.describe - @deploy.kittens['firewall_rules'].each_value { |acl| -# XXX if a user doesn't set up dependencies correctly, this can die horribly on a NAT that's still in mid-creation. Fix this... possibly in the config parser. - if acl.config["admin"] - acl.addRule([nat_deploydata["private_ip_address"]], proto: "tcp") - acl.addRule([nat_deploydata["private_ip_address"]], proto: "udp") - break - end - } - end - end + allowBastionAccess end # Create a database parameter group. @@ -707,26 +692,7 @@ def self.schema(_config) "type" => "string", "enum" => ["license-included", "bring-your-own-license", "general-public-license", "postgresql-license"] }, - "ingress_rules" => { - "items" => { - "properties" => { - "sgs" => { - "type" => "array", - "items" => { - "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic", - "type" => "string" - } - }, - "lbs" => { - "type" => "array", - "items" => { - "description" => "AWS Load Balancers which will have this rule applied to their traffic", - "type" => "string" - } - } - } - } - } + "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema } [toplevel_required, schema] end diff --git a/modules/mu/clouds/aws/firewall_rule.rb b/modules/mu/clouds/aws/firewall_rule.rb index f496a800a..01f3ea39a 100644 --- a/modules/mu/clouds/aws/firewall_rule.rb +++ b/modules/mu/clouds/aws/firewall_rule.rb @@ -514,6 +514,31 @@ def self.revoke_rules(sg, egress: false, region: MU.myregion, credentials: nil) end private_class_method :revoke_rules + # Return an AWS-specific chunk of schema commonly used in the +ingress_rules+ parameter of other resource types. 
+ # @return [Hash] + def self.ingressRuleAddtlSchema + { + "items" => { + "properties" => { + "sgs" => { + "type" => "array", + "items" => { + "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic", + "type" => "string" + } + }, + "lbs" => { + "type" => "array", + "items" => { + "description" => "AWS Load Balancers which will have this rule applied to their traffic", + "type" => "string" + } + } + } + } + } + end + # Cloud-specific configuration properties. # @param _config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource diff --git a/modules/mu/clouds/aws/loadbalancer.rb b/modules/mu/clouds/aws/loadbalancer.rb index 0b75e45b0..f184dc473 100644 --- a/modules/mu/clouds/aws/loadbalancer.rb +++ b/modules/mu/clouds/aws/loadbalancer.rb @@ -793,26 +793,7 @@ def self.schema(_config) } } }, - "ingress_rules" => { - "items" => { - "properties" => { - "sgs" => { - "type" => "array", - "items" => { - "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic", - "type" => "string" - } - }, - "lbs" => { - "type" => "array", - "items" => { - "description" => "AWS Load Balancers which will have this rule applied to their traffic", - "type" => "string" - } - } - } - } - } + "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema } [toplevel_required, schema] end diff --git a/modules/mu/clouds/aws/server.rb b/modules/mu/clouds/aws/server.rb index a60826c1f..8614529e5 100644 --- a/modules/mu/clouds/aws/server.rb +++ b/modules/mu/clouds/aws/server.rb @@ -299,7 +299,7 @@ def createEc2Instance raise MuError, "Got null subnet id out of #{@config['vpc']}" end MU.log "Deploying #{@mu_name} into VPC #{@vpc.cloud_id} Subnet #{subnet.cloud_id}" - punchAdminNAT + allowBastionAccess 
instance_descriptor[:subnet_id] = subnet.cloud_id end @@ -482,7 +482,7 @@ def postBoot(instance_id = nil) end } - punchAdminNAT + allowBastionAccess setAlarms @@ -809,37 +809,6 @@ def notify return deploydata end - # If the specified server is in a VPC, and has a NAT, make sure we'll - # be letting ssh traffic in from said NAT. - def punchAdminNAT - if @config['vpc'].nil? or - ( - !@config['vpc'].has_key?("nat_host_id") and - !@config['vpc'].has_key?("nat_host_tag") and - !@config['vpc'].has_key?("nat_host_ip") and - !@config['vpc'].has_key?("nat_host_name") - ) - return nil - end - - return nil if @nat.is_a?(Struct) && @nat.nat_gateway_id && @nat.nat_gateway_id.start_with?("nat-") - - dependencies if @nat.nil? - if @nat.nil? or @nat.cloud_desc.nil? - raise MuError, "#{@mu_name} (#{MU.deploy_id}) is configured to use #{@config['vpc']} but I can't find the cloud descriptor for a matching NAT instance" - end - MU.log "Adding administrative holes for NAT host #{@nat.cloud_desc.private_ip_address} to #{@mu_name}" - if !@deploy.kittens['firewall_rules'].nil? 
- @deploy.kittens['firewall_rules'].values.each { |acl| - if acl.config["admin"] - acl.addRule([@nat.cloud_desc.private_ip_address], proto: "tcp") - acl.addRule([@nat.cloud_desc.private_ip_address], proto: "udp") - acl.addRule([@nat.cloud_desc.private_ip_address], proto: "icmp") - end - } - end - end - # Called automatically by {MU::Deploy#createResources} def groom MU::MommaCat.lock(@cloud_id+"-groom") @@ -851,7 +820,7 @@ def groom end end - punchAdminNAT + allowBastionAccess tagVolumes @@ -1703,26 +1672,7 @@ def self.schema(_config) "type" => "object" } }, - "ingress_rules" => { - "items" => { - "properties" => { - "sgs" => { - "type" => "array", - "items" => { - "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic", - "type" => "string" - } - }, - "lbs" => { - "type" => "array", - "items" => { - "description" => "AWS Load Balancers which will have this rule applied to their traffic", - "type" => "string" - } - } - } - } - }, + "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema, "ssh_user" => { "type" => "string", "default" => "root", diff --git a/modules/mu/clouds/aws/server_pool.rb b/modules/mu/clouds/aws/server_pool.rb index e3d934320..718fa6e6b 100644 --- a/modules/mu/clouds/aws/server_pool.rb +++ b/modules/mu/clouds/aws/server_pool.rb @@ -813,26 +813,7 @@ def self.schema(_config) } } }, - "ingress_rules" => { - "items" => { - "properties" => { - "sgs" => { - "type" => "array", - "items" => { - "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic", - "type" => "string" - } - }, - "lbs" => { - "type" => "array", - "items" => { - "description" => "AWS Load Balancers which will have this rule applied to their traffic", - "type" => "string" - } - } - } - } - } + "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema } [toplevel_required, schema] end From 
0bf44564eef0d146dfd27e9a76fc01c056c9e75c Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 12:31:35 -0400 Subject: [PATCH 044/124] Server: implement a #listIPs for all platforms; AWS::Database: more .send shenanigans to shorten code --- modules/mu/cloud.rb | 16 ++++------------ modules/mu/clouds/aws/database.rb | 28 ++++++++++++++++------------ modules/mu/clouds/aws/server.rb | 7 +++++++ modules/mu/clouds/azure/server.rb | 22 ++++++++++++++++++++++ modules/mu/clouds/google/server.rb | 16 ++++++++++++++++ 5 files changed, 65 insertions(+), 24 deletions(-) diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb index b92c3de80..0be1e36e2 100644 --- a/modules/mu/cloud.rb +++ b/modules/mu/cloud.rb @@ -256,7 +256,7 @@ class NoSQLDB; :deps_wait_on_my_creation => false, :waits_on_parent_completion => false, :class => generic_class_methods + [:validateInstanceType, :imageTimeStamp], - :instance => generic_instance_methods + [:groom, :postBoot, :getSSHConfig, :canonicalIP, :getWindowsAdminPassword, :active?, :groomer, :mu_windows_name, :mu_windows_name=, :reboot, :addVolume, :genericNAT] + :instance => generic_instance_methods + [:groom, :postBoot, :getSSHConfig, :canonicalIP, :getWindowsAdminPassword, :active?, :groomer, :mu_windows_name, :mu_windows_name=, :reboot, :addVolume, :genericNAT, :listIPs] }, :ServerPool => { :has_multiples => false, @@ -1704,17 +1704,9 @@ def allowBastionAccess myFirewallRules.each { |acl| if acl.config["admin"] - acl.addRule([@nat.canonicalIP], proto: "tcp") - acl.addRule([@nat.canonicalIP], proto: "udp") - acl.addRule([@nat.canonicalIP], proto: "icmp") - # XXX this is an AWS-specific hack; we need to force Server - # implementations to expose a method that lists all of their - # internal IPs, akin to #canonicalIP - if @nat.cloud_desc and @nat.cloud_desc.respond_to?(:private_ip_address) - acl.addRule([@nat.cloud_desc.private_ip_address], proto: "tcp") - acl.addRule([@nat.cloud_desc.private_ip_address], proto: "udp") - 
acl.addRule([@nat.cloud_desc.private_ip_address], proto: "icmp") - end + acl.addRule(@nat.listIPs, proto: "tcp") + acl.addRule(@nat.listIPs, proto: "udp") + acl.addRule(@nat.listIPs, proto: "icmp") end } end diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 5ce23e3da..1c05492e2 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -1056,22 +1056,26 @@ def create_basic params[:iops] = @config["iops"] if @config['storage_type'] == "io1" end + noun = @config['create_cluster'] ? "cluster" : "instance" + MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) [:storage_encrypted, :master_user_password, :engine_version, :allocated_storage, :backup_retention_period, :preferred_backup_window, :master_username, :db_name, :database_name].each { |p| params.delete(p) } - MU.log "Creating database #{@config['create_cluster'] ? "cluster" : "instance" } #{@cloud_id} from snapshot #{@config["snapshot_id"]}" - if @config['create_cluster'] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_from_snapshot(params) - else - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(params) - end + MU.log "Creating database #{noun} #{@cloud_id} from snapshot #{@config["snapshot_id"]}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("restore_db_#{noun}_from_snapshot".to_sym, params) +# if @config['create_cluster'] +# MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_from_snapshot(params) +# else +# MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(params) +# end else - MU.log "Creating pristine database #{@config['create_cluster'] ? 
"cluster" : "instance" } #{@cloud_id} (#{@config['name']}) in #{@config['region']}" - if @config['create_cluster'] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster(params) - else - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) - end + MU.log "Creating pristine database #{noun} #{@cloud_id} (#{@config['name']}) in #{@config['region']}" + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("create_db_#{noun}".to_sym, params) +# if @config['create_cluster'] +# MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster(params) +# else +# MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) +# end end } end diff --git a/modules/mu/clouds/aws/server.rb b/modules/mu/clouds/aws/server.rb index 8614529e5..4a2e787bf 100644 --- a/modules/mu/clouds/aws/server.rb +++ b/modules/mu/clouds/aws/server.rb @@ -1982,6 +1982,13 @@ def self.configureBlockDevices(image_id: nil, storage: nil, add_ephemeral: true, configured_storage end + # Return all of the IP addresses, public and private, from all of our + # network interfaces. + # @return [Array] + def listIPs + MU::Cloud::AWS::Server.getAddresses(cloud_desc).first + end + private def bootstrapGroomer diff --git a/modules/mu/clouds/azure/server.rb b/modules/mu/clouds/azure/server.rb index be2c27ce4..dd9e2a842 100644 --- a/modules/mu/clouds/azure/server.rb +++ b/modules/mu/clouds/azure/server.rb @@ -393,6 +393,28 @@ def canonicalIP end end + # Return all of the IP addresses, public and private, from all of our + # network interfaces. + # @return [Array] + def listIPs + ips = [] + cloud_desc.network_profile.network_interfaces.each { |iface| + iface_id = Id.new(iface.is_a?(Hash) ? 
iface['id'] : iface.id) + iface_desc = MU::Cloud::Azure.network(credentials: @credentials).network_interfaces.get(@resource_group, iface_id.to_s) + iface_desc.ip_configurations.each { |ipcfg| + ips << ipcfg.private_ipaddress + if ipcfg.respond_to?(:public_ipaddress) and ipcfg.public_ipaddress + ip_id = Id.new(ipcfg.public_ipaddress.id) + ip_desc = MU::Cloud::Azure.network(credentials: @credentials).public_ipaddresses.get(@resource_group, ip_id.to_s) + if ip_desc + ips << ip_desc.ip_address + end + end + } + } + ips + end + # return [String]: A password string. def getWindowsAdminPassword end diff --git a/modules/mu/clouds/google/server.rb b/modules/mu/clouds/google/server.rb index e228c192f..eb24d1c18 100644 --- a/modules/mu/clouds/google/server.rb +++ b/modules/mu/clouds/google/server.rb @@ -1001,6 +1001,22 @@ def canonicalIP end end + # Return all of the IP addresses, public and private, from all of our + # network interfaces. + # @return [Array] + def listIPs + ips = [] + cloud_desc.network_interfaces.each { |iface| + ips << iface.network_ip + if iface.access_configs + iface.access_configs.each { |acfg| + ips << acfg.nat_ip if acfg.nat_ip + } + end + } + ips + end + # return [String]: A password string. 
def getWindowsAdminPassword(use_cache: true) @config['windows_auth_vault'] ||= { From 38c6b31de6085fd86d9d4a32debf77ce41e86416 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 12:47:15 -0400 Subject: [PATCH 045/124] Database: migrate member_of_cluster parameter to use a Ref --- modules/mu/clouds/aws/database.rb | 21 +++------------------ modules/mu/clouds/google/database.rb | 9 +-------- modules/mu/config/database.rb | 27 +++++++-------------------- 3 files changed, 11 insertions(+), 46 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 1c05492e2..9b5d17ed1 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -990,24 +990,9 @@ def add_basic def add_cluster_node - cluster = nil - rr = @config["member_of_cluster"] - cluster = @deploy.findLitterMate(type: "database", name: rr['db_name']) if rr['db_name'] - - if cluster.nil? - tag_key, tag_value = rr['tag'].split(/=/, 2) if !rr['tag'].nil? - found = MU::MommaCat.findStray( - rr['cloud'], - "database", - deploy_id: rr["deploy_id"], - cloud_id: rr["db_id"], - tag_key: tag_key, - tag_value: tag_value, - region: rr["region"], - dummy_ok: true - ) - cluster = found.first if found.size == 1 - end + cluster_ref = MU::Config::Ref.get(@config["member_of_cluster"]) + + cluster = cluster.kitten raise MuError, "Couldn't resolve cluster node reference to a unique live Database in #{@mu_name}" if cluster.nil? || cluster.cloud_id.nil? 
@config['cluster_identifier'] = cluster.cloud_id.downcase diff --git a/modules/mu/clouds/google/database.rb b/modules/mu/clouds/google/database.rb index d4520db4a..cb3e9f5ec 100644 --- a/modules/mu/clouds/google/database.rb +++ b/modules/mu/clouds/google/database.rb @@ -25,14 +25,7 @@ def initialize(**args) @config["groomer"] = MU::Config.defaultGroomer unless @config["groomer"] @groomclass = MU::Groomer.loadGroomer(@config["groomer"]) - @mu_name ||= - if @config and @config['engine'] and @config["engine"].match(/^sqlserver/) - @deploy.getResourceName(@config["name"], max_length: 15) - else - @deploy.getResourceName(@config["name"], max_length: 63) - end - - @mu_name.gsub(/(--|-$)/i, "").gsub(/(_)/, "-").gsub!(/^[^a-z]/i, "") + @mu_name ||= @deploy.getResourceName(@config["name"], max_length: 63) end # Called automatically by {MU::Deploy#createResources} diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index 4ae5a4d55..d96ee34a8 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -367,7 +367,7 @@ def self.validate(db, configurator) node["creation_style"] = "new" node["add_cluster_node"] = true node["member_of_cluster"] = { - "db_name" => db['name'], + "name" => db['name'], "cloud" => db['cloud'], "region" => db['region'] } @@ -399,28 +399,15 @@ def self.validate(db, configurator) ok = false end elsif db["member_of_cluster"] - rr = db["member_of_cluster"] - if rr['db_name'] - if !configurator.haveLitterMate?(rr['db_name'], "databases") - MU.log "Database cluster node #{db['name']} references sibling source #{rr['db_name']}, but I have no such database", MU::ERR + cluster = MU::Config::Ref.get(db["member_of_cluster"]) + if cluster['name'] + if !configurator.haveLitterMate?(cluster['name'], "databases") + MU.log "Database cluster node #{db['name']} references sibling source #{cluster['name']}, but I have no such database", MU::ERR ok = false end else - rr['cloud'] = db['cloud'] if rr['cloud'].nil? 
- tag_key, tag_value = rr['tag'].split(/=/, 2) if !rr['tag'].nil? - found = MU::MommaCat.findStray( - rr['cloud'], - "database", - deploy_id: rr["deploy_id"], - cloud_id: rr["db_id"], - tag_key: tag_key, - tag_value: tag_value, - region: rr["region"], - dummy_ok: true - ) - ext_database = found.first if !found.nil? and found.size == 1 - if !ext_database - MU.log "Couldn't resolve Database reference to a unique live Database in #{db['name']}", MU::ERR, details: rr + if !cluster.kitten + MU.log "Couldn't resolve Database reference to a unique live Database in #{db['name']}", MU::ERR, details: cluster.to_h ok = false end end From 95649dc379f127cd9a4265388eabb01a90b56b79 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 13:33:32 -0400 Subject: [PATCH 046/124] AWS::Database: fudge things around to get under rubocop's method complexity bar --- modules/mu/clouds/aws/database.rb | 68 ++++++++++++++----------------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 9b5d17ed1..8b7b2c96a 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -821,6 +821,10 @@ def self.validateConfig(db, _configurator) ok = false if !validate_engine(db) + ok = false if !valid_read_replica?(db) + + ok = false if !valid_cloudwatch_logs?(db) + db["license_model"] ||= if ["postgres", "postgresql", "aurora-postgresql"].include?(db["engine"]) "postgresql-license" @@ -848,6 +852,16 @@ def self.validateConfig(db, _configurator) ok = false end + ok = false if !validate_network_cfg(db) + + ok + end + + private + + def self.validate_network_cfg(db) + ok = true + if !db['vpc'] db["vpc"] = MU::Cloud::AWS::VPC.defaultVpc(db['region'], db['credentials']) if db['vpc'] and !(db['engine'].match(/sqlserver/) and db['create_read_replica']) @@ -870,10 +884,13 @@ def self.validateConfig(db, _configurator) ok end + private_class_method :validate_network_cfg - private + def 
self.valid_read_replica?(db) + if !db['create_read_replica'] and !db['read_replica_of'] + return true + end - def self.can_read_replica?(db) engine = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) if engine.nil? or !engine['features'] or !engine['features'][db['engine_version']] return true # we can't be sure, so let the API sort it out later @@ -885,7 +902,7 @@ def self.can_read_replica?(db) end true end - private_class_method :can_read_replica? + private_class_method :valid_read_replica? def self.valid_cloudwatch_logs?(db) return true if !db['cloudwatch_logs'] @@ -910,7 +927,9 @@ def self.valid_cloudwatch_logs?(db) def self.validate_engine(db) ok = true - if db['create_cluster'] or db["member_of_cluster"] or (db['engine'] and db['engine'].match(/aurora/)) + is_cluster = db['create_cluster'] or db["member_of_cluster"] or db["add_cluster_node"] or (db['engine'] and db['engine'].match(/aurora/)) + + if is_cluster case db['engine'] when "mysql", "aurora", "aurora-mysql" if (db['engine_version'] and db["engine_version"].match(/^5\.6/)) or db["cluster_mode"] == "serverless" @@ -926,6 +945,7 @@ def self.validate_engine(db) ok = false MU.log "#{db['engine']} is not supported for clustering", MU::ERR end + db["create_cluster"] = true if !(db["member_of_cluster"] or db["add_cluster_node"]) end db["engine"] = "oracle-se2" if db["engine"] == "oracle" @@ -938,11 +958,6 @@ def self.validate_engine(db) return false end - if db['engine'].match(/^aurora/) and !db['create_cluster'] and !db['add_cluster_node'] - MU.log "Database #{db['name']}: #{db['engine']} looks like a cluster engine, but create_cluster is not set. 
Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR - ok = false - end - # Resolve or default our engine version to something reasonable db['engine_version'] ||= engine_cfg['versions'].last if !engine_cfg['versions'].include?(db["engine_version"]) @@ -959,14 +974,6 @@ def self.validate_engine(db) ok = false end - if (db['create_read_replica'] or db['read_replica_of']) and !can_read_replica?(db) - ok = false - end - - if db['cloudwatch_logs'] and !valid_cloudwatch_logs?(db) - ok = false - end - ok end private_class_method :validate_engine @@ -990,16 +997,13 @@ def add_basic def add_cluster_node - cluster_ref = MU::Config::Ref.get(@config["member_of_cluster"]) - - cluster = cluster.kitten + cluster = MU::Config::Ref.get(@config["member_of_cluster"]).kitten raise MuError, "Couldn't resolve cluster node reference to a unique live Database in #{@mu_name}" if cluster.nil? || cluster.cloud_id.nil? @config['cluster_identifier'] = cluster.cloud_id.downcase # We're overriding @config["subnet_group_name"] because we need each cluster member to use the cluster's subnet group instead of a unique subnet group @config["subnet_group_name"] = @config['cluster_identifier'] if @vpc @config["creation_style"] = "new" if @config["creation_style"] != "new" - if @config.has_key?("parameter_group_family") @config["parameter_group_name"] = @mu_name createDBParameterGroup @@ -1179,17 +1183,15 @@ def createDb # If referencing an existing DB, insert this deploy's DB security group so it can access the thing if @config["creation_style"] == 'existing' - vpc_sg_ids = cloud_desc.vpc_security_groups.map { |sg| sg.vpc_security_group_id } + mod_config = {} + mod_config[:db_instance_identifier] = @cloud_id + mod_config[:vpc_security_group_ids] = cloud_desc.vpc_security_groups.map { |sg| sg.vpc_security_group_id } localdeploy_rule = @deploy.findLitterMate(type: "firewall_rule", name: "database"+@config['name']) if localdeploy_rule.nil? 
raise MU::MuError, "Database #{@config['name']} failed to find its generic security group 'database#{@config['name']}'" end - MU.log "Found this deploy's DB security group: #{localdeploy_rule.cloud_id}", MU::DEBUG - vpc_sg_ids << localdeploy_rule.cloud_id - mod_config = Hash.new - mod_config[:vpc_security_group_ids] = vpc_sg_ids - mod_config[:db_instance_identifier] = @cloud_id + mod_config[:vpc_security_group_ids] << localdeploy_rule.cloud_id MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) MU.log "Modified database #{@cloud_id} with new security groups: #{mod_config}", MU::NOTICE @@ -1221,16 +1223,8 @@ def createDb mod_config[:preferred_maintenance_window] = @config["preferred_maintenance_window"] end - begin - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) - rescue Aws::RDS::Errors::InvalidParameterValue => e - if e.message =~ /Invalid security group/ - MU.log e.message+" modifying "+@cloud_id, MU::ERR, details: mod_config - end - raise e - end + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config) wait_until_available - end # Maybe wait for DB instance to be in available state. DB should still be writeable at this state @@ -1377,7 +1371,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials, cluster: cluster).values.first return if db.nil? } -pp db + MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: (cluster ? db.endpoint : db.endpoint.address), cloudclass: MU::Cloud::Database, delete: true) if !noop if %w{deleting deleted}.include?(cluster ? 
db.status : db.db_instance_status) From d3aeff12b4e28c6a4e4697d7273880475091eabd Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 16:21:25 -0400 Subject: [PATCH 047/124] ensmartle MuError to display detailed log messages on its own; Database: more validation of reference syntax --- modules/mu.rb | 5 ++- modules/mu/clouds/aws/cache_cluster.rb | 4 +- modules/mu/clouds/aws/database.rb | 58 +++++++++++--------------- modules/mu/config/database.rb | 9 ++-- 4 files changed, 33 insertions(+), 43 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index 2fb1f447f..8b36f2bb9 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -294,8 +294,9 @@ def initialize(*args, &block) # Wrapper class for fatal Exceptions. Gives our internals something to # inherit that will log an error message appropriately before bubbling up. class MuError < StandardError - def initialize(message = nil, silent: false) - MU.log message, MU::ERR, details: caller[2] if !message.nil? and !silent + def initialize(message = nil, silent: false, details: nil) + details ||= caller[2] + MU.log message, MU::ERR, details: details if !message.nil? and !silent if MU.verbosity == MU::Logger::SILENT super "" else diff --git a/modules/mu/clouds/aws/cache_cluster.rb b/modules/mu/clouds/aws/cache_cluster.rb index 2a82f506b..dedc3ad75 100644 --- a/modules/mu/clouds/aws/cache_cluster.rb +++ b/modules/mu/clouds/aws/cache_cluster.rb @@ -270,7 +270,7 @@ def create def createSubnetGroup subnet_ids = [] if @config["vpc"] && !@config["vpc"].empty? 
- raise MuError, "Didn't find the VPC specified in #{@config["vpc"]}" unless @vpc + raise MuError.new "Didn't find the VPC specified for #{@mu_name}", details: @config["vpc"].to_h unless @vpc vpc_id = @vpc.cloud_id @@ -283,7 +283,7 @@ def createSubnetGroup else @config["vpc"]["subnets"].each { |subnet| subnet_obj = @vpc.getSubnet(cloud_id: subnet["subnet_id"].to_s, name: subnet["subnet_name"].to_s) - raise MuError, "Couldn't find a live subnet matching #{subnet} in #{@vpc} (#{@vpc.subnets})" if subnet_obj.nil? + raise MuError.new "Couldn't find a live subnet matching #{subnet} in #{@vpc}", details: @vpc.subnets if subnet_obj.nil? subnet_ids << subnet_obj.cloud_id } end diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 8b7b2c96a..9c687742e 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -279,7 +279,7 @@ def createSubnetGroup # Finding subnets, creating security groups/adding holes, create subnet group subnet_ids = [] - raise MuError, "Didn't find the VPC specified in #{@config["vpc"]}" unless @vpc + raise MuError.new "Didn't find the VPC specified for #{@mu_name}", details: @config["vpc"].to_h unless @vpc mySubnets.each { |subnet| next if @config["publicly_accessible"] and subnet.private? 
@@ -465,20 +465,22 @@ def createNewSnapshot MU.log "Failed to get an id from reference for creating a snapshot", MU::ERR, details: @config['source'] raise "Failed to get an id from reference for creating a snapshot" end + params = { + :tags => @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } + } + if @config["create_cluster"] + params[:db_cluster_snapshot_identifier] = snap_id + params[:db_cluster_identifier] = src_ref.id + else + params[:db_snapshot_identifier] = snap_id + params[:db_instance_identifier] = src_ref.id + end MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 10) { if @config["create_cluster"] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster_snapshot( - db_cluster_snapshot_identifier: snap_id, - db_cluster_identifier: src_ref.id, - tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } - ) + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster_snapshot(params) else - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_snapshot( - db_snapshot_identifier: snap_id, - db_instance_identifier: src_ref.id, - tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } - ) + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_snapshot(params) end } @@ -927,9 +929,7 @@ def self.valid_cloudwatch_logs?(db) def self.validate_engine(db) ok = true - is_cluster = db['create_cluster'] or db["member_of_cluster"] or db["add_cluster_node"] or (db['engine'] and db['engine'].match(/aurora/)) - - if is_cluster + if db['create_cluster'] or db["member_of_cluster"] or db["add_cluster_node"] or (db['engine'] and db['engine'].match(/aurora/)) case db['engine'] when "mysql", "aurora", "aurora-mysql" if (db['engine_version'] and db["engine_version"].match(/^5\.6/)) or db["cluster_mode"] == "serverless" @@ 
-997,9 +997,12 @@ def add_basic def add_cluster_node - cluster = MU::Config::Ref.get(@config["member_of_cluster"]).kitten + cluster = MU::Config::Ref.get(@config["member_of_cluster"]).kitten(@deploy, debug: true) + if cluster.nil? or cluster.cloud_id.nil? +puts @deploy.findLitterMate(type: "database", name: @config['member_of_cluster']['name']).class.name + raise MuError.new "Failed to resolve parent cluster of #{@mu_name}", details: @config["member_of_cluster"].to_h + end - raise MuError, "Couldn't resolve cluster node reference to a unique live Database in #{@mu_name}" if cluster.nil? || cluster.cloud_id.nil? @config['cluster_identifier'] = cluster.cloud_id.downcase # We're overriding @config["subnet_group_name"] because we need each cluster member to use the cluster's subnet group instead of a unique subnet group @config["subnet_group_name"] = @config['cluster_identifier'] if @vpc @@ -1051,20 +1054,10 @@ def create_basic if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) [:storage_encrypted, :master_user_password, :engine_version, :allocated_storage, :backup_retention_period, :preferred_backup_window, :master_username, :db_name, :database_name].each { |p| params.delete(p) } MU.log "Creating database #{noun} #{@cloud_id} from snapshot #{@config["snapshot_id"]}" - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("restore_db_#{noun}_from_snapshot".to_sym, params) -# if @config['create_cluster'] -# MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_from_snapshot(params) -# else -# MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_from_db_snapshot(params) -# end + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("restore_db_#{noun}_from_#{noun == "instance" ? 
"db_" : ""}snapshot".to_sym, params) else MU.log "Creating pristine database #{noun} #{@cloud_id} (#{@config['name']}) in #{@config['region']}" MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("create_db_#{noun}".to_sym, params) -# if @config['create_cluster'] -# MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster(params) -# else -# MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance(params) -# end end } end @@ -1073,8 +1066,7 @@ def create_basic def create_point_in_time @config["source"].kitten(@deploy, debug: true) if !@config["source"].id - MU.log "Database '#{@config['name']}' couldn't resolve cloud id for source database", MU::ERR, details: @config["source"].to_h - raise MuError, "Database '#{@config['name']}' couldn't resolve cloud id for source database" + raise MuError.new "Database '#{@config['name']}' couldn't resolve cloud id for source database", details: @config["source"].to_h end params = genericParams @@ -1090,7 +1082,7 @@ def create_point_in_time params[:use_latest_restorable_time] = true if @config['restore_time'] == "latest" - MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 6, wait: 20) { + MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 15, wait: 20) { MU.log "Creating database #{@config['create_cluster'] ? 
"cluster" : "instance" } #{@cloud_id} based on point in time backup #{@config['restore_time']} of #{@config['source'].id}" if @config['create_cluster'] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_to_point_in_time(params) @@ -1104,8 +1096,7 @@ def create_point_in_time def create_read_replica @config["source"].kitten(@deploy, debug: true) if !@config["source"].id - MU.log "Database '#{@config['name']}' couldn't resolve cloud id for source database", MU::ERR, details: @config["source"].to_h - raise MuError, "Database '#{@config['name']}' couldn't resolve cloud id for source database" + raise MuError.new "Database '#{@config['name']}' couldn't resolve cloud id for source database", details: @config["source"].to_h end params = { @@ -1402,9 +1393,8 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M end } del_db = nil - MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound], loop_if: Proc.new { del_db and del_db.db_instance_status != "deleted" }) { + MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound], loop_if: Proc.new { del_db and ((!cluster and del_db.db_instance_status != "deleted") or (cluster and del_db.status != "deleted")) }) { del_db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, cluster: cluster).values.first -MU.log cloud_id, MU::NOTICE, details: del_db if cluster } end end diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index d96ee34a8..6992323fc 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -61,10 +61,7 @@ def self.schema "description" => "Internal use", "default" => false }, - "member_of_cluster" => { - "description" => "Internal use", - "type" => "object" - }, + "member_of_cluster" => MU::Config::Ref.schema(type: "databases", desc: "Internal use"), "dns_records" => MU::Config::DNSZone.records_primitive(need_target: false, default_type: "CNAME", 
need_zone: true), "dns_sync_wait" => { "type" => "boolean", @@ -369,7 +366,9 @@ def self.validate(db, configurator) node["member_of_cluster"] = { "name" => db['name'], "cloud" => db['cloud'], - "region" => db['region'] + "region" => db['region'], + "credentials" => db['credentials'], + "type" => "databases" } # AWS will figure out for us which database instance is the writer/master so we can create all of them concurrently. node['dependencies'] << { From d3587c3aafb8c588d12947ab7bfa694039cb38fe Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 17:43:31 -0400 Subject: [PATCH 048/124] AWS::Database: enhance .find in preparation for adoption support --- modules/mu/clouds/aws/database.rb | 55 ++++++++++++++++++++++--------- 1 file changed, 39 insertions(+), 16 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 9c687742e..32183d531 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -184,21 +184,44 @@ def self.find(**args) rescue Aws::RDS::Errors::DBClusterNotFoundFault end return { args[:cloud_id] => resp } if resp - elsif args[:tag_value] - MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).describe_db_instances.db_instances.each { |db| - resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).list_tags_for_resource( - resource_name: MU::Cloud::AWS::Database.getARN(db.db_instance_identifier, "db", "rds", region: args[:region], credentials: args[:credentials]) - ) - if resp and resp.tag_list - resp.tag_list.each { |tag| - found[db.db_instance_identifier] = db if tag.key == args[:tag_key] and tag.value == args[:tag_value] - } - end - } + else - MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).describe_db_instances.db_instances.each { |db| - found[db.db_instance_identifier] = db - } + marker = nil + if !args[:cluster] + begin + resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: 
args[:region]).describe_db_instances + marker = resp.marker + resp.db_instances.each { |db| + found[db.db_instance_identifier] = db + } + end while marker.nil? + elsif args[:cluster] or !args.has_key?(:cluster) + begin + resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).describe_db_clusters + marker = resp.marker + resp.db_clusters.each { |db| + found[db.db_cluster_identifier] = db + } + end while marker.nil? + end + if args[:tag_key] and args[:tag_value] + keep = [] + found.each_pair { |id, desc| + noun = desc.is_a?(Aws::RDS::Types::DBCluster) ? "cluster" : "db" + resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).list_tags_for_resource( + resource_name: MU::Cloud::AWS::Database.getARN(id, noun, "rds", region: args[:region], credentials: args[:credentials]) + ) + if resp and resp.tag_list + resp.tag_list.each { |tag| + if tag.key == args[:tag_key] and tag.value == args[:tag_value] + keep << id + break + end + } + end + } + found.reject! { |k, _v| !keep.include?(k) } + end end return found @@ -970,7 +993,7 @@ def self.validate_engine(db) if db["parameter_group_family"] and !engine_cfg['families'].include?(db['parameter_group_family']) - MU.log "RDS engine '#{db['engine']}' parameter group family '#{db['parameter_group_family']}' is not supported in #{db['region']}", MU::ERR, details: { "Valid parameter families:" => engine_cfg['families'].uniq.sort } + MU.log "RDS engine '#{db['engine']}' parameter group family '#{db['parameter_group_family']}' is not supported.", MU::ERR, details: engine_cfg['families'].uniq.sort ok = false end @@ -1083,7 +1106,7 @@ def create_point_in_time MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 15, wait: 20) { - MU.log "Creating database #{@config['create_cluster'] ? "cluster" : "instance" } #{@cloud_id} based on point in time backup #{@config['restore_time']} of #{@config['source'].id}" + MU.log "Creating database #{@config['create_cluster'] ? 
"cluster" : "instance" } #{@cloud_id} based on point in time backup '#{@config['restore_time']}' of #{@config['source'].id}" if @config['create_cluster'] MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_to_point_in_time(params) else From a62a6121687e6f020980948e4fb59704ef8f1ede Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 17:53:43 -0400 Subject: [PATCH 049/124] AWS::Database: munge few more cluster/intance calls with ridiculous Ruby metaprogramming --- modules/mu/clouds/aws/database.rb | 33 +++++++++++-------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 32183d531..2cd3ebdfd 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -186,23 +186,20 @@ def self.find(**args) return { args[:cloud_id] => resp } if resp else - marker = nil - if !args[:cluster] + fetch = Proc.new { |noun| + marker = nil begin - resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).describe_db_instances + resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).send("describe_db_#{noun}s".to_sym) marker = resp.marker - resp.db_instances.each { |db| - found[db.db_instance_identifier] = db + resp.send("db_#{noun}s").each { |db| + found[db.send("db_#{noun}_identifier".to_sym)] = db } end while marker.nil? + } + if !args[:cluster] + fetch.call("instance") elsif args[:cluster] or !args.has_key?(:cluster) - begin - resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).describe_db_clusters - marker = resp.marker - resp.db_clusters.each { |db| - found[db.db_cluster_identifier] = db - } - end while marker.nil? 
+ fetch.call("cluster") end if args[:tag_key] and args[:tag_value] keep = [] @@ -500,11 +497,7 @@ def createNewSnapshot end MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 10) { - if @config["create_cluster"] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_cluster_snapshot(params) - else - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_snapshot(params) - end + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("create_db_#{@config['create_cluster'] ? "cluster_" : ""}snapshot".to_sym, params) } loop_if = Proc.new { @@ -1107,11 +1100,7 @@ def create_point_in_time MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 15, wait: 20) { MU.log "Creating database #{@config['create_cluster'] ? "cluster" : "instance" } #{@cloud_id} based on point in time backup '#{@config['restore_time']}' of #{@config['source'].id}" - if @config['create_cluster'] - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_cluster_to_point_in_time(params) - else - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).restore_db_instance_to_point_in_time(params) - end + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("restore_db_#{@config['create_cluster'] ? 
"cluster" : "instance"}_to_point_in_time".to_sym, params) } end From 7cf8fff29c56175691939e1cd237bf8bbfa6a25e Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 18:02:04 -0400 Subject: [PATCH 050/124] AWS::Database: a couple more CodeClimate trivialities --- modules/mu/clouds/aws/database.rb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 2cd3ebdfd..66e8430bf 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -177,11 +177,13 @@ def self.find(**args) resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_instances(db_instance_identifier: args[:cloud_id]).db_instances.first return { args[:cloud_id] => resp } if resp rescue Aws::RDS::Errors::DBInstanceNotFound + MU.log "No results found looking for RDS instance #{args[:cloud_id]}", MU::DEBUG end end begin resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_clusters(db_cluster_identifier: args[:cloud_id]).db_clusters.first rescue Aws::RDS::Errors::DBClusterNotFoundFault + MU.log "No results found looking for RDS cluster #{args[:cloud_id]}", MU::DEBUG end return { args[:cloud_id] => resp } if resp @@ -820,7 +822,7 @@ def self.validate_master_password(db) end if pw and (pw.length < 8 or pw.match(/[\/\\@\s]/) or pw.length > maxlen) - MU.log "Database password specified in 'password' or 'auth_vault' doesn't meet RDS requirements. Must be between 8 and #{maxlen.to_s} chars and have only ASCII characters other than /, @, \", or [space].", MU::ERR + MU.log "Database password specified in 'password' or 'auth_vault' doesn't meet RDS requirements. 
Must be between 8 and #{maxlen} chars and have only ASCII characters other than /, @, \", or [space].", MU::ERR return false end From 7add6e06770d6111430222983735edb05a2b6b18 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 3 Apr 2020 18:05:51 -0400 Subject: [PATCH 051/124] AWS::Database: one last minor kvetch --- modules/mu/clouds/aws/database.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/clouds/aws/database.rb index 66e8430bf..767f961f2 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/clouds/aws/database.rb @@ -1442,7 +1442,7 @@ def self.purge_rds_sgs(cloud_id, region, credentials, noop) secgroup = MU::Cloud::AWS.rds(region: region, credentials: credentials).describe_db_security_groups(db_security_group_name: cloud_id) rdssecgroups << cloud_id if !secgroup.nil? rescue Aws::RDS::Errors::DBSecurityGroupNotFound - # this is normal in VPC world + MU.log "No such RDS security group #{sg} to purge", MU::DEBUG end # RDS security groups can depend on EC2 security groups, do these last From e3226355ab3ca2df902645c2f7b7d58d7f5711b3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Mon, 6 Apr 2020 11:14:23 -0400 Subject: [PATCH 052/124] migrate clouds/ module directory to providers/ so we can use that filename space for something else --- modules/mu/cleanup.rb | 2 +- modules/mu/cloud.rb | 8 +- modules/mu/config/alarm.rb | 2 +- modules/mu/config/bucket.rb | 2 +- modules/mu/config/cache_cluster.rb | 2 +- modules/mu/config/collection.rb | 2 +- modules/mu/config/container_cluster.rb | 2 +- modules/mu/config/database.rb | 2 +- modules/mu/config/dnszone.rb | 2 +- modules/mu/config/doc_helpers.rb | 2 +- modules/mu/config/endpoint.rb | 2 +- modules/mu/config/firewall_rule.rb | 2 +- modules/mu/config/folder.rb | 2 +- modules/mu/config/function.rb | 2 +- modules/mu/config/group.rb | 2 +- modules/mu/config/habitat.rb | 2 +- modules/mu/config/loadbalancer.rb | 2 +- modules/mu/config/log.rb | 2 +- 
modules/mu/config/msg_queue.rb | 2 +- modules/mu/config/nosqldb.rb | 2 +- modules/mu/config/notifier.rb | 2 +- modules/mu/config/role.rb | 2 +- modules/mu/config/search_domain.rb | 2 +- modules/mu/config/server.rb | 2 +- modules/mu/config/server_pool.rb | 2 +- modules/mu/config/storage_pool.rb | 2 +- modules/mu/config/user.rb | 2 +- modules/mu/config/vpc.rb | 2 +- modules/mu/{clouds => providers}/README.md | 2 +- modules/mu/{clouds => providers}/aws.rb | 0 modules/mu/{clouds => providers}/aws/alarm.rb | 0 .../mu/{clouds => providers}/aws/bucket.rb | 0 .../aws/cache_cluster.rb | 0 .../{clouds => providers}/aws/collection.rb | 0 .../aws/container_cluster.rb | 0 .../mu/{clouds => providers}/aws/database.rb | 101 +++++++++--------- .../mu/{clouds => providers}/aws/dnszone.rb | 0 .../mu/{clouds => providers}/aws/endpoint.rb | 0 .../aws/firewall_rule.rb | 2 +- .../mu/{clouds => providers}/aws/folder.rb | 0 .../mu/{clouds => providers}/aws/function.rb | 0 modules/mu/{clouds => providers}/aws/group.rb | 0 .../mu/{clouds => providers}/aws/habitat.rb | 0 .../{clouds => providers}/aws/loadbalancer.rb | 0 modules/mu/{clouds => providers}/aws/log.rb | 0 .../mu/{clouds => providers}/aws/msg_queue.rb | 0 .../mu/{clouds => providers}/aws/nosqldb.rb | 0 .../mu/{clouds => providers}/aws/notifier.rb | 0 modules/mu/{clouds => providers}/aws/role.rb | 0 .../aws/search_domain.rb | 0 .../mu/{clouds => providers}/aws/server.rb | 4 +- .../{clouds => providers}/aws/server_pool.rb | 0 .../{clouds => providers}/aws/storage_pool.rb | 0 modules/mu/{clouds => providers}/aws/user.rb | 0 .../aws/userdata/README.md | 0 .../aws/userdata/linux.erb | 0 .../aws/userdata/windows.erb | 0 modules/mu/{clouds => providers}/aws/vpc.rb | 2 +- .../{clouds => providers}/aws/vpc_subnet.rb | 0 modules/mu/{clouds => providers}/azure.rb | 0 .../azure/container_cluster.rb | 0 .../azure/firewall_rule.rb | 0 .../mu/{clouds => providers}/azure/habitat.rb | 0 .../azure/loadbalancer.rb | 0 .../mu/{clouds => 
providers}/azure/role.rb | 0 .../mu/{clouds => providers}/azure/server.rb | 0 .../mu/{clouds => providers}/azure/user.rb | 0 .../azure/userdata/README.md | 0 .../azure/userdata/linux.erb | 0 .../azure/userdata/windows.erb | 0 modules/mu/{clouds => providers}/azure/vpc.rb | 0 .../{clouds => providers}/cloudformation.rb | 0 .../cloudformation/alarm.rb | 0 .../cloudformation/cache_cluster.rb | 0 .../cloudformation/collection.rb | 0 .../cloudformation/database.rb | 0 .../cloudformation/dnszone.rb | 0 .../cloudformation/firewall_rule.rb | 0 .../cloudformation/loadbalancer.rb | 0 .../cloudformation/log.rb | 0 .../cloudformation/server.rb | 0 .../cloudformation/server_pool.rb | 0 .../cloudformation/vpc.rb | 0 modules/mu/{clouds => providers}/docker.rb | 0 modules/mu/{clouds => providers}/google.rb | 0 .../mu/{clouds => providers}/google/bucket.rb | 0 .../google/container_cluster.rb | 0 .../{clouds => providers}/google/database.rb | 0 .../google/firewall_rule.rb | 0 .../mu/{clouds => providers}/google/folder.rb | 0 .../{clouds => providers}/google/function.rb | 0 .../mu/{clouds => providers}/google/group.rb | 0 .../{clouds => providers}/google/habitat.rb | 0 .../google/loadbalancer.rb | 0 .../mu/{clouds => providers}/google/role.rb | 0 .../mu/{clouds => providers}/google/server.rb | 0 .../google/server_pool.rb | 0 .../mu/{clouds => providers}/google/user.rb | 0 .../google/userdata/README.md | 0 .../google/userdata/linux.erb | 0 .../google/userdata/windows.erb | 0 .../mu/{clouds => providers}/google/vpc.rb | 0 spec/mu/clouds/azure_spec.rb | 4 +- 103 files changed, 90 insertions(+), 87 deletions(-) rename modules/mu/{clouds => providers}/README.md (99%) rename modules/mu/{clouds => providers}/aws.rb (100%) rename modules/mu/{clouds => providers}/aws/alarm.rb (100%) rename modules/mu/{clouds => providers}/aws/bucket.rb (100%) rename modules/mu/{clouds => providers}/aws/cache_cluster.rb (100%) rename modules/mu/{clouds => providers}/aws/collection.rb (100%) rename 
modules/mu/{clouds => providers}/aws/container_cluster.rb (100%) rename modules/mu/{clouds => providers}/aws/database.rb (99%) rename modules/mu/{clouds => providers}/aws/dnszone.rb (100%) rename modules/mu/{clouds => providers}/aws/endpoint.rb (100%) rename modules/mu/{clouds => providers}/aws/firewall_rule.rb (99%) rename modules/mu/{clouds => providers}/aws/folder.rb (100%) rename modules/mu/{clouds => providers}/aws/function.rb (100%) rename modules/mu/{clouds => providers}/aws/group.rb (100%) rename modules/mu/{clouds => providers}/aws/habitat.rb (100%) rename modules/mu/{clouds => providers}/aws/loadbalancer.rb (100%) rename modules/mu/{clouds => providers}/aws/log.rb (100%) rename modules/mu/{clouds => providers}/aws/msg_queue.rb (100%) rename modules/mu/{clouds => providers}/aws/nosqldb.rb (100%) rename modules/mu/{clouds => providers}/aws/notifier.rb (100%) rename modules/mu/{clouds => providers}/aws/role.rb (100%) rename modules/mu/{clouds => providers}/aws/search_domain.rb (100%) rename modules/mu/{clouds => providers}/aws/server.rb (99%) rename modules/mu/{clouds => providers}/aws/server_pool.rb (100%) rename modules/mu/{clouds => providers}/aws/storage_pool.rb (100%) rename modules/mu/{clouds => providers}/aws/user.rb (100%) rename modules/mu/{clouds => providers}/aws/userdata/README.md (100%) rename modules/mu/{clouds => providers}/aws/userdata/linux.erb (100%) rename modules/mu/{clouds => providers}/aws/userdata/windows.erb (100%) rename modules/mu/{clouds => providers}/aws/vpc.rb (99%) rename modules/mu/{clouds => providers}/aws/vpc_subnet.rb (100%) rename modules/mu/{clouds => providers}/azure.rb (100%) rename modules/mu/{clouds => providers}/azure/container_cluster.rb (100%) rename modules/mu/{clouds => providers}/azure/firewall_rule.rb (100%) rename modules/mu/{clouds => providers}/azure/habitat.rb (100%) rename modules/mu/{clouds => providers}/azure/loadbalancer.rb (100%) rename modules/mu/{clouds => providers}/azure/role.rb (100%) rename 
modules/mu/{clouds => providers}/azure/server.rb (100%) rename modules/mu/{clouds => providers}/azure/user.rb (100%) rename modules/mu/{clouds => providers}/azure/userdata/README.md (100%) rename modules/mu/{clouds => providers}/azure/userdata/linux.erb (100%) rename modules/mu/{clouds => providers}/azure/userdata/windows.erb (100%) rename modules/mu/{clouds => providers}/azure/vpc.rb (100%) rename modules/mu/{clouds => providers}/cloudformation.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/alarm.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/cache_cluster.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/collection.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/database.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/dnszone.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/firewall_rule.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/loadbalancer.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/log.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/server.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/server_pool.rb (100%) rename modules/mu/{clouds => providers}/cloudformation/vpc.rb (100%) rename modules/mu/{clouds => providers}/docker.rb (100%) rename modules/mu/{clouds => providers}/google.rb (100%) rename modules/mu/{clouds => providers}/google/bucket.rb (100%) rename modules/mu/{clouds => providers}/google/container_cluster.rb (100%) rename modules/mu/{clouds => providers}/google/database.rb (100%) rename modules/mu/{clouds => providers}/google/firewall_rule.rb (100%) rename modules/mu/{clouds => providers}/google/folder.rb (100%) rename modules/mu/{clouds => providers}/google/function.rb (100%) rename modules/mu/{clouds => providers}/google/group.rb (100%) rename modules/mu/{clouds => providers}/google/habitat.rb (100%) rename modules/mu/{clouds => providers}/google/loadbalancer.rb (100%) 
rename modules/mu/{clouds => providers}/google/role.rb (100%) rename modules/mu/{clouds => providers}/google/server.rb (100%) rename modules/mu/{clouds => providers}/google/server_pool.rb (100%) rename modules/mu/{clouds => providers}/google/user.rb (100%) rename modules/mu/{clouds => providers}/google/userdata/README.md (100%) rename modules/mu/{clouds => providers}/google/userdata/linux.erb (100%) rename modules/mu/{clouds => providers}/google/userdata/windows.erb (100%) rename modules/mu/{clouds => providers}/google/vpc.rb (100%) diff --git a/modules/mu/cleanup.rb b/modules/mu/cleanup.rb index 54086e959..7ddf8b8ce 100644 --- a/modules/mu/cleanup.rb +++ b/modules/mu/cleanup.rb @@ -297,7 +297,7 @@ def self.cleanHabitat(cloud, credset, region, habitat, global_vs_region_semaphor rescue MU::Cloud::MuDefunctHabitat, MU::Cloud::MuCloudResourceNotImplemented next rescue MU::MuError, NoMethodError => e - MU.log "While checking mu/clouds/#{cloud.downcase}/#{cloudclass.cfg_name} for global-ness in cleanup: "+e.message, MU::WARN + MU.log "While checking mu/providers/#{cloud.downcase}/#{cloudclass.cfg_name} for global-ness in cleanup: "+e.message, MU::WARN next rescue ::Aws::EC2::Errors::AuthFailure, ::Google::Apis::ClientError => e MU.log e.message+" in "+region, MU::ERR diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb index 0be1e36e2..6492d43ee 100644 --- a/modules/mu/cloud.rb +++ b/modules/mu/cloud.rb @@ -732,7 +732,7 @@ def self.assertAvailableCloud(cloud) # code for each of its supported resource type classes. 
failed = [] MU::Cloud.supportedClouds.each { |cloud| - require "mu/clouds/#{cloud.downcase}" + require "mu/providers/#{cloud.downcase}" cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) generic_class_methods_toplevel.each { |method| if !cloudclass.respond_to?(method) @@ -767,7 +767,7 @@ def self.fetchUserdata(platform: "linux", template_variables: {}, custom_append: end template_variables["credentials"] ||= credentials $mu = OpenStruct.new(template_variables) - userdata_dir = File.expand_path(MU.myRoot+"/modules/mu/clouds/#{cloud.downcase}/userdata") + userdata_dir = File.expand_path(MU.myRoot+"/modules/mu/providers/#{cloud.downcase}/userdata") platform = if %w{win2k12r2 win2k12 win2k8 win2k8r2 win2k16 windows win2k19}.include?(platform) "windows" @@ -839,11 +839,11 @@ def self.loadCloudType(cloud, type) if cfg_name.nil? raise MuError, "Can't find a cloud resource type named '#{type}'" end - if !File.size?(MU.myRoot+"/modules/mu/clouds/#{cloud.downcase}.rb") + if !File.size?(MU.myRoot+"/modules/mu/providers/#{cloud.downcase}.rb") raise MuError, "Requested to use unsupported provisioning layer #{cloud}" end begin - require "mu/clouds/#{cloud.downcase}/#{cfg_name}" + require "mu/providers/#{cloud.downcase}/#{cfg_name}" rescue LoadError => e raise MuCloudResourceNotImplemented, "MU::Cloud::#{cloud} does not currently implement #{shortclass}, or implementation does not load correctly (#{e.message})" end diff --git a/modules/mu/config/alarm.rb b/modules/mu/config/alarm.rb index a5d1f0552..3b398f015 100644 --- a/modules/mu/config/alarm.rb +++ b/modules/mu/config/alarm.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/alarm.rb + # Basket of Kittens config schema and parser logic. 
See modules/mu/providers/*/alarm.rb class Alarm # Sections of Alarm schema shared between Alarms as a first-class diff --git a/modules/mu/config/bucket.rb b/modules/mu/config/bucket.rb index 8ad0dedb1..4ebed7d30 100644 --- a/modules/mu/config/bucket.rb +++ b/modules/mu/config/bucket.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/bucket.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/bucket.rb class Bucket # Base configuration schema for a Bucket diff --git a/modules/mu/config/cache_cluster.rb b/modules/mu/config/cache_cluster.rb index 678e2660d..37456320a 100644 --- a/modules/mu/config/cache_cluster.rb +++ b/modules/mu/config/cache_cluster.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/cache_cluster.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/cache_cluster.rb class CacheCluster # Base configuration schema for a CacheCluster diff --git a/modules/mu/config/collection.rb b/modules/mu/config/collection.rb index fe1359b95..38b71d0e6 100644 --- a/modules/mu/config/collection.rb +++ b/modules/mu/config/collection.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/collection.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/collection.rb class Collection # Base configuration schema for a Collection diff --git a/modules/mu/config/container_cluster.rb b/modules/mu/config/container_cluster.rb index aa1cefed7..c7dc192cf 100644 --- a/modules/mu/config/container_cluster.rb +++ b/modules/mu/config/container_cluster.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/container_cluster.rb + # Basket of Kittens config schema and parser logic. 
See modules/mu/providers/*/container_cluster.rb class ContainerCluster # Base configuration schema for a ContainerCluster diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index 6992323fc..46614e9fc 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/database.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/database.rb class Database # Base configuration schema for a Database diff --git a/modules/mu/config/dnszone.rb b/modules/mu/config/dnszone.rb index 103e96319..91348875d 100644 --- a/modules/mu/config/dnszone.rb +++ b/modules/mu/config/dnszone.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/dnszone.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/dnszone.rb class DNSZone # Base configuration schema for a DNSZone diff --git a/modules/mu/config/doc_helpers.rb b/modules/mu/config/doc_helpers.rb index 5ba4cd076..4d7956127 100644 --- a/modules/mu/config/doc_helpers.rb +++ b/modules/mu/config/doc_helpers.rb @@ -25,7 +25,7 @@ def self.docSchema MU::Cloud.resource_types.each_pair { |classname, attrs| MU::Cloud.supportedClouds.each { |cloud| begin - require "mu/clouds/#{cloud.downcase}/#{attrs[:cfg_name]}" + require "mu/providers/#{cloud.downcase}/#{attrs[:cfg_name]}" rescue LoadError next end diff --git a/modules/mu/config/endpoint.rb b/modules/mu/config/endpoint.rb index 742dbed00..9bdd99f06 100644 --- a/modules/mu/config/endpoint.rb +++ b/modules/mu/config/endpoint.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/api.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/api.rb class Endpoint # Base configuration schema for an Endpoint (e.g. 
AWS API Gateway) diff --git a/modules/mu/config/firewall_rule.rb b/modules/mu/config/firewall_rule.rb index c633c48ec..088e2a43b 100644 --- a/modules/mu/config/firewall_rule.rb +++ b/modules/mu/config/firewall_rule.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/firewall_rule.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/firewall_rule.rb class FirewallRule # Base configuration schema for a FirewallRule diff --git a/modules/mu/config/folder.rb b/modules/mu/config/folder.rb index 27f09beb4..b992dd06b 100644 --- a/modules/mu/config/folder.rb +++ b/modules/mu/config/folder.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/folder.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/folder.rb class Folder # Base configuration schema for a Folder diff --git a/modules/mu/config/function.rb b/modules/mu/config/function.rb index 57b13991f..6f5f3731d 100644 --- a/modules/mu/config/function.rb +++ b/modules/mu/config/function.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/function.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/function.rb class Function # Base configuration schema for a Function diff --git a/modules/mu/config/group.rb b/modules/mu/config/group.rb index b84b4ec00..aee210794 100644 --- a/modules/mu/config/group.rb +++ b/modules/mu/config/group.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/group.rb + # Basket of Kittens config schema and parser logic. 
See modules/mu/providers/*/group.rb class Group # Base configuration schema for a Group diff --git a/modules/mu/config/habitat.rb b/modules/mu/config/habitat.rb index b2eec644f..3a6197391 100644 --- a/modules/mu/config/habitat.rb +++ b/modules/mu/config/habitat.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/project.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/project.rb class Habitat # Base configuration schema for a Habitat diff --git a/modules/mu/config/loadbalancer.rb b/modules/mu/config/loadbalancer.rb index 46e80109a..ead393c1b 100644 --- a/modules/mu/config/loadbalancer.rb +++ b/modules/mu/config/loadbalancer.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/loadbalancer.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/loadbalancer.rb class LoadBalancer # Generate schema for a LoadBalancer health check diff --git a/modules/mu/config/log.rb b/modules/mu/config/log.rb index b477a7708..efb7d8fbf 100644 --- a/modules/mu/config/log.rb +++ b/modules/mu/config/log.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/log.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/log.rb class Log # Base configuration schema for a Log diff --git a/modules/mu/config/msg_queue.rb b/modules/mu/config/msg_queue.rb index ff7021f70..b2acfa8d2 100644 --- a/modules/mu/config/msg_queue.rb +++ b/modules/mu/config/msg_queue.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/msg_queue.rb + # Basket of Kittens config schema and parser logic. 
See modules/mu/providers/*/msg_queue.rb class MsgQueue # Base configuration schema for a MsgQueue diff --git a/modules/mu/config/nosqldb.rb b/modules/mu/config/nosqldb.rb index 5ec815e7e..df5813367 100644 --- a/modules/mu/config/nosqldb.rb +++ b/modules/mu/config/nosqldb.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/nosqldb.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/nosqldb.rb class NoSQLDB # Base configuration schema for a Bucket diff --git a/modules/mu/config/notifier.rb b/modules/mu/config/notifier.rb index e61232b96..22163a997 100644 --- a/modules/mu/config/notifier.rb +++ b/modules/mu/config/notifier.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/notifier.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/notifier.rb class Notifier # Base configuration schema for a Notifier diff --git a/modules/mu/config/role.rb b/modules/mu/config/role.rb index f6d7b934b..c1befbba2 100644 --- a/modules/mu/config/role.rb +++ b/modules/mu/config/role.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/role.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/role.rb class Role # Base configuration schema for a Group diff --git a/modules/mu/config/search_domain.rb b/modules/mu/config/search_domain.rb index 7c9dd6733..a51348bae 100644 --- a/modules/mu/config/search_domain.rb +++ b/modules/mu/config/search_domain.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/search_domain.rb + # Basket of Kittens config schema and parser logic. 
See modules/mu/providers/*/search_domain.rb class SearchDomain # Base configuration schema for a SearchDomain diff --git a/modules/mu/config/server.rb b/modules/mu/config/server.rb index 159fde849..29b530301 100644 --- a/modules/mu/config/server.rb +++ b/modules/mu/config/server.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/server.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/server.rb class Server # Verify that a server or server_pool has a valid LDAP config referencing diff --git a/modules/mu/config/server_pool.rb b/modules/mu/config/server_pool.rb index a13e2b049..d8e6d39ea 100644 --- a/modules/mu/config/server_pool.rb +++ b/modules/mu/config/server_pool.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/server_pool.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/server_pool.rb class ServerPool # Base configuration schema for a ServerPool diff --git a/modules/mu/config/storage_pool.rb b/modules/mu/config/storage_pool.rb index 9fe9d03b3..0f720ed63 100644 --- a/modules/mu/config/storage_pool.rb +++ b/modules/mu/config/storage_pool.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/storage_pool.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/storage_pool.rb class StoragePool # Base configuration schema for a StoragePool diff --git a/modules/mu/config/user.rb b/modules/mu/config/user.rb index d71d9927f..f2823516b 100644 --- a/modules/mu/config/user.rb +++ b/modules/mu/config/user.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/user.rb + # Basket of Kittens config schema and parser logic. 
See modules/mu/providers/*/user.rb class User # Base configuration schema for a User diff --git a/modules/mu/config/vpc.rb b/modules/mu/config/vpc.rb index 800e58d8e..c5fcdc45a 100644 --- a/modules/mu/config/vpc.rb +++ b/modules/mu/config/vpc.rb @@ -14,7 +14,7 @@ module MU class Config - # Basket of Kittens config schema and parser logic. See modules/mu/clouds/*/vpc.rb + # Basket of Kittens config schema and parser logic. See modules/mu/providers/*/vpc.rb class VPC # Base configuration schema for a VPC diff --git a/modules/mu/clouds/README.md b/modules/mu/providers/README.md similarity index 99% rename from modules/mu/clouds/README.md rename to modules/mu/providers/README.md index 18ffa8adc..ce22f9988 100644 --- a/modules/mu/clouds/README.md +++ b/modules/mu/providers/README.md @@ -89,7 +89,7 @@ Looking elsewhere in `cloud.rb` let's see what all we have to do: generic_instance_methods = [:create, :notify, :mu_name, :cloud_id, :config] ``` -Just the basics, for now. Here's what that will look like in the AWS layer, in the file `modules/mu/clouds/aws/function.rb`: +Just the basics, for now. 
Here's what that will look like in the AWS layer, in the file `modules/mu/providers/aws/function.rb`: ``` module MU diff --git a/modules/mu/clouds/aws.rb b/modules/mu/providers/aws.rb similarity index 100% rename from modules/mu/clouds/aws.rb rename to modules/mu/providers/aws.rb diff --git a/modules/mu/clouds/aws/alarm.rb b/modules/mu/providers/aws/alarm.rb similarity index 100% rename from modules/mu/clouds/aws/alarm.rb rename to modules/mu/providers/aws/alarm.rb diff --git a/modules/mu/clouds/aws/bucket.rb b/modules/mu/providers/aws/bucket.rb similarity index 100% rename from modules/mu/clouds/aws/bucket.rb rename to modules/mu/providers/aws/bucket.rb diff --git a/modules/mu/clouds/aws/cache_cluster.rb b/modules/mu/providers/aws/cache_cluster.rb similarity index 100% rename from modules/mu/clouds/aws/cache_cluster.rb rename to modules/mu/providers/aws/cache_cluster.rb diff --git a/modules/mu/clouds/aws/collection.rb b/modules/mu/providers/aws/collection.rb similarity index 100% rename from modules/mu/clouds/aws/collection.rb rename to modules/mu/providers/aws/collection.rb diff --git a/modules/mu/clouds/aws/container_cluster.rb b/modules/mu/providers/aws/container_cluster.rb similarity index 100% rename from modules/mu/clouds/aws/container_cluster.rb rename to modules/mu/providers/aws/container_cluster.rb diff --git a/modules/mu/clouds/aws/database.rb b/modules/mu/providers/aws/database.rb similarity index 99% rename from modules/mu/clouds/aws/database.rb rename to modules/mu/providers/aws/database.rb index 767f961f2..60c398038 100644 --- a/modules/mu/clouds/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -20,6 +20,8 @@ class AWS # A database as configured in {MU::Config::BasketofKittens::databases} class Database < MU::Cloud::Database + # Map legal storage values for each disk type and database engine so + # our validator can check them for us. 
STORAGE_RANGES = { "io1" => { "postgres" => 100..65536, @@ -247,55 +249,6 @@ def allTags @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } end - def genericParams - params = if @config['create_cluster'] - paramhash = { - db_cluster_identifier: @cloud_id, - engine: @config["engine"], - vpc_security_group_ids: @config["vpc_security_group_ids"], - tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } - } - paramhash[:db_subnet_group_name] = @config["subnet_group_name"].downcase if @vpc - if @config['cloudwatch_logs'] - paramhash[:enable_cloudwatch_logs_exports ] = @config['cloudwatch_logs'] - end - if @config['cluster_mode'] - paramhash[:engine_mode] = @config['cluster_mode'] - if @config['cluster_mode'] == "serverless" - paramhash[:scaling_configuration] = { - :auto_pause => @config['serverless_scaling']['auto_pause'], - :min_capacity => @config['serverless_scaling']['min_capacity'], - :max_capacity => @config['serverless_scaling']['max_capacity'], - :seconds_until_auto_pause => @config['serverless_scaling']['seconds_until_auto_pause'] - } - end - end - paramhash - else - { - db_instance_identifier: @cloud_id, - db_instance_class: @config["size"], - engine: @config["engine"], - auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], - license_model: @config["license_model"], - db_subnet_group_name: @config["subnet_group_name"], - publicly_accessible: @config["publicly_accessible"], - copy_tags_to_snapshot: true, - tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } - } - end - - if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - if @config['create_cluster'] - params[:snapshot_identifier] = @config["snapshot_id"] - else - params[:db_snapshot_identifier] = @config["snapshot_id"] - end - end - - params - end - # Create a subnet group for a database. 
def createSubnetGroup # Finding subnets, creating security groups/adding holes, create subnet group @@ -879,6 +832,56 @@ def self.validateConfig(db, _configurator) private + def genericParams + params = if @config['create_cluster'] + paramhash = { + db_cluster_identifier: @cloud_id, + engine: @config["engine"], + vpc_security_group_ids: @config["vpc_security_group_ids"], + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } + } + paramhash[:db_subnet_group_name] = @config["subnet_group_name"].downcase if @vpc + if @config['cloudwatch_logs'] + paramhash[:enable_cloudwatch_logs_exports ] = @config['cloudwatch_logs'] + end + if @config['cluster_mode'] + paramhash[:engine_mode] = @config['cluster_mode'] + if @config['cluster_mode'] == "serverless" + paramhash[:scaling_configuration] = { + :auto_pause => @config['serverless_scaling']['auto_pause'], + :min_capacity => @config['serverless_scaling']['min_capacity'], + :max_capacity => @config['serverless_scaling']['max_capacity'], + :seconds_until_auto_pause => @config['serverless_scaling']['seconds_until_auto_pause'] + } + end + end + paramhash + else + { + db_instance_identifier: @cloud_id, + db_instance_class: @config["size"], + engine: @config["engine"], + auto_minor_version_upgrade: @config["auto_minor_version_upgrade"], + license_model: @config["license_model"], + db_subnet_group_name: @config["subnet_group_name"], + publicly_accessible: @config["publicly_accessible"], + copy_tags_to_snapshot: true, + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } + } + end + + if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) + if @config['create_cluster'] + params[:snapshot_identifier] = @config["snapshot_id"] + else + params[:db_snapshot_identifier] = @config["snapshot_id"] + end + end + + params + end + + def self.validate_network_cfg(db) ok = true diff --git a/modules/mu/clouds/aws/dnszone.rb b/modules/mu/providers/aws/dnszone.rb similarity index 100% rename from 
modules/mu/clouds/aws/dnszone.rb rename to modules/mu/providers/aws/dnszone.rb diff --git a/modules/mu/clouds/aws/endpoint.rb b/modules/mu/providers/aws/endpoint.rb similarity index 100% rename from modules/mu/clouds/aws/endpoint.rb rename to modules/mu/providers/aws/endpoint.rb diff --git a/modules/mu/clouds/aws/firewall_rule.rb b/modules/mu/providers/aws/firewall_rule.rb similarity index 99% rename from modules/mu/clouds/aws/firewall_rule.rb rename to modules/mu/providers/aws/firewall_rule.rb index 01f3ea39a..6d9806038 100644 --- a/modules/mu/clouds/aws/firewall_rule.rb +++ b/modules/mu/providers/aws/firewall_rule.rb @@ -18,7 +18,7 @@ class Cloud class AWS # A firewall ruleset as configured in {MU::Config::BasketofKittens::firewall_rules} class FirewallRule < MU::Cloud::FirewallRule - require "mu/clouds/aws/vpc" + require "mu/providers/aws/vpc" @admin_sgs = Hash.new @admin_sg_semaphore = Mutex.new diff --git a/modules/mu/clouds/aws/folder.rb b/modules/mu/providers/aws/folder.rb similarity index 100% rename from modules/mu/clouds/aws/folder.rb rename to modules/mu/providers/aws/folder.rb diff --git a/modules/mu/clouds/aws/function.rb b/modules/mu/providers/aws/function.rb similarity index 100% rename from modules/mu/clouds/aws/function.rb rename to modules/mu/providers/aws/function.rb diff --git a/modules/mu/clouds/aws/group.rb b/modules/mu/providers/aws/group.rb similarity index 100% rename from modules/mu/clouds/aws/group.rb rename to modules/mu/providers/aws/group.rb diff --git a/modules/mu/clouds/aws/habitat.rb b/modules/mu/providers/aws/habitat.rb similarity index 100% rename from modules/mu/clouds/aws/habitat.rb rename to modules/mu/providers/aws/habitat.rb diff --git a/modules/mu/clouds/aws/loadbalancer.rb b/modules/mu/providers/aws/loadbalancer.rb similarity index 100% rename from modules/mu/clouds/aws/loadbalancer.rb rename to modules/mu/providers/aws/loadbalancer.rb diff --git a/modules/mu/clouds/aws/log.rb b/modules/mu/providers/aws/log.rb similarity 
index 100% rename from modules/mu/clouds/aws/log.rb rename to modules/mu/providers/aws/log.rb diff --git a/modules/mu/clouds/aws/msg_queue.rb b/modules/mu/providers/aws/msg_queue.rb similarity index 100% rename from modules/mu/clouds/aws/msg_queue.rb rename to modules/mu/providers/aws/msg_queue.rb diff --git a/modules/mu/clouds/aws/nosqldb.rb b/modules/mu/providers/aws/nosqldb.rb similarity index 100% rename from modules/mu/clouds/aws/nosqldb.rb rename to modules/mu/providers/aws/nosqldb.rb diff --git a/modules/mu/clouds/aws/notifier.rb b/modules/mu/providers/aws/notifier.rb similarity index 100% rename from modules/mu/clouds/aws/notifier.rb rename to modules/mu/providers/aws/notifier.rb diff --git a/modules/mu/clouds/aws/role.rb b/modules/mu/providers/aws/role.rb similarity index 100% rename from modules/mu/clouds/aws/role.rb rename to modules/mu/providers/aws/role.rb diff --git a/modules/mu/clouds/aws/search_domain.rb b/modules/mu/providers/aws/search_domain.rb similarity index 100% rename from modules/mu/clouds/aws/search_domain.rb rename to modules/mu/providers/aws/search_domain.rb diff --git a/modules/mu/clouds/aws/server.rb b/modules/mu/providers/aws/server.rb similarity index 99% rename from modules/mu/clouds/aws/server.rb rename to modules/mu/providers/aws/server.rb index 4a2e787bf..cc3b6d8cf 100644 --- a/modules/mu/clouds/aws/server.rb +++ b/modules/mu/providers/aws/server.rb @@ -145,7 +145,7 @@ def self.fetchUserdata(platform: "linux", template_variables: {}, custom_append: raise MuError, "My second argument should be a hash of variables to pass into ERB templates" end $mu = OpenStruct.new(template_variables) - userdata_dir = File.expand_path(MU.myRoot+"/modules/mu/clouds/aws/userdata") + userdata_dir = File.expand_path(MU.myRoot+"/modules/mu/providers/aws/userdata") platform = "linux" if %w{centos centos6 centos7 ubuntu ubuntu14 rhel rhel7 rhel71 amazon}.include? platform platform = "windows" if %w{win2k12r2 win2k12 win2k8 win2k8r2 win2k16}.include? 
platform erbfile = "#{userdata_dir}/#{platform}.erb" @@ -725,7 +725,7 @@ def toKitten(**_args) if int.groups.size > 0 - require 'mu/clouds/aws/firewall_rule' + require 'mu/providers/aws/firewall_rule' ifaces = MU::Cloud::AWS::FirewallRule.getAssociatedInterfaces(int.groups.map { |sg| sg.group_id }, credentials: @credentials, region: @config['region']) done_local_rules = false int.groups.each { |sg| diff --git a/modules/mu/clouds/aws/server_pool.rb b/modules/mu/providers/aws/server_pool.rb similarity index 100% rename from modules/mu/clouds/aws/server_pool.rb rename to modules/mu/providers/aws/server_pool.rb diff --git a/modules/mu/clouds/aws/storage_pool.rb b/modules/mu/providers/aws/storage_pool.rb similarity index 100% rename from modules/mu/clouds/aws/storage_pool.rb rename to modules/mu/providers/aws/storage_pool.rb diff --git a/modules/mu/clouds/aws/user.rb b/modules/mu/providers/aws/user.rb similarity index 100% rename from modules/mu/clouds/aws/user.rb rename to modules/mu/providers/aws/user.rb diff --git a/modules/mu/clouds/aws/userdata/README.md b/modules/mu/providers/aws/userdata/README.md similarity index 100% rename from modules/mu/clouds/aws/userdata/README.md rename to modules/mu/providers/aws/userdata/README.md diff --git a/modules/mu/clouds/aws/userdata/linux.erb b/modules/mu/providers/aws/userdata/linux.erb similarity index 100% rename from modules/mu/clouds/aws/userdata/linux.erb rename to modules/mu/providers/aws/userdata/linux.erb diff --git a/modules/mu/clouds/aws/userdata/windows.erb b/modules/mu/providers/aws/userdata/windows.erb similarity index 100% rename from modules/mu/clouds/aws/userdata/windows.erb rename to modules/mu/providers/aws/userdata/windows.erb diff --git a/modules/mu/clouds/aws/vpc.rb b/modules/mu/providers/aws/vpc.rb similarity index 99% rename from modules/mu/clouds/aws/vpc.rb rename to modules/mu/providers/aws/vpc.rb index f5f037a22..d7646911d 100644 --- a/modules/mu/clouds/aws/vpc.rb +++ b/modules/mu/providers/aws/vpc.rb 
@@ -18,7 +18,7 @@ class AWS # Creation of Virtual Private Clouds and associated artifacts (routes, subnets, etc). class VPC < MU::Cloud::VPC - require 'mu/clouds/aws/vpc_subnet' + require 'mu/providers/aws/vpc_subnet' # Initialize this cloud resource object. Calling +super+ will invoke the initializer defined under {MU::Cloud}, which should set the attribtues listed in {MU::Cloud::PUBLIC_ATTRS} as well as applicable dependency shortcuts, like +@vpc+, for us. # @param args [Hash]: Hash of named arguments passed via Ruby's double-splat diff --git a/modules/mu/clouds/aws/vpc_subnet.rb b/modules/mu/providers/aws/vpc_subnet.rb similarity index 100% rename from modules/mu/clouds/aws/vpc_subnet.rb rename to modules/mu/providers/aws/vpc_subnet.rb diff --git a/modules/mu/clouds/azure.rb b/modules/mu/providers/azure.rb similarity index 100% rename from modules/mu/clouds/azure.rb rename to modules/mu/providers/azure.rb diff --git a/modules/mu/clouds/azure/container_cluster.rb b/modules/mu/providers/azure/container_cluster.rb similarity index 100% rename from modules/mu/clouds/azure/container_cluster.rb rename to modules/mu/providers/azure/container_cluster.rb diff --git a/modules/mu/clouds/azure/firewall_rule.rb b/modules/mu/providers/azure/firewall_rule.rb similarity index 100% rename from modules/mu/clouds/azure/firewall_rule.rb rename to modules/mu/providers/azure/firewall_rule.rb diff --git a/modules/mu/clouds/azure/habitat.rb b/modules/mu/providers/azure/habitat.rb similarity index 100% rename from modules/mu/clouds/azure/habitat.rb rename to modules/mu/providers/azure/habitat.rb diff --git a/modules/mu/clouds/azure/loadbalancer.rb b/modules/mu/providers/azure/loadbalancer.rb similarity index 100% rename from modules/mu/clouds/azure/loadbalancer.rb rename to modules/mu/providers/azure/loadbalancer.rb diff --git a/modules/mu/clouds/azure/role.rb b/modules/mu/providers/azure/role.rb similarity index 100% rename from modules/mu/clouds/azure/role.rb rename to 
modules/mu/providers/azure/role.rb diff --git a/modules/mu/clouds/azure/server.rb b/modules/mu/providers/azure/server.rb similarity index 100% rename from modules/mu/clouds/azure/server.rb rename to modules/mu/providers/azure/server.rb diff --git a/modules/mu/clouds/azure/user.rb b/modules/mu/providers/azure/user.rb similarity index 100% rename from modules/mu/clouds/azure/user.rb rename to modules/mu/providers/azure/user.rb diff --git a/modules/mu/clouds/azure/userdata/README.md b/modules/mu/providers/azure/userdata/README.md similarity index 100% rename from modules/mu/clouds/azure/userdata/README.md rename to modules/mu/providers/azure/userdata/README.md diff --git a/modules/mu/clouds/azure/userdata/linux.erb b/modules/mu/providers/azure/userdata/linux.erb similarity index 100% rename from modules/mu/clouds/azure/userdata/linux.erb rename to modules/mu/providers/azure/userdata/linux.erb diff --git a/modules/mu/clouds/azure/userdata/windows.erb b/modules/mu/providers/azure/userdata/windows.erb similarity index 100% rename from modules/mu/clouds/azure/userdata/windows.erb rename to modules/mu/providers/azure/userdata/windows.erb diff --git a/modules/mu/clouds/azure/vpc.rb b/modules/mu/providers/azure/vpc.rb similarity index 100% rename from modules/mu/clouds/azure/vpc.rb rename to modules/mu/providers/azure/vpc.rb diff --git a/modules/mu/clouds/cloudformation.rb b/modules/mu/providers/cloudformation.rb similarity index 100% rename from modules/mu/clouds/cloudformation.rb rename to modules/mu/providers/cloudformation.rb diff --git a/modules/mu/clouds/cloudformation/alarm.rb b/modules/mu/providers/cloudformation/alarm.rb similarity index 100% rename from modules/mu/clouds/cloudformation/alarm.rb rename to modules/mu/providers/cloudformation/alarm.rb diff --git a/modules/mu/clouds/cloudformation/cache_cluster.rb b/modules/mu/providers/cloudformation/cache_cluster.rb similarity index 100% rename from modules/mu/clouds/cloudformation/cache_cluster.rb rename to 
modules/mu/providers/cloudformation/cache_cluster.rb diff --git a/modules/mu/clouds/cloudformation/collection.rb b/modules/mu/providers/cloudformation/collection.rb similarity index 100% rename from modules/mu/clouds/cloudformation/collection.rb rename to modules/mu/providers/cloudformation/collection.rb diff --git a/modules/mu/clouds/cloudformation/database.rb b/modules/mu/providers/cloudformation/database.rb similarity index 100% rename from modules/mu/clouds/cloudformation/database.rb rename to modules/mu/providers/cloudformation/database.rb diff --git a/modules/mu/clouds/cloudformation/dnszone.rb b/modules/mu/providers/cloudformation/dnszone.rb similarity index 100% rename from modules/mu/clouds/cloudformation/dnszone.rb rename to modules/mu/providers/cloudformation/dnszone.rb diff --git a/modules/mu/clouds/cloudformation/firewall_rule.rb b/modules/mu/providers/cloudformation/firewall_rule.rb similarity index 100% rename from modules/mu/clouds/cloudformation/firewall_rule.rb rename to modules/mu/providers/cloudformation/firewall_rule.rb diff --git a/modules/mu/clouds/cloudformation/loadbalancer.rb b/modules/mu/providers/cloudformation/loadbalancer.rb similarity index 100% rename from modules/mu/clouds/cloudformation/loadbalancer.rb rename to modules/mu/providers/cloudformation/loadbalancer.rb diff --git a/modules/mu/clouds/cloudformation/log.rb b/modules/mu/providers/cloudformation/log.rb similarity index 100% rename from modules/mu/clouds/cloudformation/log.rb rename to modules/mu/providers/cloudformation/log.rb diff --git a/modules/mu/clouds/cloudformation/server.rb b/modules/mu/providers/cloudformation/server.rb similarity index 100% rename from modules/mu/clouds/cloudformation/server.rb rename to modules/mu/providers/cloudformation/server.rb diff --git a/modules/mu/clouds/cloudformation/server_pool.rb b/modules/mu/providers/cloudformation/server_pool.rb similarity index 100% rename from modules/mu/clouds/cloudformation/server_pool.rb rename to 
modules/mu/providers/cloudformation/server_pool.rb diff --git a/modules/mu/clouds/cloudformation/vpc.rb b/modules/mu/providers/cloudformation/vpc.rb similarity index 100% rename from modules/mu/clouds/cloudformation/vpc.rb rename to modules/mu/providers/cloudformation/vpc.rb diff --git a/modules/mu/clouds/docker.rb b/modules/mu/providers/docker.rb similarity index 100% rename from modules/mu/clouds/docker.rb rename to modules/mu/providers/docker.rb diff --git a/modules/mu/clouds/google.rb b/modules/mu/providers/google.rb similarity index 100% rename from modules/mu/clouds/google.rb rename to modules/mu/providers/google.rb diff --git a/modules/mu/clouds/google/bucket.rb b/modules/mu/providers/google/bucket.rb similarity index 100% rename from modules/mu/clouds/google/bucket.rb rename to modules/mu/providers/google/bucket.rb diff --git a/modules/mu/clouds/google/container_cluster.rb b/modules/mu/providers/google/container_cluster.rb similarity index 100% rename from modules/mu/clouds/google/container_cluster.rb rename to modules/mu/providers/google/container_cluster.rb diff --git a/modules/mu/clouds/google/database.rb b/modules/mu/providers/google/database.rb similarity index 100% rename from modules/mu/clouds/google/database.rb rename to modules/mu/providers/google/database.rb diff --git a/modules/mu/clouds/google/firewall_rule.rb b/modules/mu/providers/google/firewall_rule.rb similarity index 100% rename from modules/mu/clouds/google/firewall_rule.rb rename to modules/mu/providers/google/firewall_rule.rb diff --git a/modules/mu/clouds/google/folder.rb b/modules/mu/providers/google/folder.rb similarity index 100% rename from modules/mu/clouds/google/folder.rb rename to modules/mu/providers/google/folder.rb diff --git a/modules/mu/clouds/google/function.rb b/modules/mu/providers/google/function.rb similarity index 100% rename from modules/mu/clouds/google/function.rb rename to modules/mu/providers/google/function.rb diff --git a/modules/mu/clouds/google/group.rb 
b/modules/mu/providers/google/group.rb similarity index 100% rename from modules/mu/clouds/google/group.rb rename to modules/mu/providers/google/group.rb diff --git a/modules/mu/clouds/google/habitat.rb b/modules/mu/providers/google/habitat.rb similarity index 100% rename from modules/mu/clouds/google/habitat.rb rename to modules/mu/providers/google/habitat.rb diff --git a/modules/mu/clouds/google/loadbalancer.rb b/modules/mu/providers/google/loadbalancer.rb similarity index 100% rename from modules/mu/clouds/google/loadbalancer.rb rename to modules/mu/providers/google/loadbalancer.rb diff --git a/modules/mu/clouds/google/role.rb b/modules/mu/providers/google/role.rb similarity index 100% rename from modules/mu/clouds/google/role.rb rename to modules/mu/providers/google/role.rb diff --git a/modules/mu/clouds/google/server.rb b/modules/mu/providers/google/server.rb similarity index 100% rename from modules/mu/clouds/google/server.rb rename to modules/mu/providers/google/server.rb diff --git a/modules/mu/clouds/google/server_pool.rb b/modules/mu/providers/google/server_pool.rb similarity index 100% rename from modules/mu/clouds/google/server_pool.rb rename to modules/mu/providers/google/server_pool.rb diff --git a/modules/mu/clouds/google/user.rb b/modules/mu/providers/google/user.rb similarity index 100% rename from modules/mu/clouds/google/user.rb rename to modules/mu/providers/google/user.rb diff --git a/modules/mu/clouds/google/userdata/README.md b/modules/mu/providers/google/userdata/README.md similarity index 100% rename from modules/mu/clouds/google/userdata/README.md rename to modules/mu/providers/google/userdata/README.md diff --git a/modules/mu/clouds/google/userdata/linux.erb b/modules/mu/providers/google/userdata/linux.erb similarity index 100% rename from modules/mu/clouds/google/userdata/linux.erb rename to modules/mu/providers/google/userdata/linux.erb diff --git a/modules/mu/clouds/google/userdata/windows.erb 
b/modules/mu/providers/google/userdata/windows.erb similarity index 100% rename from modules/mu/clouds/google/userdata/windows.erb rename to modules/mu/providers/google/userdata/windows.erb diff --git a/modules/mu/clouds/google/vpc.rb b/modules/mu/providers/google/vpc.rb similarity index 100% rename from modules/mu/clouds/google/vpc.rb rename to modules/mu/providers/google/vpc.rb diff --git a/spec/mu/clouds/azure_spec.rb b/spec/mu/clouds/azure_spec.rb index ae78859bb..fb0b750e3 100644 --- a/spec/mu/clouds/azure_spec.rb +++ b/spec/mu/clouds/azure_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper' require 'yaml' -require 'mu/clouds/azure' +require 'mu/providers/azure' describe MU::Cloud::Azure do @@ -216,4 +216,4 @@ end -end \ No newline at end of file +end From 1bfbc3185874684c3fb4e40f9be36fc9d01f2612 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 7 Apr 2020 00:27:14 -0400 Subject: [PATCH 053/124] Terrorize internal class reference behavior, and also I discovered a circular reference bug --- modules/mu/adoption.rb | 2 +- modules/mu/cleanup.rb | 16 +- modules/mu/cloud.rb | 1860 +------------------------- modules/mu/cloud/machine_images.rb | 212 +++ modules/mu/cloud/providers.rb | 81 ++ modules/mu/cloud/resource_base.rb | 1089 +++++++++++++++ modules/mu/cloud/ssh_sessions.rb | 225 ++++ modules/mu/cloud/winrm_sessions.rb | 231 ++++ modules/mu/config.rb | 11 +- modules/mu/config/database.rb | 4 +- modules/mu/config/doc_helpers.rb | 5 +- modules/mu/config/firewall_rule.rb | 4 +- modules/mu/config/ref.rb | 2 + modules/mu/config/schema_helpers.rb | 22 +- modules/mu/config/vpc.rb | 4 +- modules/mu/deploy.rb | 20 +- modules/mu/mommacat.rb | 2 +- modules/mu/mommacat/search.rb | 12 +- modules/mu/providers/aws.rb | 2 +- modules/mu/providers/aws/database.rb | 13 +- modules/mu/providers/aws/habitat.rb | 2 +- modules/mu/providers/aws/role.rb | 2 +- modules/mu/providers/aws/vpc.rb | 4 +- 23 files changed, 1969 insertions(+), 1856 deletions(-) create mode 100644 
modules/mu/cloud/machine_images.rb create mode 100644 modules/mu/cloud/providers.rb create mode 100644 modules/mu/cloud/resource_base.rb create mode 100644 modules/mu/cloud/ssh_sessions.rb create mode 100644 modules/mu/cloud/winrm_sessions.rb diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 2d59e8431..ac41bdf35 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -229,7 +229,7 @@ def generateBaskets(prefix: "") @clouds.each { |cloud| @scraped.each_pair { |type, resources| res_class = begin - MU::Cloud.loadCloudType(cloud, type) + MU::Cloud.resourceClass(cloud, type) rescue MU::Cloud::MuCloudResourceNotImplemented # XXX I don't think this can actually happen next diff --git a/modules/mu/cleanup.rb b/modules/mu/cleanup.rb index 7ddf8b8ce..653dabce5 100644 --- a/modules/mu/cleanup.rb +++ b/modules/mu/cleanup.rb @@ -122,7 +122,7 @@ def self.run(deploy_id, noop: false, skipsnapshots: false, onlycloud: false, ver } creds.each_pair { |provider, credsets_inner| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(provider) + cloudclass = MU::Cloud.cloudClass(provider) credsets_inner.keys.each { |c| cloudclass.cleanDeploy(MU.deploy_id, credentials: c, noop: @noop) } @@ -160,7 +160,7 @@ def self.run(deploy_id, noop: false, skipsnapshots: false, onlycloud: false, ver def self.listUsedCredentials(credsets) creds = {} MU::Cloud.availableClouds.each { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + cloudclass = MU::Cloud.cloudClass(cloud) if $MU_CFG[cloud.downcase] and $MU_CFG[cloud.downcase].size > 0 creds[cloud] ||= {} cloudclass.listCredentials.each { |credset| @@ -181,7 +181,7 @@ def self.listUsedCredentials(credsets) private_class_method :listUsedCredentials def self.cleanCloud(cloud, habitats, regions, credsets) - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + cloudclass = MU::Cloud.cloudClass(cloud) credsets.each_pair { |credset, acct_regions| next if @credsused 
and !@credsused.include?(credset) global_vs_region_semaphore = Mutex.new @@ -214,8 +214,8 @@ def self.cleanCloud(cloud, habitats, regions, credsets) def self.cleanRegion(cloud, credset, region, global_vs_region_semaphore, global_done, habitats) had_failures = false - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) - habitatclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get("Habitat") + cloudclass = MU::Cloud.cloudClass(cloud) + habitatclass = MU::Cloud.resourceClass(cloud, "Habitat") projects = [] if habitats @@ -282,8 +282,7 @@ def self.cleanHabitat(cloud, credset, region, habitat, global_vs_region_semaphor begin skipme = false global_vs_region_semaphore.synchronize { - MU::Cloud.loadCloudType(cloud, t) - if Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(t).isGlobal? + if MU::Cloud.resourceClass(cloud, t).isGlobal? global_done[habitat] ||= [] if !global_done[habitat].include?(t) global_done[habitat] << t @@ -337,9 +336,8 @@ def self.call_cleanup(type, credset, provider, flags, region) flags['known'] << found.cloud_id end end - resclass = Object.const_get("MU").const_get("Cloud").const_get(type) - resclass.cleanup( + MU::Cloud.loadBaseType(type).cleanup( noop: @noop, ignoremaster: @ignoremaster, region: region, diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb index 6492d43ee..f2abe13fc 100644 --- a/modules/mu/cloud.rb +++ b/modules/mu/cloud.rb @@ -24,11 +24,6 @@ class Cloud class BootstrapTempFail < MuNonFatal; end - # An exception we can use with transient Net::SSH errors, which require - # special handling due to obnoxious asynchronous interrupt behaviors. - class NetSSHFail < MuNonFatal; - end - # Exception thrown when a request is made to an unimplemented cloud # resource. class MuCloudResourceNotImplemented < StandardError; @@ -45,11 +40,11 @@ class MuDefunctHabitat < StandardError; end # Methods which a cloud resource implementation, e.g. 
Server, must implement - generic_class_methods = [:find, :cleanup, :validateConfig, :schema, :isGlobal?] - generic_instance_methods = [:create, :notify, :mu_name, :cloud_id, :config] + @@generic_class_methods = [:find, :cleanup, :validateConfig, :schema, :isGlobal?] + @@generic_instance_methods = [:create, :notify, :mu_name, :cloud_id, :config] # Class methods which the base of a cloud implementation must implement - generic_class_methods_toplevel = [:required_instance_methods, :myRegion, :listRegions, :listAZs, :hosted?, :hosted_config, :config_example, :writeDeploySecret, :listCredentials, :credConfig, :listInstanceTypes, :adminBucketName, :adminBucketUrl, :listHabitats, :habitat, :virtual?] + @@generic_class_methods_toplevel = [:required_instance_methods, :myRegion, :listRegions, :listAZs, :hosted?, :hosted_config, :config_example, :writeDeploySecret, :listCredentials, :credConfig, :listInstanceTypes, :adminBucketName, :adminBucketUrl, :listHabitats, :habitat, :virtual?] # Public attributes which will be available on all instantiated cloud resource objects # @@ -178,8 +173,8 @@ class NoSQLDB; :interface => self.const_get("Folder"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods, - :instance => generic_instance_methods + :class => @@generic_class_methods, + :instance => @@generic_instance_methods }, :Habitat => { :has_multiples => false, @@ -189,8 +184,8 @@ class NoSQLDB; :interface => self.const_get("Habitat"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods + [:isLive?], - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods + [:isLive?], + :instance => @@generic_instance_methods + [:groom] }, :Collection => { :has_multiples => false, @@ -200,8 +195,8 @@ class NoSQLDB; :interface => self.const_get("Collection"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => 
generic_class_methods, - :instance => generic_instance_methods + :class => @@generic_class_methods, + :instance => @@generic_instance_methods }, :Database => { :has_multiples => true, @@ -211,8 +206,8 @@ class NoSQLDB; :interface => self.const_get("Database"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom, :allowHost] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom, :allowHost] }, :DNSZone => { :has_multiples => false, @@ -222,8 +217,8 @@ class NoSQLDB; :interface => self.const_get("DNSZone"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods + [:genericMuDNSEntry, :createRecordsFromConfig], - :instance => generic_instance_methods + :class => @@generic_class_methods + [:genericMuDNSEntry, :createRecordsFromConfig], + :instance => @@generic_instance_methods }, :FirewallRule => { :has_multiples => false, @@ -233,8 +228,8 @@ class NoSQLDB; :interface => self.const_get("FirewallRule"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom, :addRule] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom, :addRule] }, :LoadBalancer => { :has_multiples => false, @@ -244,8 +239,8 @@ class NoSQLDB; :interface => self.const_get("LoadBalancer"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom, :registerNode] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom, :registerNode] }, :Server => { :has_multiples => true, @@ -255,8 +250,8 @@ class NoSQLDB; :interface => self.const_get("Server"), :deps_wait_on_my_creation => false, :waits_on_parent_completion => false, - :class => 
generic_class_methods + [:validateInstanceType, :imageTimeStamp], - :instance => generic_instance_methods + [:groom, :postBoot, :getSSHConfig, :canonicalIP, :getWindowsAdminPassword, :active?, :groomer, :mu_windows_name, :mu_windows_name=, :reboot, :addVolume, :genericNAT, :listIPs] + :class => @@generic_class_methods + [:validateInstanceType, :imageTimeStamp], + :instance => @@generic_instance_methods + [:groom, :postBoot, :getSSHConfig, :canonicalIP, :getWindowsAdminPassword, :active?, :groomer, :mu_windows_name, :mu_windows_name=, :reboot, :addVolume, :genericNAT, :listIPs] }, :ServerPool => { :has_multiples => false, @@ -266,8 +261,8 @@ class NoSQLDB; :interface => self.const_get("ServerPool"), :deps_wait_on_my_creation => false, :waits_on_parent_completion => true, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom, :listNodes] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom, :listNodes] }, :VPC => { :has_multiples => false, @@ -277,8 +272,8 @@ class NoSQLDB; :interface => self.const_get("VPC"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom, :subnets, :getSubnet, :listSubnets, :findBastion, :findNat] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom, :subnets, :getSubnet, :findBastion, :findNat] }, :CacheCluster => { :has_multiples => true, @@ -288,8 +283,8 @@ class NoSQLDB; :interface => self.const_get("CacheCluster"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :Alarm => { :has_multiples => false, @@ -299,8 +294,8 @@ class NoSQLDB; :interface => self.const_get("Alarm"), :deps_wait_on_my_creation => false, :waits_on_parent_completion 
=> true, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :Notifier => { :has_multiples => false, @@ -310,8 +305,8 @@ class NoSQLDB; :interface => self.const_get("Notifier"), :deps_wait_on_my_creation => false, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :Log => { :has_multiples => false, @@ -321,8 +316,8 @@ class NoSQLDB; :interface => self.const_get("Log"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :StoragePool => { :has_multiples => false, @@ -332,8 +327,8 @@ class NoSQLDB; :interface => self.const_get("StoragePool"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :Function => { :has_multiples => false, @@ -343,8 +338,8 @@ class NoSQLDB; :interface => self.const_get("Function"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :Endpoint => { :has_multiples => false, @@ -354,8 +349,8 @@ class NoSQLDB; :interface => self.const_get("Endpoint"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => 
@@generic_instance_methods + [:groom] }, :ContainerCluster => { :has_multiples => false, @@ -365,8 +360,8 @@ class NoSQLDB; :interface => self.const_get("ContainerCluster"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :SearchDomain => { :has_multiples => false, @@ -376,8 +371,8 @@ class NoSQLDB; :interface => self.const_get("SearchDomain"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => false, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :MsgQueue => { :has_multiples => false, @@ -387,8 +382,8 @@ class NoSQLDB; :interface => self.const_get("MsgQueue"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :User => { :has_multiples => false, @@ -398,8 +393,8 @@ class NoSQLDB; :interface => self.const_get("User"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :Group => { :has_multiples => false, @@ -409,8 +404,8 @@ class NoSQLDB; :interface => self.const_get("Group"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :Role => { :has_multiples => false, @@ -420,8 +415,8 @@ class NoSQLDB; :interface => 
self.const_get("Role"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] }, :Bucket => { :has_multiples => false, @@ -431,8 +426,8 @@ class NoSQLDB; :interface => self.const_get("Bucket"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods + [:upload], - :instance => generic_instance_methods + [:groom, :upload] + :class => @@generic_class_methods + [:upload], + :instance => @@generic_instance_methods + [:groom, :upload] }, :NoSQLDB => { :has_multiples => false, @@ -442,201 +437,11 @@ class NoSQLDB; :interface => self.const_get("NoSQLDB"), :deps_wait_on_my_creation => true, :waits_on_parent_completion => true, - :class => generic_class_methods, - :instance => generic_instance_methods + [:groom] + :class => @@generic_class_methods, + :instance => @@generic_instance_methods + [:groom] } }.freeze - # The public AWS S3 bucket where we expect to find YAML files listing our - # standard base images for various platforms. - BASE_IMAGE_BUCKET = "cloudamatic" - # The path in the AWS S3 bucket where we expect to find YAML files listing - # our standard base images for various platforms. - BASE_IMAGE_PATH = "/images" - - # Aliases for platform names, in case we don't have actual images built for - # them. - PLATFORM_ALIASES = { - "linux" => "centos7", - "windows" => "win2k12r2", - "win2k12" => "win2k12r2", - "ubuntu" => "ubuntu16", - "centos" => "centos7", - "rhel7" => "rhel71", - "rhel" => "rhel71", - "amazon" => "amazon2016" - } - - @@image_fetch_cache = {} - @@platform_cache = [] - @@image_fetch_semaphore = Mutex.new - - # Rifle our image lists from {MU::Cloud.getStockImage} and return a list - # of valid +platform+ names. 
- # @return [Array] - def self.listPlatforms - return @@platform_cache if @@platform_cache and !@@platform_cache.empty? - @@platform_cache = MU::Cloud.supportedClouds.map { |cloud| - begin - loadCloudType(cloud, :Server) - rescue MU::Cloud::MuCloudResourceNotImplemented, MU::MuError - next - end - - images = MU::Cloud.getStockImage(cloud, quiet: true) - if images - images.keys - else - nil - end - }.flatten.uniq - @@platform_cache.delete(nil) - @@platform_cache.sort - @@platform_cache - end - - # Locate a base image for a {MU::Cloud::Server} resource. First we check - # Mu's public bucket, which should list the latest and greatest. If we can't - # fetch that, then we fall back to a YAML file that's bundled as part of Mu, - # but which will typically be less up-to-date. - # @param cloud [String]: The cloud provider for which to return an image list - # @param platform [String]: The supported platform for which to return an image or images. If not specified, we'll return our entire library for the appropriate cloud provider. - # @param region [String]: The region for which the returned image or images should be supported, for cloud providers which require it (such as AWS). - # @param fail_hard [Boolean]: Raise an exception on most errors, such as an inability to reach our public listing, lack of matching images, etc. - # @return [Hash,String,nil] - def self.getStockImage(cloud = MU::Config.defaultCloud, platform: nil, region: nil, fail_hard: false, quiet: false) - - if !MU::Cloud.supportedClouds.include?(cloud) - MU.log "'#{cloud}' is not a supported cloud provider! Available providers:", MU::ERR, details: MU::Cloud.supportedClouds - raise MuError, "'#{cloud}' is not a supported cloud provider!" 
- end - - urls = ["http://"+BASE_IMAGE_BUCKET+".s3-website-us-east-1.amazonaws.com"+BASE_IMAGE_PATH] - if $MU_CFG and $MU_CFG['custom_images_url'] - urls << $MU_CFG['custom_images_url'] - end - - images = nil - urls.each { |base_url| - @@image_fetch_semaphore.synchronize { - if @@image_fetch_cache[cloud] and (Time.now - @@image_fetch_cache[cloud]['time']) < 30 - images = @@image_fetch_cache[cloud]['contents'].dup - else - begin - Timeout.timeout(2) do - response = open("#{base_url}/#{cloud}.yaml").read - images ||= {} - images.deep_merge!(YAML.load(response)) - break - end - rescue StandardError => e - if fail_hard - raise MuError, "Failed to fetch stock images from #{base_url}/#{cloud}.yaml (#{e.message})" - else - MU.log "Failed to fetch stock images from #{base_url}/#{cloud}.yaml (#{e.message})", MU::WARN if !quiet - end - end - end - } - } - - @@image_fetch_semaphore.synchronize { - @@image_fetch_cache[cloud] = { - 'contents' => images.dup, - 'time' => Time.now - } - } - - backwards_compat = { - "AWS" => "amazon_images", - "Google" => "google_images", - } - - # Load from inside our repository, if we didn't get images elsewise - if images.nil? - [backwards_compat[cloud], cloud].each { |file| - next if file.nil? - if File.exist?("#{MU.myRoot}/modules/mu/defaults/#{file}.yaml") - images = YAML.load(File.read("#{MU.myRoot}/modules/mu/defaults/#{file}.yaml")) - break - end - } - end - - # Now overlay local overrides, both of the systemwide (/opt/mu/etc) and - # per-user (~/.mu/etc) variety. - [backwards_compat[cloud], cloud].each { |file| - next if file.nil? - if File.exist?("#{MU.etcDir}/#{file}.yaml") - images ||= {} - images.deep_merge!(YAML.load(File.read("#{MU.etcDir}/#{file}.yaml"))) - end - if Process.uid != 0 - basepath = Etc.getpwuid(Process.uid).dir+"/.mu/etc" - if File.exist?("#{basepath}/#{file}.yaml") - images ||= {} - images.deep_merge!(YAML.load(File.read("#{basepath}/#{file}.yaml"))) - end - end - } - - if images.nil? 
- if fail_hard - raise MuError, "Failed to find any base images for #{cloud}" - else - MU.log "Failed to find any base images for #{cloud}", MU::WARN if !quiet - return nil - end - end - - PLATFORM_ALIASES.each_pair { |a, t| - if images[t] and !images[a] - images[a] = images[t] - end - } - - if platform - if !images[platform] - if fail_hard - raise MuError, "No base image for platform #{platform} in cloud #{cloud}" - else - MU.log "No base image for platform #{platform} in cloud #{cloud}", MU::WARN if !quiet - return nil - end - end - images = images[platform] - - if region - # We won't fuss about the region argument if this isn't a cloud that - # has regions, just quietly don't bother. - if images.is_a?(Hash) - if images[region] - images = images[region] - else - if fail_hard - raise MuError, "No base image for platform #{platform} in cloud #{cloud} region #{region} found" - else - MU.log "No base image for platform #{platform} in cloud #{cloud} region #{region} found", MU::WARN if !quiet - return nil - end - end - end - end - else - if region - images.values.each { |regions| - # Filter to match our requested region, but for all the platforms, - # since we didn't specify one. - if regions.is_a?(Hash) - regions.delete_if { |r| r != region } - end - } - end - end - - images - end - # A list of supported cloud resource types as Mu classes def self.resource_types; @@resource_types @@ -670,80 +475,6 @@ def self.getResourceNames(type, assert = true) [nil, nil, nil, nil, {}] end - # Net::SSH exceptions seem to have their own behavior vis a vis threads, - # and our regular call stack gets circumvented when they're thrown. Cheat - # here to catch them gracefully. 
- def self.handleNetSSHExceptions - Thread.handle_interrupt(Net::SSH::Exception => :never) { - begin - Thread.handle_interrupt(Net::SSH::Exception => :immediate) { - MU.log "(Probably harmless) Caught a Net::SSH Exception in #{Thread.current.inspect}", MU::DEBUG, details: Thread.current.backtrace - } - ensure -# raise NetSSHFail, "Net::SSH had a nutty" - end - } - end - - # List of known/supported Cloud providers. This may be modified at runtime - # if an implemention is defective or missing required methods. - @@supportedCloudList = ['AWS', 'CloudFormation', 'Google', 'Azure'] - - # List of known/supported Cloud providers - # @return [Array] - def self.supportedClouds - @@supportedCloudList - end - - # Raise an exception if the cloud provider specified isn't valid - def self.assertSupportedCloud(cloud) - if cloud.nil? or !supportedClouds.include?(cloud.to_s) - raise MuError, "Cloud provider #{cloud} is not supported" - end - Object.const_get("MU").const_get("Cloud").const_get(cloud.to_s) - end - - # List of known/supported Cloud providers for which we have at least one - # set of credentials configured. - # @return [Array] - def self.availableClouds - available = [] - MU::Cloud.supportedClouds.each { |cloud| - begin - cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) - next if cloudbase.listCredentials.nil? or cloudbase.listCredentials.empty? - available << cloud - rescue NameError - end - } - - available - end - - # Raise an exception if the cloud provider specified isn't valid or we - # don't have any credentials configured for it. - def self.assertAvailableCloud(cloud) - if cloud.nil? or availableClouds.include?(cloud.to_s) - raise MuError, "Cloud provider #{cloud} is not available" - end - end - - # Load the container class for each cloud we know about, and inject autoload - # code for each of its supported resource type classes. 
- failed = [] - MU::Cloud.supportedClouds.each { |cloud| - require "mu/providers/#{cloud.downcase}" - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) - generic_class_methods_toplevel.each { |method| - if !cloudclass.respond_to?(method) - MU.log "MU::Cloud::#{cloud} has not implemented required class method #{method}, disabling", MU::ERR - failed << cloud - end - } - } - failed.uniq! - @@supportedCloudList = @@supportedCloudList - failed - # @return [Mutex] def self.userdata_mutex @userdata_mutex ||= Mutex.new @@ -821,13 +552,25 @@ def self.fetchUserdata(platform: "linux", template_variables: {}, custom_append: } end + # Given a resource type, validate that it's legit and return its base class from the {MU::Cloud} module + # @param type [String] + # @return [MU::Cloud] + def self.loadBaseType(type) + raise MuError, "Argument to MU::Cloud.loadBaseType cannot be nil" if type.nil? + shortclass, cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type) + if !shortclass + raise MuCloudResourceNotImplemented, "#{type} does not appear to be a valid resource type" + end + Object.const_get("MU").const_get("Cloud").const_get(shortclass) + end + @cloud_class_cache = {} # Given a cloud layer and resource type, return the class which implements it. # @param cloud [String]: The Cloud layer # @param type [String]: The resource type. Can be the full class name, symbolic name, or Basket of Kittens configuration shorthand for the resource type. # @return [Class]: The cloud-specific class implementing this resource - def self.loadCloudType(cloud, type) - raise MuError, "cloud argument to MU::Cloud.loadCloudType cannot be nil" if cloud.nil? + def self.resourceClass(cloud, type) + raise MuError, "cloud argument to MU::Cloud.resourceClass cannot be nil" if cloud.nil? 
shortclass, cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type) if @cloud_class_cache.has_key?(cloud) and @cloud_class_cache[cloud].has_key?(type) if @cloud_class_cache[cloud][type].nil? @@ -847,10 +590,12 @@ def self.loadCloudType(cloud, type) rescue LoadError => e raise MuCloudResourceNotImplemented, "MU::Cloud::#{cloud} does not currently implement #{shortclass}, or implementation does not load correctly (#{e.message})" end + @cloud_class_cache[cloud] = {} if !@cloud_class_cache.has_key?(cloud) begin cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) myclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(shortclass) + @@resource_types[shortclass.to_sym][:class].each { |class_method| if !myclass.respond_to?(class_method) or myclass.method(class_method).owner.to_s != "#" raise MuError, "MU::Cloud::#{cloud}::#{shortclass} has not implemented required class method #{class_method}" @@ -868,6 +613,7 @@ def self.loadCloudType(cloud, type) } @cloud_class_cache[cloud][type] = myclass + return myclass rescue NameError => e @cloud_class_cache[cloud][type] = nil @@ -875,1469 +621,9 @@ def self.loadCloudType(cloud, type) end end - MU::Cloud.supportedClouds.each { |cloud| - Object.const_get("MU").const_get("Cloud").const_get(cloud).class_eval { - - # Automatically load supported cloud resource classes when they're - # referenced. 
- def self.const_missing(symbol) - if MU::Cloud.resource_types.has_key?(symbol.to_sym) - return MU::Cloud.loadCloudType(name.sub(/.*?::([^:]+)$/, '\1'), symbol) - else - raise MuCloudResourceNotImplemented, "No such cloud resource #{name}:#{symbol}" - end - end - } - } - - @@resource_types.keys.each { |name| - Object.const_get("MU").const_get("Cloud").const_get(name).class_eval { - attr_reader :cloudclass - attr_reader :cloudobj - attr_reader :credentials - attr_reader :config - attr_reader :destroyed - attr_reader :delayed_save - - def self.shortname - name.sub(/.*?::([^:]+)$/, '\1') - end - - def self.cfg_plural - MU::Cloud.resource_types[shortname.to_sym][:cfg_plural] - end - - def self.has_multiples - MU::Cloud.resource_types[shortname.to_sym][:has_multiples] - end - - def self.cfg_name - MU::Cloud.resource_types[shortname.to_sym][:cfg_name] - end - - def self.can_live_in_vpc - MU::Cloud.resource_types[shortname.to_sym][:can_live_in_vpc] - end - - def self.waits_on_parent_completion - MU::Cloud.resource_types[shortname.to_sym][:waits_on_parent_completion] - end - - def self.deps_wait_on_my_creation - MU::Cloud.resource_types[shortname.to_sym][:deps_wait_on_my_creation] - end - - # Print something palatable when we're called in a string context. - def to_s - fullname = "#{self.class.shortname}" - if !@cloudobj.nil? and !@cloudobj.mu_name.nil? - @mu_name ||= @cloudobj.mu_name - end - if !@mu_name.nil? and !@mu_name.empty? - fullname = fullname + " '#{@mu_name}'" - end - if !@cloud_id.nil? - fullname = fullname + " (#{@cloud_id})" - end - return fullname - end - - # Set our +deploy+ and +deploy_id+ attributes, optionally doing so even - # if they have already been set. 
- # - # @param mommacat [MU::MommaCat]: The deploy to which we're being told we belong - # @param force [Boolean]: Set even if we already have a deploy object - # @return [String]: Our new +deploy_id+ - def intoDeploy(mommacat, force: false) - if force or (!@deploy) - MU.log "Inserting #{self} [#{self.object_id}] into #{mommacat.deploy_id} as a #{@config['name']}", MU::DEBUG - - @deploy = mommacat - @deploy.addKitten(@cloudclass.cfg_plural, @config['name'], self) - @deploy_id = @deploy.deploy_id - @cloudobj.intoDeploy(mommacat, force: force) if @cloudobj - end - @deploy_id - end - - # Return the +virtual_name+ config field, if it is set. - # @param name [String]: If set, will only return a value if +virtual_name+ matches this string - # @return [String,nil] - def virtual_name(name = nil) - if @config and @config['virtual_name'] and - (!name or name == @config['virtual_name']) - return @config['virtual_name'] - end - nil - end - - # @param mommacat [MU::MommaCat]: The deployment containing this cloud resource - # @param mu_name [String]: Optional- specify the full Mu resource name of an existing resource to load, instead of creating a new one - # @param cloud_id [String]: Optional- specify the cloud provider's identifier for an existing resource to load, instead of creating a new one - # @param kitten_cfg [Hash]: The parse configuration for this object from {MU::Config} - def initialize(**args) - raise MuError, "Cannot invoke Cloud objects without a configuration" if args[:kitten_cfg].nil? - - # We are a parent wrapper object. Initialize our child object and - # housekeeping bits accordingly. - if self.class.name.match(/^MU::Cloud::([^:]+)$/) - @live = true - @delayed_save = args[:delayed_save] - @method_semaphore = Mutex.new - @method_locks = {} - if args[:mommacat] - MU.log "Initializing an instance of #{self.class.name} in #{args[:mommacat].deploy_id} #{mu_name}", MU::DEBUG, details: args[:kitten_cfg] - elsif args[:mu_name].nil? 
- raise MuError, "Can't instantiate a MU::Cloud object with a live deploy or giving us a mu_name" - else - MU.log "Initializing a detached #{self.class.name} named #{args[:mu_name]}", MU::DEBUG, details: args[:kitten_cfg] - end - - my_cloud = args[:kitten_cfg]['cloud'].to_s || MU::Config.defaultCloud - if my_cloud.nil? or !MU::Cloud.supportedClouds.include?(my_cloud) - raise MuError, "Can't instantiate a MU::Cloud object without a valid cloud (saw '#{my_cloud}')" - end - @cloudclass = MU::Cloud.loadCloudType(my_cloud, self.class.shortname) - @cloudparentclass = Object.const_get("MU").const_get("Cloud").const_get(my_cloud) - @cloudobj = @cloudclass.new( - mommacat: args[:mommacat], - kitten_cfg: args[:kitten_cfg], - cloud_id: args[:cloud_id], - mu_name: args[:mu_name] - ) - raise MuError, "Unknown error instantiating #{self}" if @cloudobj.nil? -# These should actually call the method live instead of caching a static value - PUBLIC_ATTRS.each { |a| - instance_variable_set(("@"+a.to_s).to_sym, @cloudobj.send(a)) - } - @deploy ||= args[:mommacat] - @deploy_id ||= @deploy.deploy_id if @deploy - - # Register with the containing deployment - if !@deploy.nil? and !@cloudobj.mu_name.nil? and - !@cloudobj.mu_name.empty? and !args[:delay_descriptor_load] - describe # XXX is this actually safe here? - @deploy.addKitten(self.class.cfg_name, @config['name'], self) - elsif !@deploy.nil? and @cloudobj.mu_name.nil? - MU.log "#{self} in #{@deploy.deploy_id} didn't generate a mu_name after being loaded/initialized, dependencies on this resource will probably be confused!", MU::ERR, details: [caller, args.keys] - end - - # We are actually a child object invoking this via super() from its - # own initialize(), so initialize all the attributes and instance - # variables we know to be universal. 
- else - - # Declare the attributes that everyone should have - class << self - PUBLIC_ATTRS.each { |a| - attr_reader a - } - end - -# XXX this butchers ::Id and ::Ref objects that might be used by dependencies() to good effect, but we also can't expect our implementations to cope with knowing when a .to_s has to be appended to things at random - @config = MU::Config.manxify(args[:kitten_cfg]) || MU::Config.manxify(args[:config]) - - if !@config - MU.log "Missing config arguments in setInstanceVariables, can't initialize a cloud object without it", MU::ERR, details: args.keys - raise MuError, "Missing config arguments in setInstanceVariables" - end - - @deploy = args[:mommacat] || args[:deploy] - - @credentials = args[:credentials] - @credentials ||= @config['credentials'] - - @cloud = @config['cloud'] - if !@cloud - if self.class.name.match(/^MU::Cloud::([^:]+)(?:::.+|$)/) - cloudclass_name = Regexp.last_match[1] - if MU::Cloud.supportedClouds.include?(cloudclass_name) - @cloud = cloudclass_name - end - end - end - if !@cloud - raise MuError, "Failed to determine what cloud #{self} should be in!" - end - - @environment = @config['environment'] - if @deploy - @deploy_id = @deploy.deploy_id - @appname = @deploy.appname - end - - @cloudclass = MU::Cloud.loadCloudType(@cloud, self.class.shortname) - @cloudparentclass = Object.const_get("MU").const_get("Cloud").const_get(@cloud) - - # A pre-existing object, you say? - if args[:cloud_id] - -# TODO implement ::Id for every cloud... and they should know how to get from -# cloud_desc to a fully-resolved ::Id object, not just the short string - - @cloud_id = args[:cloud_id] - describe(cloud_id: @cloud_id) - @habitat_id = habitat_id # effectively, cache this - - # If we can build us an ::Id object for @cloud_id instead of a - # string, do so. 
- begin - idclass = Object.const_get("MU").const_get("Cloud").const_get(@cloud).const_get("Id") - long_id = if @deploydata and @deploydata[idclass.idattr.to_s] - @deploydata[idclass.idattr.to_s] - elsif self.respond_to?(idclass.idattr) - self.send(idclass.idattr) - end - - @cloud_id = idclass.new(long_id) if !long_id.nil? and !long_id.empty? -# 1 see if we have the value on the object directly or in deploy data -# 2 set an attr_reader with the value -# 3 rewrite our @cloud_id attribute with a ::Id object - rescue NameError, MU::Cloud::MuCloudResourceNotImplemented - end - - end - - # Use pre-existing mu_name (we're probably loading an extant deploy) - # if available - if args[:mu_name] - @mu_name = args[:mu_name].dup - # If scrub_mu_isms is set, our mu_name is always just the bare name - # field of the resource. - elsif @config['scrub_mu_isms'] - @mu_name = @config['name'].dup -# XXX feck it insert an inheritable method right here? Set a default? How should resource implementations determine whether they're instantiating a new object? - end - - @tags = {} - if !@config['scrub_mu_isms'] - @tags = @deploy ? @deploy.listStandardTags : MU::MommaCat.listStandardTags - end - if @config['tags'] - @config['tags'].each { |tag| - @tags[tag['key']] = tag['value'] - } - end - - if @cloudparentclass.respond_to?(:resourceInitHook) - @cloudparentclass.resourceInitHook(self, @deploy) - end - - # Add cloud-specific instance methods for our resource objects to - # inherit. - if @cloudparentclass.const_defined?(:AdditionalResourceMethods) - self.extend @cloudparentclass.const_get(:AdditionalResourceMethods) - end - - if ["Server", "ServerPool"].include?(self.class.shortname) and @deploy - @mu_name ||= @deploy.getResourceName(@config['name'], need_unique_string: @config.has_key?("basis")) - if self.class.shortname == "Server" - @groomer = MU::Groomer.new(self) - end - - @groomclass = MU::Groomer.loadGroomer(@config["groomer"]) - - if windows? 
or @config['active_directory'] and !@mu_windows_name - if !@deploydata.nil? and !@deploydata['mu_windows_name'].nil? - @mu_windows_name = @deploydata['mu_windows_name'] - else - # Use the same random differentiator as the "real" name if we're - # from a ServerPool. Helpful for admin sanity. - unq = @mu_name.sub(/^.*?-(...)$/, '\1') - if @config['basis'] and !unq.nil? and !unq.empty? - @mu_windows_name = @deploy.getResourceName(@config['name'], max_length: 15, need_unique_string: true, use_unique_string: unq, reuse_unique_string: true) - else - @mu_windows_name = @deploy.getResourceName(@config['name'], max_length: 15, need_unique_string: true) - end - end - end - class << self - attr_reader :groomer - attr_reader :groomerclass - attr_accessor :mu_windows_name # XXX might be ok as reader now - end - end - end - - end - - def cloud - if @cloud - @cloud - elsif @config and @config['cloud'] - @config['cloud'] - elsif self.class.name.match(/^MU::Cloud::([^:]+)::.+/) - cloudclass_name = Regexp.last_match[1] - if MU::Cloud.supportedClouds.include?(cloudclass_name) - cloudclass_name - else - nil - end - else - nil - end - end - - - # Remove all metadata and cloud resources associated with this object - def destroy - if self.class.cfg_name == "server" - begin - ip = canonicalIP - MU::Master.removeIPFromSSHKnownHosts(ip) if ip - if @deploy and @deploy.deployment and - @deploy.deployment['servers'] and @config['name'] - me = @deploy.deployment['servers'][@config['name']][@mu_name] - if me - ["private_ip_address", "public_ip_address"].each { |field| - if me[field] - MU::Master.removeIPFromSSHKnownHosts(me[field]) - end - } - if me["private_ip_list"] - me["private_ip_list"].each { |private_ip| - MU::Master.removeIPFromSSHKnownHosts(private_ip) - } - end - end - end - rescue MU::MuError => e - MU.log e.message, MU::WARN - end - end - if !@cloudobj.nil? and !@cloudobj.groomer.nil? - @cloudobj.groomer.cleanup - elsif !@groomer.nil? - @groomer.cleanup - end - if !@deploy.nil? 
- if !@cloudobj.nil? and !@config.nil? and !@cloudobj.mu_name.nil? - @deploy.notify(self.class.cfg_plural, @config['name'], nil, mu_name: @cloudobj.mu_name, remove: true, triggering_node: @cloudobj, delayed_save: @delayed_save) - elsif !@mu_name.nil? - @deploy.notify(self.class.cfg_plural, @config['name'], nil, mu_name: @mu_name, remove: true, triggering_node: self, delayed_save: @delayed_save) - end - @deploy.removeKitten(self) - end - # Make sure that if notify gets called again it won't go returning a - # bunch of now-bogus metadata. - @destroyed = true - if !@cloudobj.nil? - def @cloudobj.notify - {} - end - else - def notify - {} - end - end - end - - # Return the cloud object's idea of where it lives (project, account, - # etc) in the form of an identifier. If not applicable for this object, - # we expect to return +nil+. - # @return [String,nil] - def habitat(nolookup: true) - return nil if ["folder", "habitat"].include?(self.class.cfg_name) - if @cloudobj - @cloudparentclass.habitat(@cloudobj, nolookup: nolookup, deploy: @deploy) - else - @cloudparentclass.habitat(self, nolookup: nolookup, deploy: @deploy) - end - end - - def habitat_id(nolookup: false) - @habitat_id ||= habitat(nolookup: nolookup) - @habitat_id - end - - # We're fundamentally a wrapper class, so go ahead and reroute requests - # that are meant for our wrapped object. - def method_missing(method_sym, *arguments) - if @cloudobj - MU.log "INVOKING #{method_sym.to_s} FROM PARENT CLOUD OBJECT #{self}", MU::DEBUG, details: arguments - @cloudobj.method(method_sym).call(*arguments) - else - raise NoMethodError, "No such instance method #{method_sym.to_s} available on #{self.class.name}" - end - end - - # Merge the passed hash into the existing configuration hash of this - # cloud object. Currently this is only used by the {MU::Adoption} - # module. I don't love exposing this to the whole internal API, but I'm - # probably overthinking that. 
- # @param newcfg [Hash] - def config!(newcfg) - @config.merge!(newcfg) - end - - def cloud_desc(use_cache: true) - describe - - if !@cloudobj.nil? - if @cloudobj.class.instance_methods(false).include?(:cloud_desc) - @cloud_desc_cache ||= @cloudobj.cloud_desc - end - end - if !@config.nil? and !@cloud_id.nil? and (!use_cache or @cloud_desc_cache.nil?) - # The find() method should be returning a Hash with the cloud_id - # as a key and a cloud platform descriptor as the value. - begin - args = { - :region => @config['region'], - :cloud => @config['cloud'], - :cloud_id => @cloud_id, - :credentials => @credentials, - :project => habitat_id, # XXX this belongs in our required_instance_methods hack - :flags => @config - } - @cloudparentclass.required_instance_methods.each { |m| -# if respond_to?(m) -# args[m] = method(m).call -# else - args[m] = instance_variable_get(("@"+m.to_s).to_sym) -# end - } - - matches = self.class.find(args) - if !matches.nil? and matches.is_a?(Hash) -# XXX or if the hash is keyed with an ::Id element, oh boy -# puts matches[@cloud_id][:self_link] -# puts matches[@cloud_id][:url] -# if matches[@cloud_id][:self_link] -# @url ||= matches[@cloud_id][:self_link] -# elsif matches[@cloud_id][:url] -# @url ||= matches[@cloud_id][:url] -# elsif matches[@cloud_id][:arn] -# @arn ||= matches[@cloud_id][:arn] -# end - if matches[@cloud_id] - @cloud_desc_cache = matches[@cloud_id] - else - matches.each_pair { |k, v| # flatten out ::Id objects just in case - if @cloud_id.to_s == k.to_s - @cloud_desc_cache = v - break - end - } - end - end - - if !@cloud_desc_cache - MU.log "cloud_desc via #{self.class.name}.find() failed to locate a live object.\nWas called by #{caller[0]}", MU::WARN, details: args - end - rescue StandardError => e - MU.log "Got #{e.inspect} trying to find cloud handle for #{self.class.shortname} #{@mu_name} (#{@cloud_id})", MU::WARN - raise e - end - end - - return @cloud_desc_cache - end - - # Retrieve all of the known metadata for this 
resource. - # @param cloud_id [String]: The cloud platform's identifier for the resource we're describing. Makes lookups more efficient. - # @return [Array]: mu_name, config, deploydata - def describe(cloud_id: nil) - if cloud_id.nil? and !@cloudobj.nil? - @cloud_id ||= @cloudobj.cloud_id - end - res_type = self.class.cfg_plural - res_name = @config['name'] if !@config.nil? - @credentials ||= @config['credentials'] if !@config.nil? - deploydata = nil - if !@deploy.nil? and @deploy.is_a?(MU::MommaCat) and - !@deploy.deployment.nil? and - !@deploy.deployment[res_type].nil? and - !@deploy.deployment[res_type][res_name].nil? - deploydata = @deploy.deployment[res_type][res_name] - else - # XXX This should only happen on a brand new resource, but we should - # probably complain under other circumstances, if we can - # differentiate them. - end - - if self.class.has_multiples and !@mu_name.nil? and deploydata.is_a?(Hash) and deploydata.has_key?(@mu_name) - @deploydata = deploydata[@mu_name] - elsif deploydata.is_a?(Hash) - @deploydata = deploydata - end - - if @cloud_id.nil? and @deploydata.is_a?(Hash) - if @mu_name.nil? and @deploydata.has_key?('#MU_NAME') - @mu_name = @deploydata['#MU_NAME'] - end - if @deploydata.has_key?('cloud_id') - @cloud_id ||= @deploydata['cloud_id'] - end - end - - return [@mu_name, @config, @deploydata] - end - - # Fetch MU::Cloud objects for each of this object's dependencies, and - # return in an easily-navigable Hash. This can include things listed in - # @config['dependencies'], implicitly-defined dependencies such as - # add_firewall_rules or vpc stanzas, and may refer to objects internal - # to this deployment or external. 
Will populate the instance variables - # @dependencies (general dependencies, which can only be sibling - # resources in this deployment), as well as for certain config stanzas - # which can refer to external resources (@vpc, @loadbalancers, - # @add_firewall_rules) - def dependencies(use_cache: false, debug: false) - @dependencies ||= {} - @loadbalancers ||= [] - @firewall_rules ||= [] - - if @config.nil? - return [@dependencies, @vpc, @loadbalancers] - end - if use_cache and @dependencies.size > 0 - return [@dependencies, @vpc, @loadbalancers] - end - @config['dependencies'] = [] if @config['dependencies'].nil? - - loglevel = debug ? MU::NOTICE : MU::DEBUG - - # First, general dependencies. These should all be fellow members of - # the current deployment. - @config['dependencies'].each { |dep| - @dependencies[dep['type']] ||= {} - next if @dependencies[dep['type']].has_key?(dep['name']) - handle = @deploy.findLitterMate(type: dep['type'], name: dep['name']) if !@deploy.nil? - if !handle.nil? - MU.log "Loaded dependency for #{self}: #{dep['name']} => #{handle}", loglevel - @dependencies[dep['type']][dep['name']] = handle - else - # XXX yell under circumstances where we should expect to have - # our stuff available already? - end - } - - # Special dependencies: my containing VPC - if self.class.can_live_in_vpc and !@config['vpc'].nil? - @config['vpc']["id"] ||= @config['vpc']["vpc_id"] # old deploys - @config['vpc']["name"] ||= @config['vpc']["vpc_name"] # old deploys - # If something hash-ified a MU::Config::Ref here, fix it - if !@config['vpc']["id"].nil? and @config['vpc']["id"].is_a?(Hash) - @config['vpc']["id"] = MU::Config::Ref.new(@config['vpc']["id"]) - end - if !@config['vpc']["id"].nil? - if @config['vpc']["id"].is_a?(MU::Config::Ref) and !@config['vpc']["id"].kitten.nil? 
- @vpc = @config['vpc']["id"].kitten - else - if @config['vpc']['habitat'] - @config['vpc']['habitat'] = MU::Config::Ref.get(@config['vpc']['habitat']) - end - vpc_ref = MU::Config::Ref.get(@config['vpc']) - @vpc = vpc_ref.kitten - end - elsif !@config['vpc']["name"].nil? and @deploy - MU.log "Attempting findLitterMate on VPC for #{self}", loglevel, details: @config['vpc'] - - sib_by_name = @deploy.findLitterMate(name: @config['vpc']['name'], type: "vpcs", return_all: true, habitat: @config['vpc']['project'], debug: debug) - if sib_by_name.is_a?(Array) - if sib_by_name.size == 1 - @vpc = matches.first - MU.log "Single VPC match for #{self}", loglevel, details: @vpc.to_s - else -# XXX ok but this is the wrong place for this really the config parser needs to sort this out somehow - # we got multiple matches, try to pick one by preferred subnet - # behavior - MU.log "Sorting a bunch of VPC matches for #{self}", loglevel, details: sib_by_name.map { |s| s.to_s }.join(", ") - sib_by_name.each { |sibling| - all_private = sibling.subnets.map { |s| s.private? }.all?(true) - all_public = sibling.subnets.map { |s| s.private? 
}.all?(false) - names = sibling.subnets.map { |s| s.name } - ids = sibling.subnets.map { |s| s.cloud_id } - if all_private and ["private", "all_private"].include?(@config['vpc']['subnet_pref']) - @vpc = sibling - break - elsif all_public and ["public", "all_public"].include?(@config['vpc']['subnet_pref']) - @vpc = sibling - break - elsif @config['vpc']['subnet_name'] and - names.include?(@config['vpc']['subnet_name']) -puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}" - @vpc = sibling - break - elsif @config['vpc']['subnet_id'] and - ids.include?(@config['vpc']['subnet_id']) - @vpc = sibling - break - end - } - if !@vpc - sibling = sib_by_name.sample - MU.log "Got multiple matching VPCs for #{self.class.cfg_name} #{@mu_name}, so I'm arbitrarily choosing #{sibling.mu_name}", MU::WARN, details: @config['vpc'] - @vpc = sibling - end - end - else - @vpc = sib_by_name - MU.log "Found exact VPC match for #{self}", loglevel, details: sib_by_name.to_s - end - else - MU.log "No shortcuts available to fetch VPC for #{self}", loglevel, details: @config['vpc'] - end - - if !@vpc and !@config['vpc']["name"].nil? and - @dependencies.has_key?("vpc") and - @dependencies["vpc"].has_key?(@config['vpc']["name"]) - MU.log "Grabbing VPC I see in @dependencies['vpc']['#{@config['vpc']["name"]}'] for #{self}", loglevel, details: @config['vpc'] - @vpc = @dependencies["vpc"][@config['vpc']["name"]] - elsif !@vpc - tag_key, tag_value = @config['vpc']['tag'].split(/=/, 2) if !@config['vpc']['tag'].nil? - if !@config['vpc'].has_key?("id") and - !@config['vpc'].has_key?("deploy_id") and !@deploy.nil? 
- @config['vpc']["deploy_id"] = @deploy.deploy_id - end - MU.log "Doing findStray for VPC for #{self}", loglevel, details: @config['vpc'] - vpcs = MU::MommaCat.findStray( - @config['cloud'], - "vpc", - deploy_id: @config['vpc']["deploy_id"], - cloud_id: @config['vpc']["id"], - name: @config['vpc']["name"], - tag_key: tag_key, - tag_value: tag_value, - habitats: [@project_id], - region: @config['vpc']["region"], - calling_deploy: @deploy, - credentials: @credentials, - dummy_ok: true, - debug: debug - ) - @vpc = vpcs.first if !vpcs.nil? and vpcs.size > 0 - end - if @vpc and @vpc.config and @vpc.config['bastion'] and - @vpc.config['bastion'].to_h['name'] != @config['name'] - refhash = @vpc.config['bastion'].to_h - refhash['deploy_id'] ||= @vpc.deploy.deploy_id - natref = MU::Config::Ref.get(refhash) - if natref and natref.kitten(@vpc.deploy) - @nat = natref.kitten(@vpc.deploy) - end - end - if @nat.nil? and !@vpc.nil? and ( - @config['vpc'].has_key?("nat_host_id") or - @config['vpc'].has_key?("nat_host_tag") or - @config['vpc'].has_key?("nat_host_ip") or - @config['vpc'].has_key?("nat_host_name") - ) - - nat_tag_key, nat_tag_value = @config['vpc']['nat_host_tag'].split(/=/, 2) if !@config['vpc']['nat_host_tag'].nil? - - @nat = @vpc.findBastion( - nat_name: @config['vpc']['nat_host_name'], - nat_cloud_id: @config['vpc']['nat_host_id'], - nat_tag_key: nat_tag_key, - nat_tag_value: nat_tag_value, - nat_ip: @config['vpc']['nat_host_ip'] - ) - - if @nat.nil? - if !@vpc.cloud_desc.nil? 
- @nat = @vpc.findNat( - nat_cloud_id: @config['vpc']['nat_host_id'], - nat_filter_key: "vpc-id", - region: @config['vpc']["region"], - nat_filter_value: @vpc.cloud_id, - credentials: @config['credentials'] - ) - else - @nat = @vpc.findNat( - nat_cloud_id: @config['vpc']['nat_host_id'], - region: @config['vpc']["region"], - credentials: @config['credentials'] - ) - end - end - end - elsif self.class.cfg_name == "vpc" - @vpc = self - end - - # Google accounts usually have a useful default VPC we can use - if @vpc.nil? and @project_id and @cloud == "Google" and - self.class.can_live_in_vpc - MU.log "Seeing about default VPC for #{self.to_s}", MU::NOTICE - vpcs = MU::MommaCat.findStray( - "Google", - "vpc", - cloud_id: "default", - habitats: [@project_id], - credentials: @credentials, - dummy_ok: true, - debug: debug - ) - @vpc = vpcs.first if !vpcs.nil? and vpcs.size > 0 - end - - # Special dependencies: LoadBalancers I've asked to attach to an - # instance. - if @config.has_key?("loadbalancers") - @loadbalancers = [] if !@loadbalancers - @config['loadbalancers'].each { |lb| - MU.log "Loading LoadBalancer for #{self}", MU::DEBUG, details: lb - if @dependencies.has_key?("loadbalancer") and - @dependencies["loadbalancer"].has_key?(lb['concurrent_load_balancer']) - @loadbalancers << @dependencies["loadbalancer"][lb['concurrent_load_balancer']] - else - if !lb.has_key?("existing_load_balancer") and - !lb.has_key?("deploy_id") and !@deploy.nil? - lb["deploy_id"] = @deploy.deploy_id - end - lbs = MU::MommaCat.findStray( - @config['cloud'], - "loadbalancer", - deploy_id: lb["deploy_id"], - cloud_id: lb['existing_load_balancer'], - name: lb['concurrent_load_balancer'], - region: @config["region"], - calling_deploy: @deploy, - dummy_ok: true - ) - @loadbalancers << lbs.first if !lbs.nil? and lbs.size > 0 - end - } - end - - # Munge in external resources referenced by the existing_deploys - # keyword - if @config["existing_deploys"] && !@config["existing_deploys"].empty? 
- @config["existing_deploys"].each { |ext_deploy| - if ext_deploy["cloud_id"] - found = MU::MommaCat.findStray( - @config['cloud'], - ext_deploy["cloud_type"], - cloud_id: ext_deploy["cloud_id"], - region: @config['region'], - dummy_ok: false - ).first - - MU.log "Couldn't find existing resource #{ext_deploy["cloud_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil? - @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: found.mu_name, triggering_node: @mu_name) - elsif ext_deploy["mu_name"] && ext_deploy["deploy_id"] - MU.log "#{ext_deploy["mu_name"]} / #{ext_deploy["deploy_id"]}" - found = MU::MommaCat.findStray( - @config['cloud'], - ext_deploy["cloud_type"], - deploy_id: ext_deploy["deploy_id"], - mu_name: ext_deploy["mu_name"], - region: @config['region'], - dummy_ok: false - ).first - - MU.log "Couldn't find existing resource #{ext_deploy["mu_name"]}/#{ext_deploy["deploy_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil? - @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: ext_deploy["mu_name"], triggering_node: @mu_name) - else - MU.log "Trying to find existing deploy, but either the cloud_id is not valid or no mu_name and deploy_id where provided", MU::ERR - end - } - end - - if @config['dns_records'] && !@config['dns_records'].empty? - @config['dns_records'].each { |dnsrec| - if dnsrec.has_key?("name") - if dnsrec['name'].start_with?(@deploy.deploy_id.downcase) && !dnsrec['name'].start_with?(@mu_name.downcase) - MU.log "DNS records for #{@mu_name} seem to be wrong, deleting from current config", MU::WARN, details: dnsrec - dnsrec.delete('name') - dnsrec.delete('target') - end - end - } - end - - return [@dependencies, @vpc, @loadbalancers] - end - - # Using the automatically-defined +@vpc+ from {dependencies} in - # conjunction with our config, return our configured subnets. 
- # @return [Array] - def mySubnets - dependencies - if !@vpc or !@config["vpc"] - return nil - end - - if @config["vpc"]["subnet_id"] or @config["vpc"]["subnet_name"] - @config["vpc"]["subnets"] ||= [] - subnet_block = {} - subnet_block["subnet_id"] = @config["vpc"]["subnet_id"] if @config["vpc"]["subnet_id"] - subnet_block["subnet_name"] = @config["vpc"]["subnet_name"] if @config["vpc"]["subnet_name"] - @config["vpc"]["subnets"] << subnet_block - @config["vpc"]["subnets"].uniq! - end - - if (!@config["vpc"]["subnets"] or @config["vpc"]["subnets"].empty?) and - !@config["vpc"]["subnet_id"] - return @vpc.subnets - end - - subnets = [] - @config["vpc"]["subnets"].each { |subnet| - subnet_obj = @vpc.getSubnet(cloud_id: subnet["subnet_id"].to_s, name: subnet["subnet_name"].to_s) - raise MuError, "Couldn't find a live subnet for #{self.to_s} matching #{subnet} in #{@vpc.to_s} (#{@vpc.subnets.map { |s| s.name }.join(",")})" if subnet_obj.nil? - subnets << subnet_obj - } - - subnets - end - - # @return [Array] - def myFirewallRules - dependencies - - rules = [] - if @dependencies.has_key?("firewall_rule") - rules = @dependencies['firewall_rule'].values - end -# XXX what other ways are these specified? - - rules - end - - # If applicable, allow this resource's NAT host blanket access via - # rules in its associated +admin+ firewall rule set. - def allowBastionAccess - return nil if !@nat or !@nat.is_a?(MU::Cloud::Server) - - myFirewallRules.each { |acl| - if acl.config["admin"] - acl.addRule(@nat.listIPs, proto: "tcp") - acl.addRule(@nat.listIPs, proto: "udp") - acl.addRule(@nat.listIPs, proto: "icmp") - end - } - end - - # Defaults any resources that don't declare their release-readiness to - # ALPHA. That'll learn 'em. - def self.quality - MU::Cloud::ALPHA - end - - # Return a list of "container" artifacts, by class, that apply to this - # resource type in a cloud provider. 
This is so methods that call find - # know whether to call +find+ with identifiers for parent resources. - # This is similar in purpose to the +isGlobal?+ resource class method, - # which tells our search functions whether or not a resource scopes to - # a region. In almost all cases this is one-entry list consisting of - # +:Habitat+. Notable exceptions include most implementations of - # +Habitat+, which either reside inside a +:Folder+ or nothing at all; - # whereas a +:Folder+ tends to not have any containing parent. Very few - # resource implementations will need to override this. - # A +nil+ entry in this list is interpreted as "this resource can be - # global." - # @return [Array] - def self.canLiveIn - if self.shortname == "Folder" - [nil, :Folder] - elsif self.shortname == "Habitat" - [:Folder] - else - [:Habitat] - end - end - - def self.find(*flags) - allfound = {} - - MU::Cloud.availableClouds.each { |cloud| - begin - args = flags.first - next if args[:cloud] and args[:cloud] != cloud - # skip this cloud if we have a region argument that makes no - # sense there - cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) - next if cloudbase.listCredentials.nil? or cloudbase.listCredentials.empty? or cloudbase.credConfig(args[:credentials]).nil? - if args[:region] and cloudbase.respond_to?(:listRegions) - if !cloudbase.listRegions(credentials: args[:credentials]) - MU.log "Failed to get region list for credentials #{args[:credentials]} in cloud #{cloud}", MU::ERR, details: caller - else - next if !cloudbase.listRegions(credentials: args[:credentials]).include?(args[:region]) - end - end - begin - cloudclass = MU::Cloud.loadCloudType(cloud, shortname) - rescue MU::MuError - next - end - - found = cloudclass.find(args) - if !found.nil? 
- if found.is_a?(Hash) - allfound.merge!(found) - else - raise MuError, "#{cloudclass}.find returned a non-Hash result" - end - end - rescue MuCloudResourceNotImplemented - end - } - allfound - end - - - if shortname == "Database" - - # Getting the password for a database's master user, and saving it in a database / cluster specific vault - def getPassword - if @config['password'].nil? - if @config['auth_vault'] && !@config['auth_vault'].empty? - @config['password'] = @groomclass.getSecret( - vault: @config['auth_vault']['vault'], - item: @config['auth_vault']['item'], - field: @config['auth_vault']['password_field'] - ) - else - # Should we use random instead? - @config['password'] = Password.pronounceable(10..12) - end - end - - creds = { - "username" => @config["master_user"], - "password" => @config["password"] - } - @groomclass.saveSecret(vault: @mu_name, item: "database_credentials", data: creds) - end - end - - if shortname == "DNSZone" - def self.genericMuDNSEntry(*flags) -# XXX have this switch on a global config for where Mu puts its DNS - cloudclass = MU::Cloud.loadCloudType(MU::Config.defaultCloud, "DNSZone") - cloudclass.genericMuDNSEntry(flags.first) - end - def self.createRecordsFromConfig(*flags) - cloudclass = MU::Cloud.loadCloudType(MU::Config.defaultCloud, "DNSZone") - if !flags.nil? and flags.size == 1 - cloudclass.createRecordsFromConfig(flags.first) - else - cloudclass.createRecordsFromConfig(*flags) - end - end - end - - if shortname == "Server" or shortname == "ServerPool" - def windows? - return true if %w{win2k16 win2k12r2 win2k12 win2k8 win2k8r2 win2k19 windows}.include?(@config['platform']) - begin - return true if cloud_desc.respond_to?(:platform) and cloud_desc.platform == "Windows" -# XXX ^ that's AWS-speak, doesn't cover GCP or anything else; maybe we should require cloud layers to implement this so we can just call @cloudobj.windows? 
- rescue MU::MuError - return false - end - false - end - - # Gracefully message and attempt to accommodate the common transient errors peculiar to Windows nodes - # @param e [Exception]: The exception that we're handling - # @param retries [Integer]: The current number of retries, which we'll increment and pass back to the caller - # @param rebootable_fails [Integer]: The current number of reboot-worthy failures, which we'll increment and pass back to the caller - # @param max_retries [Integer]: Maximum number of retries to attempt; we'll raise an exception if this is exceeded - # @param reboot_on_problems [Boolean]: Whether we should try to reboot a "stuck" machine - # @param retry_interval [Integer]: How many seconds to wait before returning for another attempt - def handleWindowsFail(e, retries, rebootable_fails, max_retries: 30, reboot_on_problems: false, retry_interval: 45) - msg = "WinRM connection to https://"+@mu_name+":5986/wsman: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" - if e.class.name == "WinRM::WinRMAuthorizationError" or e.message.match(/execution expired/) and reboot_on_problems - if rebootable_fails > 0 and (rebootable_fails % 7) == 0 - MU.log "#{@mu_name} still misbehaving, forcing Stop and Start from API", MU::WARN - reboot(true) # vicious API stop/start - sleep retry_interval*3 - rebootable_fails = 0 - else - if rebootable_fails == 5 - MU.log "#{@mu_name} misbehaving, attempting to reboot from API", MU::WARN - reboot # graceful API restart - sleep retry_interval*2 - end - rebootable_fails = rebootable_fails + 1 - end - end - if retries < max_retries - if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0 and retries != 0) - MU.log msg, MU::NOTICE - elsif retries/max_retries > 0.5 - MU.log msg, MU::WARN, details: e.inspect - end - sleep retry_interval - retries = retries + 1 - else - raise MuError, "#{@mu_name}: #{e.inspect} trying to connect with WinRM, max_retries exceeded", e.backtrace - 
end - return [retries, rebootable_fails] - end - - def windowsRebootPending?(shell = nil) - if shell.nil? - shell = getWinRMSession(1, 30) - end -# if (Get-Item "HKLM:/SOFTWARE/Microsoft/Windows/CurrentVersion/WindowsUpdate/Auto Update/RebootRequired" -EA Ignore) { exit 1 } - cmd = %Q{ - if (Get-ChildItem "HKLM:/Software/Microsoft/Windows/CurrentVersion/Component Based Servicing/RebootPending" -EA Ignore) { - echo "Component Based Servicing/RebootPending is true" - exit 1 - } - if (Get-ItemProperty "HKLM:/SYSTEM/CurrentControlSet/Control/Session Manager" -Name PendingFileRenameOperations -EA Ignore) { - echo "Control/Session Manager/PendingFileRenameOperations is true" - exit 1 - } - try { - $util = [wmiclass]"\\\\.\\root\\ccm\\clientsdk:CCM_ClientUtilities" - $status = $util.DetermineIfRebootPending() - if(($status -ne $null) -and $status.RebootPending){ - echo "WMI says RebootPending is true" - exit 1 - } - } catch { - exit 0 - } - exit 0 - } - resp = shell.run(cmd) - returnval = resp.exitcode == 0 ? false : true - shell.close - returnval - end - - # Basic setup tasks performed on a new node during its first WinRM - # connection. Most of this is terrible Windows glue. - # @param shell [WinRM::Shells::Powershell]: An active Powershell session to the new node. 
- def initialWinRMTasks(shell) - retries = 0 - rebootable_fails = 0 - begin - if !@config['use_cloud_provider_windows_password'] - pw = @groomer.getSecret( - vault: @config['mu_name'], - item: "windows_credentials", - field: "password" - ) - win_check_for_pw = %Q{Add-Type -AssemblyName System.DirectoryServices.AccountManagement; $Creds = (New-Object System.Management.Automation.PSCredential("#{@config["windows_admin_username"]}", (ConvertTo-SecureString "#{pw}" -AsPlainText -Force)));$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine); $DS.ValidateCredentials($Creds.GetNetworkCredential().UserName, $Creds.GetNetworkCredential().password); echo $Result} - resp = shell.run(win_check_for_pw) - if resp.stdout.chomp != "True" - win_set_pw = %Q{(([adsi]('WinNT://./#{@config["windows_admin_username"]}, user')).psbase.invoke('SetPassword', '#{pw}'))} - resp = shell.run(win_set_pw) - puts resp.stdout - MU.log "Resetting Windows host password", MU::NOTICE, details: resp.stdout - end - end - - # Install Cygwin here, because for some reason it breaks inside Chef - # XXX would love to not do this here - pkgs = ["bash", "mintty", "vim", "curl", "openssl", "wget", "lynx", "openssh"] - admin_home = "c:/bin/cygwin/home/#{@config["windows_admin_username"]}" - install_cygwin = %Q{ - If (!(Test-Path "c:/bin/cygwin/Cygwin.bat")){ - $WebClient = New-Object System.Net.WebClient - $WebClient.DownloadFile("http://cygwin.com/setup-x86_64.exe","$env:Temp/setup-x86_64.exe") - Start-Process -wait -FilePath $env:Temp/setup-x86_64.exe -ArgumentList "-q -n -l $env:Temp/cygwin -R c:/bin/cygwin -s http://mirror.cs.vt.edu/pub/cygwin/cygwin/ -P #{pkgs.join(',')}" - } - if(!(Test-Path #{admin_home})){ - New-Item -type directory -path #{admin_home} - } - if(!(Test-Path #{admin_home}/.ssh)){ - New-Item -type directory -path #{admin_home}/.ssh - } - if(!(Test-Path #{admin_home}/.ssh/authorized_keys)){ - New-Item 
#{admin_home}/.ssh/authorized_keys -type file -force -value "#{@deploy.ssh_public_key}" - } - } - resp = shell.run(install_cygwin) - if resp.exitcode != 0 - MU.log "Failed at installing Cygwin", MU::ERR, details: resp - end - - hostname = nil - if !@config['active_directory'].nil? - if @config['active_directory']['node_type'] == "domain_controller" && @config['active_directory']['domain_controller_hostname'] - hostname = @config['active_directory']['domain_controller_hostname'] - @mu_windows_name = hostname - else - # Do we have an AD specific hostname? - hostname = @mu_windows_name - end - else - hostname = @mu_windows_name - end - resp = shell.run(%Q{hostname}) - - if resp.stdout.chomp != hostname - resp = shell.run(%Q{Rename-Computer -NewName '#{hostname}' -Force -PassThru -Restart; Restart-Computer -Force}) - MU.log "Renaming Windows host to #{hostname}; this will trigger a reboot", MU::NOTICE, details: resp.stdout - reboot(true) - sleep 30 - end - rescue WinRM::WinRMError, HTTPClient::ConnectTimeoutError => e - retries, rebootable_fails = handleWindowsFail(e, retries, rebootable_fails, max_retries: 10, reboot_on_problems: true, retry_interval: 30) - retry - end - end - - - # Basic setup tasks performed on a new node during its first initial - # ssh connection. Most of this is terrible Windows glue. - # @param ssh [Net::SSH::Connection::Session]: The active SSH session to the new node. 
- def initialSSHTasks(ssh) - win_env_fix = %q{echo 'export PATH="$PATH:/cygdrive/c/opscode/chef/embedded/bin"' > "$HOME/chef-client"; echo 'prev_dir="`pwd`"; for __dir in /proc/registry/HKEY_LOCAL_MACHINE/SYSTEM/CurrentControlSet/Control/Session\ Manager/Environment;do cd "$__dir"; for __var in `ls * | grep -v TEMP | grep -v TMP`;do __var=`echo $__var | tr "[a-z]" "[A-Z]"`; test -z "${!__var}" && export $__var="`cat $__var`" >/dev/null 2>&1; done; done; cd "$prev_dir"; /cygdrive/c/opscode/chef/bin/chef-client.bat $@' >> "$HOME/chef-client"; chmod 700 "$HOME/chef-client"; ( grep "^alias chef-client=" "$HOME/.bashrc" || echo 'alias chef-client="$HOME/chef-client"' >> "$HOME/.bashrc" ) ; ( grep "^alias mu-groom=" "$HOME/.bashrc" || echo 'alias mu-groom="powershell -File \"c:/Program Files/Amazon/Ec2ConfigService/Scripts/UserScript.ps1\""' >> "$HOME/.bashrc" )} - win_installer_check = %q{ls /proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows/CurrentVersion/Installer/} - lnx_installer_check = %q{ps auxww | awk '{print $11}' | egrep '(/usr/bin/yum|apt-get|dpkg)'} - lnx_updates_check = %q{( test -f /.mu-installer-ran-updates || ! test -d /var/lib/cloud/instance ) || echo "userdata still running"} - win_set_pw = nil - - if windows? 
and !@config['use_cloud_provider_windows_password'] - # This covers both the case where we have a windows password passed from a vault and where we need to use a a random Windows Admin password generated by MU::Cloud::Server.generateWindowsPassword - pw = @groomer.getSecret( - vault: @config['mu_name'], - item: "windows_credentials", - field: "password" - ) - win_check_for_pw = %Q{powershell -Command '& {Add-Type -AssemblyName System.DirectoryServices.AccountManagement; $Creds = (New-Object System.Management.Automation.PSCredential("#{@config["windows_admin_username"]}", (ConvertTo-SecureString "#{pw}" -AsPlainText -Force)));$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine); $DS.ValidateCredentials($Creds.GetNetworkCredential().UserName, $Creds.GetNetworkCredential().password); echo $Result}'} - win_set_pw = %Q{powershell -Command "& {(([adsi]('WinNT://./#{@config["windows_admin_username"]}, user')).psbase.invoke('SetPassword', '#{pw}'))}"} - end - - # There shouldn't be a use case where a domain joined computer goes through initialSSHTasks. Removing Active Directory specific computer rename. - set_hostname = true - hostname = nil - if !@config['active_directory'].nil? - if @config['active_directory']['node_type'] == "domain_controller" && @config['active_directory']['domain_controller_hostname'] - hostname = @config['active_directory']['domain_controller_hostname'] - @mu_windows_name = hostname - set_hostname = true - else - # Do we have an AD specific hostname? - hostname = @mu_windows_name - set_hostname = true - end - else - hostname = @mu_windows_name - end - win_check_for_hostname = %Q{powershell -Command '& {hostname}'} - win_set_hostname = %Q{powershell -Command "& {Rename-Computer -NewName '#{hostname}' -Force -PassThru -Restart; Restart-Computer -Force }"} - - begin - # Set our admin password first, if we need to - if windows? and !win_set_pw.nil? 
and !win_check_for_pw.nil? - output = ssh.exec!(win_check_for_pw) - raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? - if !output.match(/True/) - MU.log "Setting Windows password for user #{@config['windows_admin_username']}", details: ssh.exec!(win_set_pw) - end - end - if windows? - output = ssh.exec!(win_env_fix) - output += ssh.exec!(win_installer_check) - raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? - if output.match(/InProgress/) - raise MU::Cloud::BootstrapTempFail, "Windows Installer service is still doing something, need to wait" - end - if set_hostname and !@hostname_set and @mu_windows_name - output = ssh.exec!(win_check_for_hostname) - raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? - if !output.match(/#{@mu_windows_name}/) - MU.log "Setting Windows hostname to #{@mu_windows_name}", details: ssh.exec!(win_set_hostname) - @hostname_set = true - # Reboot from the API too, in case Windows is flailing - if !@cloudobj.nil? - @cloudobj.reboot - else - reboot - end - raise MU::Cloud::BootstrapTempFail, "Set hostname in Windows, waiting for reboot" - end - end - else - output = ssh.exec!(lnx_installer_check) - if !output.nil? and !output.empty? - raise MU::Cloud::BootstrapTempFail, "Linux package manager is still doing something, need to wait (#{output})" - end - if !@config['skipinitialupdates'] and - !@config['scrub_mu_isms'] and - !@config['userdata_script'] - output = ssh.exec!(lnx_updates_check) - if !output.nil? 
and output.match(/userdata still running/) - raise MU::Cloud::BootstrapTempFail, "Waiting for initial userdata system updates to complete" - end - end - end - rescue RuntimeError => e - raise MU::Cloud::BootstrapTempFail, "Got #{e.inspect} performing initial SSH connect tasks, will try again" - end - - end - - # Get a privileged Powershell session on the server in question, using SSL-encrypted WinRM with certificate authentication. - # @param max_retries [Integer]: - # @param retry_interval [Integer]: - # @param timeout [Integer]: - # @param winrm_retries [Integer]: - # @param reboot_on_problems [Boolean]: - def getWinRMSession(max_retries = 40, retry_interval = 60, timeout: 30, winrm_retries: 2, reboot_on_problems: false) - _nat_ssh_key, _nat_ssh_user, _nat_ssh_host, canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig - @mu_name ||= @config['mu_name'] - - shell = nil - opts = nil - # and now, a thing I really don't want to do - MU::Master.addInstanceToEtcHosts(canonical_ip, @mu_name) - - # catch exceptions that circumvent our regular call stack - Thread.abort_on_exception = false - Thread.handle_interrupt(WinRM::WinRMWSManFault => :never) { - begin - Thread.handle_interrupt(WinRM::WinRMWSManFault => :immediate) { - MU.log "(Probably harmless) Caught a WinRM::WinRMWSManFault in #{Thread.current.inspect}", MU::DEBUG, details: Thread.current.backtrace - } - ensure - # Reraise something useful - end - } - - retries = 0 - rebootable_fails = 0 - begin - loglevel = retries > 4 ? 
MU::NOTICE : MU::DEBUG - MU.log "Calling WinRM on #{@mu_name}", loglevel, details: opts - opts = { - retry_limit: winrm_retries, - no_ssl_peer_verification: true, # XXX this should not be necessary; we get 'hostname "foo" does not match the server certificate' even when it clearly does match - ca_trust_path: "#{MU.mySSLDir}/Mu_CA.pem", - transport: :ssl, - operation_timeout: timeout, - } - if retries % 2 == 0 # NTLM password over https - opts[:endpoint] = 'https://'+canonical_ip+':5986/wsman' - opts[:user] = @config['windows_admin_username'] - opts[:password] = getWindowsAdminPassword - else # certificate auth over https - opts[:endpoint] = 'https://'+@mu_name+':5986/wsman' - opts[:client_cert] = "#{MU.mySSLDir}/#{@mu_name}-winrm.crt" - opts[:client_key] = "#{MU.mySSLDir}/#{@mu_name}-winrm.key" - end - conn = WinRM::Connection.new(opts) - conn.logger.level = :debug if retries > 2 - MU.log "WinRM connection to #{@mu_name} created", MU::DEBUG, details: conn - shell = conn.shell(:powershell) - shell.run('ipconfig') # verify that we can do something - rescue Errno::EHOSTUNREACH, Errno::ECONNREFUSED, HTTPClient::ConnectTimeoutError, OpenSSL::SSL::SSLError, SocketError, WinRM::WinRMError, Timeout::Error => e - retries, rebootable_fails = handleWindowsFail(e, retries, rebootable_fails, max_retries: max_retries, reboot_on_problems: reboot_on_problems, retry_interval: retry_interval) - retry - ensure - MU::Master.removeInstanceFromEtcHosts(@mu_name) - end - - shell - end - - # @param max_retries [Integer]: Number of connection attempts to make before giving up - # @param retry_interval [Integer]: Number of seconds to wait between connection attempts - # @return [Net::SSH::Connection::Session] - def getSSHSession(max_retries = 12, retry_interval = 30) - ssh_keydir = Etc.getpwnam(@deploy.mu_user).dir+"/.ssh" - nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, _ssh_key_name = getSSHConfig - session = nil - retries = 0 - - vpc_class = 
Object.const_get("MU").const_get("Cloud").const_get(@cloud).const_get("VPC") - - # XXX WHY is this a thing - Thread.handle_interrupt(Errno::ECONNREFUSED => :never) { - } - - begin - MU::Cloud.handleNetSSHExceptions - if !nat_ssh_host.nil? - proxy_cmd = "ssh -q -o StrictHostKeyChecking=no -W %h:%p #{nat_ssh_user}@#{nat_ssh_host}" - MU.log "Attempting SSH to #{canonical_ip} (#{@mu_name}) as #{ssh_user} with key #{@deploy.ssh_key_name} using proxy '#{proxy_cmd}'" if retries == 0 - proxy = Net::SSH::Proxy::Command.new(proxy_cmd) - session = Net::SSH.start( - canonical_ip, - ssh_user, - :config => false, - :keys_only => true, - :keys => [ssh_keydir+"/"+nat_ssh_key, ssh_keydir+"/"+@deploy.ssh_key_name], - :verify_host_key => false, - # :verbose => :info, - :host_key => "ssh-rsa", - :port => 22, - :auth_methods => ['publickey'], - :proxy => proxy - ) - else - - MU.log "Attempting SSH to #{canonical_ip} (#{@mu_name}) as #{ssh_user} with key #{ssh_keydir}/#{@deploy.ssh_key_name}" if retries == 0 - session = Net::SSH.start( - canonical_ip, - ssh_user, - :config => false, - :keys_only => true, - :keys => [ssh_keydir+"/"+@deploy.ssh_key_name], - :verify_host_key => false, - # :verbose => :info, - :host_key => "ssh-rsa", - :port => 22, - :auth_methods => ['publickey'] - ) - end - retries = 0 - rescue Net::SSH::HostKeyMismatch => e - MU.log("Remembering new key: #{e.fingerprint}") - e.remember_host! 
- session.close - retry -# rescue SystemCallError, Timeout::Error, Errno::ECONNRESET, Errno::EHOSTUNREACH, Net::SSH::Proxy::ConnectError, SocketError, Net::SSH::Disconnect, Net::SSH::AuthenticationFailed, IOError, Net::SSH::ConnectionTimeout, Net::SSH::Proxy::ConnectError, MU::Cloud::NetSSHFail => e - rescue SystemExit, Timeout::Error, Net::SSH::AuthenticationFailed, Net::SSH::Disconnect, Net::SSH::ConnectionTimeout, Net::SSH::Proxy::ConnectError, Net::SSH::Exception, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ECONNREFUSED, Errno::EPIPE, SocketError, IOError => e - begin - session.close if !session.nil? - rescue Net::SSH::Disconnect, IOError => e - if windows? - MU.log "Windows has probably closed the ssh session before we could. Waiting before trying again", MU::NOTICE - else - MU.log "ssh session was closed unexpectedly, waiting before trying again", MU::NOTICE - end - sleep 10 - end - - if retries < max_retries - retries = retries + 1 - msg = "ssh #{ssh_user}@#{@mu_name}: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" - if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0) - MU.log msg, MU::NOTICE - if !vpc_class.haveRouteToInstance?(cloud_desc, credentials: @credentials) and - canonical_ip.match(/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1$)|(^[fF][cCdD])/) and - !nat_ssh_host - MU.log "Node #{@mu_name} at #{canonical_ip} looks like it's in a private address space, and I don't appear to have a direct route to it. It may not be possible to connect with this routing!", MU::WARN - end - elsif retries/max_retries > 0.5 - MU.log msg, MU::WARN, details: e.inspect - end - sleep retry_interval - retry - else - raise MuError, "#{@mu_name}: #{e.inspect} trying to connect with SSH, max_retries exceeded", e.backtrace - end - end - return session - end - end - - # Wrapper for the cleanup class method of underlying cloud object implementations. 
- def self.cleanup(*flags) - ok = true - params = flags.first - clouds = MU::Cloud.supportedClouds - if params[:cloud] - clouds = [params[:cloud]] - params.delete(:cloud) - end - - clouds.each { |cloud| - begin - cloudclass = MU::Cloud.loadCloudType(cloud, shortname) - - if cloudclass.isGlobal? - params.delete(:region) - end - - raise MuCloudResourceNotImplemented if !cloudclass.respond_to?(:cleanup) or cloudclass.method(:cleanup).owner.to_s != "#" - MU.log "Invoking #{cloudclass}.cleanup from #{shortname}", MU::DEBUG, details: flags - cloudclass.cleanup(params) - rescue MuCloudResourceNotImplemented - MU.log "No #{cloud} implementation of #{shortname}.cleanup, skipping", MU::DEBUG, details: flags - rescue StandardError => e - in_msg = cloud - if params and params[:region] - in_msg += " "+params[:region] - end - if params and params[:flags] and params[:flags]["project"] and !params[:flags]["project"].empty? - in_msg += " project "+params[:flags]["project"] - end - MU.log "Skipping #{shortname} cleanup method in #{in_msg} due to #{e.class.name}: #{e.message}", MU::WARN, details: e.backtrace - ok = false - end - } - MU::MommaCat.unlockAll - - ok - end - - # A hook that is always called just before each instance method is - # invoked, so that we can ensure that repetitive setup tasks (like - # resolving +:resource_group+ for Azure resources) have always been - # done. - def resourceInitHook - @cloud ||= cloud - if @cloudparentclass.respond_to?(:resourceInitHook) - @cloudparentclass.resourceInitHook(@cloudobj, @deploy) - end - end - - # Wrap the instance methods that this cloud resource type has to - # implement. - MU::Cloud.resource_types[name.to_sym][:instance].each { |method| - define_method method do |*args| - return nil if @cloudobj.nil? - MU.log "Invoking #{@cloudobj}.#{method}", MU::DEBUG - - # Go ahead and guarantee that we can't accidentally trigger these - # methods recursively. 
- @method_semaphore.synchronize { - # We're looking for recursion, not contention, so ignore some - # obviously harmless things. - if @method_locks.has_key?(method) and method != :findBastion and method != :cloud_id - MU.log "Double-call to cloud method #{method} for #{self}", MU::DEBUG, details: caller + ["competing call stack:"] + @method_locks[method] - end - @method_locks[method] = caller - } - - # Make sure the describe() caches are fresh - @cloudobj.describe if method != :describe - - # Don't run through dependencies on simple attr_reader lookups - if ![:dependencies, :cloud_id, :config, :mu_name].include?(method) - @cloudobj.dependencies - end - - retval = nil - if !args.nil? and args.size == 1 - retval = @cloudobj.method(method).call(args.first) - elsif !args.nil? and args.size > 0 - retval = @cloudobj.method(method).call(*args) - else - retval = @cloudobj.method(method).call - end - if (method == :create or method == :groom or method == :postBoot) and - (!@destroyed and !@cloudobj.destroyed) - deploydata = @cloudobj.method(:notify).call - @deploydata ||= deploydata # XXX I don't remember why we're not just doing this from the get-go; maybe because we prefer some mangling occurring in @deploy.notify? - if deploydata.nil? or !deploydata.is_a?(Hash) - MU.log "#{self} notify method did not return a Hash of deployment data, attempting to fill in with cloud descriptor #{@cloudobj.cloud_id}", MU::WARN - deploydata = MU.structToHash(@cloudobj.cloud_desc) - raise MuError, "Failed to collect metadata about #{self}" if deploydata.nil? - end - deploydata['cloud_id'] ||= @cloudobj.cloud_id if !@cloudobj.cloud_id.nil? - deploydata['mu_name'] = @cloudobj.mu_name if !@cloudobj.mu_name.nil? - deploydata['nodename'] = @cloudobj.mu_name if !@cloudobj.mu_name.nil? - deploydata.delete("#MUOBJECT") - @deploy.notify(self.class.cfg_plural, @config['name'], deploydata, triggering_node: @cloudobj, delayed_save: @delayed_save) if !@deploy.nil? 
- elsif method == :notify - retval['cloud_id'] = @cloudobj.cloud_id.to_s if !@cloudobj.cloud_id.nil? - retval['mu_name'] = @cloudobj.mu_name if !@cloudobj.mu_name.nil? - @deploy.notify(self.class.cfg_plural, @config['name'], retval, triggering_node: @cloudobj, delayed_save: @delayed_save) if !@deploy.nil? - end - @method_semaphore.synchronize { - @method_locks.delete(method) - } - - @deploydata = @cloudobj.deploydata - @config = @cloudobj.config - retval - end - } # end instance method list - } # end dynamic class generation block - } # end resource type iteration + require 'mu/cloud/machine_images' + require 'mu/cloud/resource_base' + require 'mu/cloud/providers' end diff --git a/modules/mu/cloud/machine_images.rb b/modules/mu/cloud/machine_images.rb new file mode 100644 index 000000000..02fb841fc --- /dev/null +++ b/modules/mu/cloud/machine_images.rb @@ -0,0 +1,212 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. + class Cloud + + # The public AWS S3 bucket where we expect to find YAML files listing our + # standard base images for various platforms. + BASE_IMAGE_BUCKET = "cloudamatic" + # The path in the AWS S3 bucket where we expect to find YAML files listing + # our standard base images for various platforms. 
+ BASE_IMAGE_PATH = "/images" + + # Aliases for platform names, in case we don't have actual images built for + # them. + PLATFORM_ALIASES = { + "linux" => "centos7", + "windows" => "win2k12r2", + "win2k12" => "win2k12r2", + "ubuntu" => "ubuntu16", + "centos" => "centos7", + "rhel7" => "rhel71", + "rhel" => "rhel71", + "amazon" => "amazon2016" + } + + @@image_fetch_cache = {} + @@platform_cache = [] + @@image_fetch_semaphore = Mutex.new + + # Rifle our image lists from {MU::Cloud.getStockImage} and return a list + # of valid +platform+ names. + # @return [Array] + def self.listPlatforms + return @@platform_cache if @@platform_cache and !@@platform_cache.empty? + @@platform_cache = MU::Cloud.supportedClouds.map { |cloud| + begin + resourceClass(cloud, :Server) + rescue MU::Cloud::MuCloudResourceNotImplemented, MU::MuError + next + end + + images = MU::Cloud.getStockImage(cloud, quiet: true) + if images + images.keys + else + nil + end + }.flatten.uniq + @@platform_cache.delete(nil) + @@platform_cache.sort + @@platform_cache + end + + # Locate a base image for a {MU::Cloud::Server} resource. First we check + # Mu's public bucket, which should list the latest and greatest. If we can't + # fetch that, then we fall back to a YAML file that's bundled as part of Mu, + # but which will typically be less up-to-date. + # @param cloud [String]: The cloud provider for which to return an image list + # @param platform [String]: The supported platform for which to return an image or images. If not specified, we'll return our entire library for the appropriate cloud provider. + # @param region [String]: The region for which the returned image or images should be supported, for cloud providers which require it (such as AWS). + # @param fail_hard [Boolean]: Raise an exception on most errors, such as an inability to reach our public listing, lack of matching images, etc. 
+ # @return [Hash,String,nil] + def self.getStockImage(cloud = MU::Config.defaultCloud, platform: nil, region: nil, fail_hard: false, quiet: false) + + if !MU::Cloud.supportedClouds.include?(cloud) + MU.log "'#{cloud}' is not a supported cloud provider! Available providers:", MU::ERR, details: MU::Cloud.supportedClouds + raise MuError, "'#{cloud}' is not a supported cloud provider!" + end + + urls = ["http://"+BASE_IMAGE_BUCKET+".s3-website-us-east-1.amazonaws.com"+BASE_IMAGE_PATH] + if $MU_CFG and $MU_CFG['custom_images_url'] + urls << $MU_CFG['custom_images_url'] + end + + images = nil + urls.each { |base_url| + @@image_fetch_semaphore.synchronize { + if @@image_fetch_cache[cloud] and (Time.now - @@image_fetch_cache[cloud]['time']) < 30 + images = @@image_fetch_cache[cloud]['contents'].dup + else + begin + Timeout.timeout(2) do + response = open("#{base_url}/#{cloud}.yaml").read + images ||= {} + images.deep_merge!(YAML.load(response)) + break + end + rescue StandardError => e + if fail_hard + raise MuError, "Failed to fetch stock images from #{base_url}/#{cloud}.yaml (#{e.message})" + else + MU.log "Failed to fetch stock images from #{base_url}/#{cloud}.yaml (#{e.message})", MU::WARN if !quiet + end + end + end + } + } + + @@image_fetch_semaphore.synchronize { + @@image_fetch_cache[cloud] = { + 'contents' => images.dup, + 'time' => Time.now + } + } + + backwards_compat = { + "AWS" => "amazon_images", + "Google" => "google_images", + } + + # Load from inside our repository, if we didn't get images elsewise + if images.nil? + [backwards_compat[cloud], cloud].each { |file| + next if file.nil? + if File.exist?("#{MU.myRoot}/modules/mu/defaults/#{file}.yaml") + images = YAML.load(File.read("#{MU.myRoot}/modules/mu/defaults/#{file}.yaml")) + break + end + } + end + + # Now overlay local overrides, both of the systemwide (/opt/mu/etc) and + # per-user (~/.mu/etc) variety. + [backwards_compat[cloud], cloud].each { |file| + next if file.nil? 
+ if File.exist?("#{MU.etcDir}/#{file}.yaml") + images ||= {} + images.deep_merge!(YAML.load(File.read("#{MU.etcDir}/#{file}.yaml"))) + end + if Process.uid != 0 + basepath = Etc.getpwuid(Process.uid).dir+"/.mu/etc" + if File.exist?("#{basepath}/#{file}.yaml") + images ||= {} + images.deep_merge!(YAML.load(File.read("#{basepath}/#{file}.yaml"))) + end + end + } + + if images.nil? + if fail_hard + raise MuError, "Failed to find any base images for #{cloud}" + else + MU.log "Failed to find any base images for #{cloud}", MU::WARN if !quiet + return nil + end + end + + PLATFORM_ALIASES.each_pair { |a, t| + if images[t] and !images[a] + images[a] = images[t] + end + } + + if platform + if !images[platform] + if fail_hard + raise MuError, "No base image for platform #{platform} in cloud #{cloud}" + else + MU.log "No base image for platform #{platform} in cloud #{cloud}", MU::WARN if !quiet + return nil + end + end + images = images[platform] + + if region + # We won't fuss about the region argument if this isn't a cloud that + # has regions, just quietly don't bother. + if images.is_a?(Hash) + if images[region] + images = images[region] + else + if fail_hard + raise MuError, "No base image for platform #{platform} in cloud #{cloud} region #{region} found" + else + MU.log "No base image for platform #{platform} in cloud #{cloud} region #{region} found", MU::WARN if !quiet + return nil + end + end + end + end + else + if region + images.values.each { |regions| + # Filter to match our requested region, but for all the platforms, + # since we didn't specify one. 
+ if regions.is_a?(Hash) + regions.delete_if { |r| r != region } + end + } + end + end + + images + end + + end + +end diff --git a/modules/mu/cloud/providers.rb b/modules/mu/cloud/providers.rb new file mode 100644 index 000000000..dc101b2c8 --- /dev/null +++ b/modules/mu/cloud/providers.rb @@ -0,0 +1,81 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. + class Cloud + + # List of known/supported Cloud providers. This may be modified at runtime + # if an implemention is defective or missing required methods. + @@supportedCloudList = ['AWS', 'CloudFormation', 'Google', 'Azure'] + + # List of known/supported Cloud providers + # @return [Array] + def self.supportedClouds + @@supportedCloudList + end + + # Raise an exception if the cloud provider specified isn't valid + def self.cloudClass(cloud) + if cloud.nil? or !supportedClouds.include?(cloud.to_s) + raise MuError, "Cloud provider #{cloud} is not supported" + end + Object.const_get("MU").const_get("Cloud").const_get(cloud.to_s) + end + + # List of known/supported Cloud providers for which we have at least one + # set of credentials configured. 
+ # @return [Array] + def self.availableClouds + available = [] + MU::Cloud.supportedClouds.each { |cloud| + begin + cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) + next if cloudbase.listCredentials.nil? or cloudbase.listCredentials.empty? + available << cloud + rescue NameError + end + } + + available + end + + # Raise an exception if the cloud provider specified isn't valid or we + # don't have any credentials configured for it. + def self.assertAvailableCloud(cloud) + if cloud.nil? or availableClouds.include?(cloud.to_s) + raise MuError, "Cloud provider #{cloud} is not available" + end + end + + # Load the container class for each cloud we know about, and inject autoload + # code for each of its supported resource type classes. + failed = [] + MU::Cloud.supportedClouds.each { |cloud| + require "mu/providers/#{cloud.downcase}" + cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + @@generic_class_methods_toplevel.each { |method| + if !cloudclass.respond_to?(method) + MU.log "MU::Cloud::#{cloud} has not implemented required class method #{method}, disabling", MU::ERR + failed << cloud + end + } + } + failed.uniq! + @@supportedCloudList = @@supportedCloudList - failed + + end + +end diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb new file mode 100644 index 000000000..562c39ff9 --- /dev/null +++ b/modules/mu/cloud/resource_base.rb @@ -0,0 +1,1089 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. + class Cloud + + @@resource_types.keys.each { |name| + Object.const_get("MU").const_get("Cloud").const_get(name).class_eval { + attr_reader :cloudclass + attr_reader :cloudobj + attr_reader :destroyed + attr_reader :delayed_save + + def self.shortname + name.sub(/.*?::([^:]+)$/, '\1') + end + + def self.cfg_plural + MU::Cloud.resource_types[shortname.to_sym][:cfg_plural] + end + + def self.has_multiples + MU::Cloud.resource_types[shortname.to_sym][:has_multiples] + end + + def self.cfg_name + MU::Cloud.resource_types[shortname.to_sym][:cfg_name] + end + + def self.can_live_in_vpc + MU::Cloud.resource_types[shortname.to_sym][:can_live_in_vpc] + end + + def self.waits_on_parent_completion + MU::Cloud.resource_types[shortname.to_sym][:waits_on_parent_completion] + end + + def self.deps_wait_on_my_creation + MU::Cloud.resource_types[shortname.to_sym][:deps_wait_on_my_creation] + end + + # Print something palatable when we're called in a string context. + def to_s + fullname = "#{self.class.shortname}" + if !@cloudobj.nil? and !@cloudobj.mu_name.nil? + @mu_name ||= @cloudobj.mu_name + end + if !@mu_name.nil? and !@mu_name.empty? + fullname = fullname + " '#{@mu_name}'" + end + if !@cloud_id.nil? + fullname = fullname + " (#{@cloud_id})" + end + return fullname + end + + # Set our +deploy+ and +deploy_id+ attributes, optionally doing so even + # if they have already been set. 
+ # + # @param mommacat [MU::MommaCat]: The deploy to which we're being told we belong + # @param force [Boolean]: Set even if we already have a deploy object + # @return [String]: Our new +deploy_id+ + def intoDeploy(mommacat, force: false) + if force or (!@deploy) + MU.log "Inserting #{self} [#{self.object_id}] into #{mommacat.deploy_id} as a #{@config['name']}", MU::DEBUG + + @deploy = mommacat + @deploy.addKitten(@cloudclass.cfg_plural, @config['name'], self) + @deploy_id = @deploy.deploy_id + @cloudobj.intoDeploy(mommacat, force: force) if @cloudobj + end + @deploy_id + end + + # Return the +virtual_name+ config field, if it is set. + # @param name [String]: If set, will only return a value if +virtual_name+ matches this string + # @return [String,nil] + def virtual_name(name = nil) + if @config and @config['virtual_name'] and + (!name or name == @config['virtual_name']) + return @config['virtual_name'] + end + nil + end + + # @param mommacat [MU::MommaCat]: The deployment containing this cloud resource + # @param mu_name [String]: Optional- specify the full Mu resource name of an existing resource to load, instead of creating a new one + # @param cloud_id [String]: Optional- specify the cloud provider's identifier for an existing resource to load, instead of creating a new one + # @param kitten_cfg [Hash]: The parse configuration for this object from {MU::Config} + def initialize(**args) + raise MuError, "Cannot invoke Cloud objects without a configuration" if args[:kitten_cfg].nil? + + # We are a parent wrapper object. Initialize our child object and + # housekeeping bits accordingly. + if self.class.name.match(/^MU::Cloud::([^:]+)$/) + @live = true + @delayed_save = args[:delayed_save] + @method_semaphore = Mutex.new + @method_locks = {} + if args[:mommacat] + MU.log "Initializing an instance of #{self.class.name} in #{args[:mommacat].deploy_id} #{mu_name}", MU::DEBUG, details: args[:kitten_cfg] + elsif args[:mu_name].nil? 
+ raise MuError, "Can't instantiate a MU::Cloud object with a live deploy or giving us a mu_name" + else + MU.log "Initializing a detached #{self.class.name} named #{args[:mu_name]}", MU::DEBUG, details: args[:kitten_cfg] + end + + my_cloud = args[:kitten_cfg]['cloud'].to_s || MU::Config.defaultCloud + if my_cloud.nil? or !MU::Cloud.supportedClouds.include?(my_cloud) + raise MuError, "Can't instantiate a MU::Cloud object without a valid cloud (saw '#{my_cloud}')" + end + @cloudclass = MU::Cloud.resourceClass(my_cloud, self.class.shortname) + @cloudparentclass = Object.const_get("MU").const_get("Cloud").const_get(my_cloud) + @cloudobj = @cloudclass.new( + mommacat: args[:mommacat], + kitten_cfg: args[:kitten_cfg], + cloud_id: args[:cloud_id], + mu_name: args[:mu_name] + ) + raise MuError, "Unknown error instantiating #{self}" if @cloudobj.nil? +# These should actually call the method live instead of caching a static value + PUBLIC_ATTRS.each { |a| + instance_variable_set(("@"+a.to_s).to_sym, @cloudobj.send(a)) + } + @deploy ||= args[:mommacat] + @deploy_id ||= @deploy.deploy_id if @deploy + + # Register with the containing deployment + if !@deploy.nil? and !@cloudobj.mu_name.nil? and + !@cloudobj.mu_name.empty? and !args[:delay_descriptor_load] + describe # XXX is this actually safe here? + @deploy.addKitten(self.class.cfg_name, @config['name'], self) + elsif !@deploy.nil? and @cloudobj.mu_name.nil? + MU.log "#{self} in #{@deploy.deploy_id} didn't generate a mu_name after being loaded/initialized, dependencies on this resource will probably be confused!", MU::ERR, details: [caller, args.keys] + end + + # We are actually a child object invoking this via super() from its + # own initialize(), so initialize all the attributes and instance + # variables we know to be universal. 
+ else + + class << self + # Declare attributes that everyone should have + PUBLIC_ATTRS.each { |a| + attr_reader a + } + end +# XXX this butchers ::Id and ::Ref objects that might be used by dependencies() to good effect, but we also can't expect our implementations to cope with knowing when a .to_s has to be appended to things at random + @config = MU::Config.manxify(args[:kitten_cfg]) || MU::Config.manxify(args[:config]) + + if !@config + MU.log "Missing config arguments in setInstanceVariables, can't initialize a cloud object without it", MU::ERR, details: args.keys + raise MuError, "Missing config arguments in setInstanceVariables" + end + + @deploy = args[:mommacat] || args[:deploy] + + @credentials = args[:credentials] + @credentials ||= @config['credentials'] + + @cloud = @config['cloud'] + if !@cloud + if self.class.name.match(/^MU::Cloud::([^:]+)(?:::.+|$)/) + cloudclass_name = Regexp.last_match[1] + if MU::Cloud.supportedClouds.include?(cloudclass_name) + @cloud = cloudclass_name + end + end + end + if !@cloud + raise MuError, "Failed to determine what cloud #{self} should be in!" + end + + @environment = @config['environment'] + if @deploy + @deploy_id = @deploy.deploy_id + @appname = @deploy.appname + end + + @cloudclass = MU::Cloud.resourceClass(@cloud, self.class.shortname) + @cloudparentclass = Object.const_get("MU").const_get("Cloud").const_get(@cloud) + + # A pre-existing object, you say? + if args[:cloud_id] + +# TODO implement ::Id for every cloud... and they should know how to get from +# cloud_desc to a fully-resolved ::Id object, not just the short string + + @cloud_id = args[:cloud_id] + describe(cloud_id: @cloud_id) + @habitat_id = habitat_id # effectively, cache this + + # If we can build us an ::Id object for @cloud_id instead of a + # string, do so. 
+ begin + idclass = Object.const_get("MU").const_get("Cloud").const_get(@cloud).const_get("Id") + long_id = if @deploydata and @deploydata[idclass.idattr.to_s] + @deploydata[idclass.idattr.to_s] + elsif self.respond_to?(idclass.idattr) + self.send(idclass.idattr) + end + + @cloud_id = idclass.new(long_id) if !long_id.nil? and !long_id.empty? +# 1 see if we have the value on the object directly or in deploy data +# 2 set an attr_reader with the value +# 3 rewrite our @cloud_id attribute with a ::Id object + rescue NameError, MU::Cloud::MuCloudResourceNotImplemented + end + + end + + # Use pre-existing mu_name (we're probably loading an extant deploy) + # if available + if args[:mu_name] + @mu_name = args[:mu_name].dup + # If scrub_mu_isms is set, our mu_name is always just the bare name + # field of the resource. + elsif @config['scrub_mu_isms'] + @mu_name = @config['name'].dup +# XXX feck it insert an inheritable method right here? Set a default? How should resource implementations determine whether they're instantiating a new object? + end + + @tags = {} + if !@config['scrub_mu_isms'] + @tags = @deploy ? @deploy.listStandardTags : MU::MommaCat.listStandardTags + end + if @config['tags'] + @config['tags'].each { |tag| + @tags[tag['key']] = tag['value'] + } + end + + if @cloudparentclass.respond_to?(:resourceInitHook) + @cloudparentclass.resourceInitHook(self, @deploy) + end + + # Add cloud-specific instance methods for our resource objects to + # inherit. + if @cloudparentclass.const_defined?(:AdditionalResourceMethods) + self.extend @cloudparentclass.const_get(:AdditionalResourceMethods) + end + + if ["Server", "ServerPool"].include?(self.class.shortname) and @deploy + @mu_name ||= @deploy.getResourceName(@config['name'], need_unique_string: @config.has_key?("basis")) + if self.class.shortname == "Server" + @groomer = MU::Groomer.new(self) + end + + @groomclass = MU::Groomer.loadGroomer(@config["groomer"]) + + if windows? 
or @config['active_directory'] and !@mu_windows_name + if !@deploydata.nil? and !@deploydata['mu_windows_name'].nil? + @mu_windows_name = @deploydata['mu_windows_name'] + else + # Use the same random differentiator as the "real" name if we're + # from a ServerPool. Helpful for admin sanity. + unq = @mu_name.sub(/^.*?-(...)$/, '\1') + if @config['basis'] and !unq.nil? and !unq.empty? + @mu_windows_name = @deploy.getResourceName(@config['name'], max_length: 15, need_unique_string: true, use_unique_string: unq, reuse_unique_string: true) + else + @mu_windows_name = @deploy.getResourceName(@config['name'], max_length: 15, need_unique_string: true) + end + end + end + class << self + attr_reader :groomer + attr_reader :groomerclass + attr_accessor :mu_windows_name # XXX might be ok as reader now + end + end + end + + end + + def cloud + if @cloud + @cloud + elsif @config and @config['cloud'] + @config['cloud'] + elsif self.class.name.match(/^MU::Cloud::([^:]+)::.+/) + cloudclass_name = Regexp.last_match[1] + if MU::Cloud.supportedClouds.include?(cloudclass_name) + cloudclass_name + else + nil + end + else + nil + end + end + + + # Remove all metadata and cloud resources associated with this object + def destroy + if self.class.cfg_name == "server" + begin + ip = canonicalIP + MU::Master.removeIPFromSSHKnownHosts(ip) if ip + if @deploy and @deploy.deployment and + @deploy.deployment['servers'] and @config['name'] + me = @deploy.deployment['servers'][@config['name']][@mu_name] + if me + ["private_ip_address", "public_ip_address"].each { |field| + if me[field] + MU::Master.removeIPFromSSHKnownHosts(me[field]) + end + } + if me["private_ip_list"] + me["private_ip_list"].each { |private_ip| + MU::Master.removeIPFromSSHKnownHosts(private_ip) + } + end + end + end + rescue MU::MuError => e + MU.log e.message, MU::WARN + end + end + if !@cloudobj.nil? and !@cloudobj.groomer.nil? + @cloudobj.groomer.cleanup + elsif !@groomer.nil? + @groomer.cleanup + end + if !@deploy.nil? 
+ if !@cloudobj.nil? and !@config.nil? and !@cloudobj.mu_name.nil? + @deploy.notify(self.class.cfg_plural, @config['name'], nil, mu_name: @cloudobj.mu_name, remove: true, triggering_node: @cloudobj, delayed_save: @delayed_save) + elsif !@mu_name.nil? + @deploy.notify(self.class.cfg_plural, @config['name'], nil, mu_name: @mu_name, remove: true, triggering_node: self, delayed_save: @delayed_save) + end + @deploy.removeKitten(self) + end + # Make sure that if notify gets called again it won't go returning a + # bunch of now-bogus metadata. + @destroyed = true + if !@cloudobj.nil? + def @cloudobj.notify + {} + end + else + def notify + {} + end + end + end + + # Return the cloud object's idea of where it lives (project, account, + # etc) in the form of an identifier. If not applicable for this object, + # we expect to return +nil+. + # @return [String,nil] + def habitat(nolookup: true) + return nil if ["folder", "habitat"].include?(self.class.cfg_name) + if @cloudobj + @cloudparentclass.habitat(@cloudobj, nolookup: nolookup, deploy: @deploy) + else + @cloudparentclass.habitat(self, nolookup: nolookup, deploy: @deploy) + end + end + + def habitat_id(nolookup: false) + @habitat_id ||= habitat(nolookup: nolookup) + @habitat_id + end + + # We're fundamentally a wrapper class, so go ahead and reroute requests + # that are meant for our wrapped object. + def method_missing(method_sym, *arguments) + if @cloudobj + MU.log "INVOKING #{method_sym.to_s} FROM PARENT CLOUD OBJECT #{self}", MU::DEBUG, details: arguments + @cloudobj.method(method_sym).call(*arguments) + else + raise NoMethodError, "No such instance method #{method_sym.to_s} available on #{self.class.name}" + end + end + + # Merge the passed hash into the existing configuration hash of this + # cloud object. Currently this is only used by the {MU::Adoption} + # module. I don't love exposing this to the whole internal API, but I'm + # probably overthinking that. 
+ # @param newcfg [Hash] + def config!(newcfg) + @config.merge!(newcfg) + end + + def cloud_desc(use_cache: true) + describe + + if !@cloudobj.nil? + if @cloudobj.class.instance_methods(false).include?(:cloud_desc) + @cloud_desc_cache ||= @cloudobj.cloud_desc + end + end + if !@config.nil? and !@cloud_id.nil? and (!use_cache or @cloud_desc_cache.nil?) + # The find() method should be returning a Hash with the cloud_id + # as a key and a cloud platform descriptor as the value. + begin + args = { + :region => @config['region'], + :cloud => @config['cloud'], + :cloud_id => @cloud_id, + :credentials => @credentials, + :project => habitat_id, # XXX this belongs in our required_instance_methods hack + :flags => @config + } + @cloudparentclass.required_instance_methods.each { |m| +# if respond_to?(m) +# args[m] = method(m).call +# else + args[m] = instance_variable_get(("@"+m.to_s).to_sym) +# end + } + + matches = self.class.find(args) + if !matches.nil? and matches.is_a?(Hash) +# XXX or if the hash is keyed with an ::Id element, oh boy +# puts matches[@cloud_id][:self_link] +# puts matches[@cloud_id][:url] +# if matches[@cloud_id][:self_link] +# @url ||= matches[@cloud_id][:self_link] +# elsif matches[@cloud_id][:url] +# @url ||= matches[@cloud_id][:url] +# elsif matches[@cloud_id][:arn] +# @arn ||= matches[@cloud_id][:arn] +# end + if matches[@cloud_id] + @cloud_desc_cache = matches[@cloud_id] + else + matches.each_pair { |k, v| # flatten out ::Id objects just in case + if @cloud_id.to_s == k.to_s + @cloud_desc_cache = v + break + end + } + end + end + + if !@cloud_desc_cache + MU.log "cloud_desc via #{self.class.name}.find() failed to locate a live object.\nWas called by #{caller[0]}", MU::WARN, details: args + end + rescue StandardError => e + MU.log "Got #{e.inspect} trying to find cloud handle for #{self.class.shortname} #{@mu_name} (#{@cloud_id})", MU::WARN + raise e + end + end + + return @cloud_desc_cache + end + + # Retrieve all of the known metadata for this 
resource. + # @param cloud_id [String]: The cloud platform's identifier for the resource we're describing. Makes lookups more efficient. + # @return [Array]: mu_name, config, deploydata + def describe(cloud_id: nil) + if cloud_id.nil? and !@cloudobj.nil? + @cloud_id ||= @cloudobj.cloud_id + end + res_type = self.class.cfg_plural + res_name = @config['name'] if !@config.nil? + @credentials ||= @config['credentials'] if !@config.nil? + deploydata = nil + if !@deploy.nil? and @deploy.is_a?(MU::MommaCat) and + !@deploy.deployment.nil? and + !@deploy.deployment[res_type].nil? and + !@deploy.deployment[res_type][res_name].nil? + deploydata = @deploy.deployment[res_type][res_name] + else + # XXX This should only happen on a brand new resource, but we should + # probably complain under other circumstances, if we can + # differentiate them. + end + + if self.class.has_multiples and !@mu_name.nil? and deploydata.is_a?(Hash) and deploydata.has_key?(@mu_name) + @deploydata = deploydata[@mu_name] + elsif deploydata.is_a?(Hash) + @deploydata = deploydata + end + + if @cloud_id.nil? and @deploydata.is_a?(Hash) + if @mu_name.nil? and @deploydata.has_key?('#MU_NAME') + @mu_name = @deploydata['#MU_NAME'] + end + if @deploydata.has_key?('cloud_id') + @cloud_id ||= @deploydata['cloud_id'] + end + end + + return [@mu_name, @config, @deploydata] + end + + # Fetch MU::Cloud objects for each of this object's dependencies, and + # return in an easily-navigable Hash. This can include things listed in + # @config['dependencies'], implicitly-defined dependencies such as + # add_firewall_rules or vpc stanzas, and may refer to objects internal + # to this deployment or external. 
Will populate the instance variables + # @dependencies (general dependencies, which can only be sibling + # resources in this deployment), as well as for certain config stanzas + # which can refer to external resources (@vpc, @loadbalancers, + # @add_firewall_rules) + def dependencies(use_cache: false, debug: false) + @dependencies ||= {} + @loadbalancers ||= [] + @firewall_rules ||= [] + + if @config.nil? + return [@dependencies, @vpc, @loadbalancers] + end + if use_cache and @dependencies.size > 0 + return [@dependencies, @vpc, @loadbalancers] + end + @config['dependencies'] = [] if @config['dependencies'].nil? + + loglevel = debug ? MU::NOTICE : MU::DEBUG + + # First, general dependencies. These should all be fellow members of + # the current deployment. + @config['dependencies'].each { |dep| + @dependencies[dep['type']] ||= {} + next if @dependencies[dep['type']].has_key?(dep['name']) + handle = @deploy.findLitterMate(type: dep['type'], name: dep['name']) if !@deploy.nil? + if !handle.nil? + MU.log "Loaded dependency for #{self}: #{dep['name']} => #{handle}", loglevel + @dependencies[dep['type']][dep['name']] = handle + else + # XXX yell under circumstances where we should expect to have + # our stuff available already? + end + } + + # Special dependencies: my containing VPC + if self.class.can_live_in_vpc and !@config['vpc'].nil? + @config['vpc']["id"] ||= @config['vpc']["vpc_id"] # old deploys + @config['vpc']["name"] ||= @config['vpc']["vpc_name"] # old deploys + # If something hash-ified a MU::Config::Ref here, fix it + if !@config['vpc']["id"].nil? and @config['vpc']["id"].is_a?(Hash) + @config['vpc']["id"] = MU::Config::Ref.new(@config['vpc']["id"]) + end + if !@config['vpc']["id"].nil? + if @config['vpc']["id"].is_a?(MU::Config::Ref) and !@config['vpc']["id"].kitten.nil? 
+ @vpc = @config['vpc']["id"].kitten(@deploy) + else + if @config['vpc']['habitat'] + @config['vpc']['habitat'] = MU::Config::Ref.get(@config['vpc']['habitat']) + end + vpc_ref = MU::Config::Ref.get(@config['vpc']) + @vpc = vpc_ref.kitten(@deploy) + end + elsif !@config['vpc']["name"].nil? and @deploy + MU.log "Attempting findLitterMate on VPC for #{self}", loglevel, details: @config['vpc'] + + sib_by_name = @deploy.findLitterMate(name: @config['vpc']['name'], type: "vpcs", return_all: true, habitat: @config['vpc']['project'], debug: debug) + if sib_by_name.is_a?(Array) + if sib_by_name.size == 1 + @vpc = matches.first + MU.log "Single VPC match for #{self}", loglevel, details: @vpc.to_s + else +# XXX ok but this is the wrong place for this really the config parser needs to sort this out somehow + # we got multiple matches, try to pick one by preferred subnet + # behavior + MU.log "Sorting a bunch of VPC matches for #{self}", loglevel, details: sib_by_name.map { |s| s.to_s }.join(", ") + sib_by_name.each { |sibling| + all_private = sibling.subnets.map { |s| s.private? }.all?(true) + all_public = sibling.subnets.map { |s| s.private? 
}.all?(false) + names = sibling.subnets.map { |s| s.name } + ids = sibling.subnets.map { |s| s.cloud_id } + if all_private and ["private", "all_private"].include?(@config['vpc']['subnet_pref']) + @vpc = sibling + break + elsif all_public and ["public", "all_public"].include?(@config['vpc']['subnet_pref']) + @vpc = sibling + break + elsif @config['vpc']['subnet_name'] and + names.include?(@config['vpc']['subnet_name']) +puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}" + @vpc = sibling + break + elsif @config['vpc']['subnet_id'] and + ids.include?(@config['vpc']['subnet_id']) + @vpc = sibling + break + end + } + if !@vpc + sibling = sib_by_name.sample + MU.log "Got multiple matching VPCs for #{self.class.cfg_name} #{@mu_name}, so I'm arbitrarily choosing #{sibling.mu_name}", MU::WARN, details: @config['vpc'] + @vpc = sibling + end + end + else + @vpc = sib_by_name + MU.log "Found exact VPC match for #{self}", loglevel, details: sib_by_name.to_s + end + else + MU.log "No shortcuts available to fetch VPC for #{self}", loglevel, details: @config['vpc'] + end + + if !@vpc and !@config['vpc']["name"].nil? and + @dependencies.has_key?("vpc") and + @dependencies["vpc"].has_key?(@config['vpc']["name"]) + MU.log "Grabbing VPC I see in @dependencies['vpc']['#{@config['vpc']["name"]}'] for #{self}", loglevel, details: @config['vpc'] + @vpc = @dependencies["vpc"][@config['vpc']["name"]] + elsif !@vpc + tag_key, tag_value = @config['vpc']['tag'].split(/=/, 2) if !@config['vpc']['tag'].nil? + if !@config['vpc'].has_key?("id") and + !@config['vpc'].has_key?("deploy_id") and !@deploy.nil? 
+ @config['vpc']["deploy_id"] = @deploy.deploy_id + end + MU.log "Doing findStray for VPC for #{self}", loglevel, details: @config['vpc'] + vpcs = MU::MommaCat.findStray( + @config['cloud'], + "vpc", + deploy_id: @config['vpc']["deploy_id"], + cloud_id: @config['vpc']["id"], + name: @config['vpc']["name"], + tag_key: tag_key, + tag_value: tag_value, + habitats: [@project_id], + region: @config['vpc']["region"], + calling_deploy: @deploy, + credentials: @credentials, + dummy_ok: true, + debug: debug + ) + @vpc = vpcs.first if !vpcs.nil? and vpcs.size > 0 + end + if @vpc and @vpc.config and @vpc.config['bastion'] and + @vpc.config['bastion'].to_h['name'] != @config['name'] + refhash = @vpc.config['bastion'].to_h + refhash['deploy_id'] ||= @vpc.deploy.deploy_id + natref = MU::Config::Ref.get(refhash) + if natref and natref.kitten(@vpc.deploy) + @nat = natref.kitten(@vpc.deploy) + end + end + if @nat.nil? and !@vpc.nil? and ( + @config['vpc'].has_key?("nat_host_id") or + @config['vpc'].has_key?("nat_host_tag") or + @config['vpc'].has_key?("nat_host_ip") or + @config['vpc'].has_key?("nat_host_name") + ) + + nat_tag_key, nat_tag_value = @config['vpc']['nat_host_tag'].split(/=/, 2) if !@config['vpc']['nat_host_tag'].nil? + + @nat = @vpc.findBastion( + nat_name: @config['vpc']['nat_host_name'], + nat_cloud_id: @config['vpc']['nat_host_id'], + nat_tag_key: nat_tag_key, + nat_tag_value: nat_tag_value, + nat_ip: @config['vpc']['nat_host_ip'] + ) + + if @nat.nil? + if !@vpc.cloud_desc.nil? + @nat = @vpc.findNat( + nat_cloud_id: @config['vpc']['nat_host_id'], + nat_filter_key: "vpc-id", + region: @config['vpc']["region"], + nat_filter_value: @vpc.cloud_id, + credentials: @config['credentials'] + ) + else + @nat = @vpc.findNat( + nat_cloud_id: @config['vpc']['nat_host_id'], + region: @config['vpc']["region"], + credentials: @config['credentials'] + ) + end + end + end + if @vpc.nil? 
and @config['vpc'] + feck = MU::Config::Ref.get(@config['vpc']) + feck.kitten(@deploy, debug: true) + pp feck + raise MuError.new "#{self.class.cfg_name} #{@config['name']} failed to locate its VPC", details: @config['vpc'] + end + elsif self.class.cfg_name == "vpc" + @vpc = self + end + + # Google accounts usually have a useful default VPC we can use + if @vpc.nil? and @project_id and @cloud == "Google" and + self.class.can_live_in_vpc + MU.log "Seeing about default VPC for #{self.to_s}", MU::NOTICE + vpcs = MU::MommaCat.findStray( + "Google", + "vpc", + cloud_id: "default", + habitats: [@project_id], + credentials: @credentials, + dummy_ok: true, + debug: debug + ) + @vpc = vpcs.first if !vpcs.nil? and vpcs.size > 0 + end + + # Special dependencies: LoadBalancers I've asked to attach to an + # instance. + if @config.has_key?("loadbalancers") + @loadbalancers = [] if !@loadbalancers + @config['loadbalancers'].each { |lb| + MU.log "Loading LoadBalancer for #{self}", MU::DEBUG, details: lb + if @dependencies.has_key?("loadbalancer") and + @dependencies["loadbalancer"].has_key?(lb['concurrent_load_balancer']) + @loadbalancers << @dependencies["loadbalancer"][lb['concurrent_load_balancer']] + else + if !lb.has_key?("existing_load_balancer") and + !lb.has_key?("deploy_id") and !@deploy.nil? + lb["deploy_id"] = @deploy.deploy_id + end + lbs = MU::MommaCat.findStray( + @config['cloud'], + "loadbalancer", + deploy_id: lb["deploy_id"], + cloud_id: lb['existing_load_balancer'], + name: lb['concurrent_load_balancer'], + region: @config["region"], + calling_deploy: @deploy, + dummy_ok: true + ) + @loadbalancers << lbs.first if !lbs.nil? and lbs.size > 0 + end + } + end + + # Munge in external resources referenced by the existing_deploys + # keyword + if @config["existing_deploys"] && !@config["existing_deploys"].empty? 
+ @config["existing_deploys"].each { |ext_deploy| + if ext_deploy["cloud_id"] + found = MU::MommaCat.findStray( + @config['cloud'], + ext_deploy["cloud_type"], + cloud_id: ext_deploy["cloud_id"], + region: @config['region'], + dummy_ok: false + ).first + + MU.log "Couldn't find existing resource #{ext_deploy["cloud_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil? + @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: found.mu_name, triggering_node: @mu_name) + elsif ext_deploy["mu_name"] && ext_deploy["deploy_id"] + MU.log "#{ext_deploy["mu_name"]} / #{ext_deploy["deploy_id"]}" + found = MU::MommaCat.findStray( + @config['cloud'], + ext_deploy["cloud_type"], + deploy_id: ext_deploy["deploy_id"], + mu_name: ext_deploy["mu_name"], + region: @config['region'], + dummy_ok: false + ).first + + MU.log "Couldn't find existing resource #{ext_deploy["mu_name"]}/#{ext_deploy["deploy_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil? + @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: ext_deploy["mu_name"], triggering_node: @mu_name) + else + MU.log "Trying to find existing deploy, but either the cloud_id is not valid or no mu_name and deploy_id where provided", MU::ERR + end + } + end + + if @config['dns_records'] && !@config['dns_records'].empty? + @config['dns_records'].each { |dnsrec| + if dnsrec.has_key?("name") + if dnsrec['name'].start_with?(@deploy.deploy_id.downcase) && !dnsrec['name'].start_with?(@mu_name.downcase) + MU.log "DNS records for #{@mu_name} seem to be wrong, deleting from current config", MU::WARN, details: dnsrec + dnsrec.delete('name') + dnsrec.delete('target') + end + end + } + end + + return [@dependencies, @vpc, @loadbalancers] + end + + # Using the automatically-defined +@vpc+ from {dependencies} in + # conjunction with our config, return our configured subnets. 
+ # @return [Array] + def mySubnets + dependencies + if !@vpc or !@config["vpc"] + return nil + end + + if @config["vpc"]["subnet_id"] or @config["vpc"]["subnet_name"] + @config["vpc"]["subnets"] ||= [] + subnet_block = {} + subnet_block["subnet_id"] = @config["vpc"]["subnet_id"] if @config["vpc"]["subnet_id"] + subnet_block["subnet_name"] = @config["vpc"]["subnet_name"] if @config["vpc"]["subnet_name"] + @config["vpc"]["subnets"] << subnet_block + @config["vpc"]["subnets"].uniq! + end + + if (!@config["vpc"]["subnets"] or @config["vpc"]["subnets"].empty?) and + !@config["vpc"]["subnet_id"] + return @vpc.subnets + end + + subnets = [] + @config["vpc"]["subnets"].each { |subnet| + subnet_obj = @vpc.getSubnet(cloud_id: subnet["subnet_id"].to_s, name: subnet["subnet_name"].to_s) + raise MuError, "Couldn't find a live subnet for #{self.to_s} matching #{subnet} in #{@vpc.to_s} (#{@vpc.subnets.map { |s| s.name }.join(",")})" if subnet_obj.nil? + subnets << subnet_obj + } + + subnets + end + + # @return [Array] + def myFirewallRules + dependencies + + rules = [] + if @dependencies.has_key?("firewall_rule") + rules = @dependencies['firewall_rule'].values + end +# XXX what other ways are these specified? + + rules + end + + # If applicable, allow this resource's NAT host blanket access via + # rules in its associated +admin+ firewall rule set. + def allowBastionAccess + return nil if !@nat or !@nat.is_a?(MU::Cloud::Server) + + myFirewallRules.each { |acl| + if acl.config["admin"] + acl.addRule(@nat.listIPs, proto: "tcp") + acl.addRule(@nat.listIPs, proto: "udp") + acl.addRule(@nat.listIPs, proto: "icmp") + end + } + end + + # Defaults any resources that don't declare their release-readiness to + # ALPHA. That'll learn 'em. + def self.quality + MU::Cloud::ALPHA + end + + # Return a list of "container" artifacts, by class, that apply to this + # resource type in a cloud provider. 
This is so methods that call find + # know whether to call +find+ with identifiers for parent resources. + # This is similar in purpose to the +isGlobal?+ resource class method, + # which tells our search functions whether or not a resource scopes to + # a region. In almost all cases this is one-entry list consisting of + # +:Habitat+. Notable exceptions include most implementations of + # +Habitat+, which either reside inside a +:Folder+ or nothing at all; + # whereas a +:Folder+ tends to not have any containing parent. Very few + # resource implementations will need to override this. + # A +nil+ entry in this list is interpreted as "this resource can be + # global." + # @return [Array] + def self.canLiveIn + if self.shortname == "Folder" + [nil, :Folder] + elsif self.shortname == "Habitat" + [:Folder] + else + [:Habitat] + end + end + + def self.find(*flags) + allfound = {} + + MU::Cloud.availableClouds.each { |cloud| + begin + args = flags.first + next if args[:cloud] and args[:cloud] != cloud + # skip this cloud if we have a region argument that makes no + # sense there + cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) + next if cloudbase.listCredentials.nil? or cloudbase.listCredentials.empty? or cloudbase.credConfig(args[:credentials]).nil? + if args[:region] and cloudbase.respond_to?(:listRegions) + if !cloudbase.listRegions(credentials: args[:credentials]) + MU.log "Failed to get region list for credentials #{args[:credentials]} in cloud #{cloud}", MU::ERR, details: caller + else + next if !cloudbase.listRegions(credentials: args[:credentials]).include?(args[:region]) + end + end + begin + cloudclass = MU::Cloud.resourceClass(cloud, shortname) + rescue MU::MuError + next + end + + found = cloudclass.find(args) + if !found.nil? 
+ if found.is_a?(Hash) + allfound.merge!(found) + else + raise MuError, "#{cloudclass}.find returned a non-Hash result" + end + end + rescue MuCloudResourceNotImplemented + end + } + allfound + end + + # Wrapper for the cleanup class method of underlying cloud object implementations. + def self.cleanup(*flags) + ok = true + params = flags.first + clouds = MU::Cloud.supportedClouds + if params[:cloud] + clouds = [params[:cloud]] + params.delete(:cloud) + end + + clouds.each { |cloud| + begin + cloudclass = MU::Cloud.resourceClass(cloud, shortname) + + if cloudclass.isGlobal? + params.delete(:region) + end + + raise MuCloudResourceNotImplemented if !cloudclass.respond_to?(:cleanup) or cloudclass.method(:cleanup).owner.to_s != "#" + MU.log "Invoking #{cloudclass}.cleanup from #{shortname}", MU::DEBUG, details: flags + cloudclass.cleanup(params) + rescue MuCloudResourceNotImplemented + MU.log "No #{cloud} implementation of #{shortname}.cleanup, skipping", MU::DEBUG, details: flags + rescue StandardError => e + in_msg = cloud + if params and params[:region] + in_msg += " "+params[:region] + end + if params and params[:flags] and params[:flags]["project"] and !params[:flags]["project"].empty? + in_msg += " project "+params[:flags]["project"] + end + MU.log "Skipping #{shortname} cleanup method in #{in_msg} due to #{e.class.name}: #{e.message}", MU::WARN, details: e.backtrace + ok = false + end + } + MU::MommaCat.unlockAll + + ok + end + + # A hook that is always called just before each instance method is + # invoked, so that we can ensure that repetitive setup tasks (like + # resolving +:resource_group+ for Azure resources) have always been + # done. 
+ def resourceInitHook + @cloud ||= cloud + if @cloudparentclass.respond_to?(:resourceInitHook) + @cloudparentclass.resourceInitHook(@cloudobj, @deploy) + end + end + + if shortname == "Database" + + # Getting the password for a database's master user, and saving it in a database / cluster specific vault + def getPassword + if @config['password'].nil? + if @config['auth_vault'] && !@config['auth_vault'].empty? + @config['password'] = @groomclass.getSecret( + vault: @config['auth_vault']['vault'], + item: @config['auth_vault']['item'], + field: @config['auth_vault']['password_field'] + ) + else + # Should we use random instead? + @config['password'] = Password.pronounceable(10..12) + end + end + + creds = { + "username" => @config["master_user"], + "password" => @config["password"] + } + @groomclass.saveSecret(vault: @mu_name, item: "database_credentials", data: creds) + end + end + + if shortname == "DNSZone" + def self.genericMuDNSEntry(*flags) +# XXX have this switch on a global config for where Mu puts its DNS + cloudclass = MU::Cloud.resourceClass(MU::Config.defaultCloud, "DNSZone") + cloudclass.genericMuDNSEntry(flags.first) + end + def self.createRecordsFromConfig(*flags) + cloudclass = MU::Cloud.resourceClass(MU::Config.defaultCloud, "DNSZone") + if !flags.nil? and flags.size == 1 + cloudclass.createRecordsFromConfig(flags.first) + else + cloudclass.createRecordsFromConfig(*flags) + end + end + end + + if shortname == "Server" or shortname == "ServerPool" + def windows? + return true if %w{win2k16 win2k12r2 win2k12 win2k8 win2k8r2 win2k19 windows}.include?(@config['platform']) + begin + return true if cloud_desc.respond_to?(:platform) and cloud_desc.platform == "Windows" +# XXX ^ that's AWS-speak, doesn't cover GCP or anything else; maybe we should require cloud layers to implement this so we can just call @cloudobj.windows? 
+ rescue MU::MuError + return false + end + false + end + + require 'mu/cloud/winrm_sessions' + require 'mu/cloud/ssh_sessions' + end + + # Wrap the instance methods that this cloud resource type has to + # implement. + MU::Cloud.resource_types[name.to_sym][:instance].each { |method| + + define_method method do |*args| + return nil if @cloudobj.nil? + MU.log "Invoking #{@cloudobj}.#{method}", MU::DEBUG + + # Go ahead and guarantee that we can't accidentally trigger these + # methods recursively. + @method_semaphore.synchronize { + # We're looking for recursion, not contention, so ignore some + # obviously harmless things. + if @method_locks.has_key?(method) and method != :findBastion and method != :cloud_id + MU.log "Double-call to cloud method #{method} for #{self}", MU::DEBUG, details: caller + ["competing call stack:"] + @method_locks[method] + end + @method_locks[method] = caller + } + + # Make sure the describe() caches are fresh + @cloudobj.describe if method != :describe + + # Don't run through dependencies on simple attr_reader lookups + if ![:dependencies, :cloud_id, :config, :mu_name].include?(method) + @cloudobj.dependencies + end + + retval = nil + if !args.nil? and args.size == 1 + retval = @cloudobj.method(method).call(args.first) + elsif !args.nil? and args.size > 0 + retval = @cloudobj.method(method).call(*args) + else + retval = @cloudobj.method(method).call + end + if (method == :create or method == :groom or method == :postBoot) and + (!@destroyed and !@cloudobj.destroyed) + deploydata = @cloudobj.method(:notify).call + @deploydata ||= deploydata # XXX I don't remember why we're not just doing this from the get-go; maybe because we prefer some mangling occurring in @deploy.notify? + if deploydata.nil? 
or !deploydata.is_a?(Hash) + MU.log "#{self} notify method did not return a Hash of deployment data, attempting to fill in with cloud descriptor #{@cloudobj.cloud_id}", MU::WARN + deploydata = MU.structToHash(@cloudobj.cloud_desc) + raise MuError, "Failed to collect metadata about #{self}" if deploydata.nil? + end + deploydata['cloud_id'] ||= @cloudobj.cloud_id if !@cloudobj.cloud_id.nil? + deploydata['mu_name'] = @cloudobj.mu_name if !@cloudobj.mu_name.nil? + deploydata['nodename'] = @cloudobj.mu_name if !@cloudobj.mu_name.nil? + deploydata.delete("#MUOBJECT") + @deploy.notify(self.class.cfg_plural, @config['name'], deploydata, triggering_node: @cloudobj, delayed_save: @delayed_save) if !@deploy.nil? + elsif method == :notify + retval['cloud_id'] = @cloudobj.cloud_id.to_s if !@cloudobj.cloud_id.nil? + retval['mu_name'] = @cloudobj.mu_name if !@cloudobj.mu_name.nil? + @deploy.notify(self.class.cfg_plural, @config['name'], retval, triggering_node: @cloudobj, delayed_save: @delayed_save) if !@deploy.nil? + end + @method_semaphore.synchronize { + @method_locks.delete(method) + } + + @deploydata = @cloudobj.deploydata + @config = @cloudobj.config + retval + end + } # end instance method list + + + } # end dynamic class generation block + } # end resource type iteration + + end + +end diff --git a/modules/mu/cloud/ssh_sessions.rb b/modules/mu/cloud/ssh_sessions.rb new file mode 100644 index 000000000..9211bd6f3 --- /dev/null +++ b/modules/mu/cloud/ssh_sessions.rb @@ -0,0 +1,225 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. + class Cloud + + # An exception we can use with transient Net::SSH errors, which require + # special handling due to obnoxious asynchronous interrupt behaviors. + class NetSSHFail < MuNonFatal; + end + + # Net::SSH exceptions seem to have their own behavior vis a vis threads, + # and our regular call stack gets circumvented when they're thrown. Cheat + # here to catch them gracefully. + def self.handleNetSSHExceptions + Thread.handle_interrupt(Net::SSH::Exception => :never) { + begin + Thread.handle_interrupt(Net::SSH::Exception => :immediate) { + MU.log "(Probably harmless) Caught a Net::SSH Exception in #{Thread.current.inspect}", MU::DEBUG, details: Thread.current.backtrace + } + ensure +# raise NetSSHFail, "Net::SSH had a nutty" + end + } + end + + + # Basic setup tasks performed on a new node during its first initial + # ssh connection. Most of this is terrible Windows glue. + # @param ssh [Net::SSH::Connection::Session]: The active SSH session to the new node. 
+ def initialSSHTasks(ssh) + win_env_fix = %q{echo 'export PATH="$PATH:/cygdrive/c/opscode/chef/embedded/bin"' > "$HOME/chef-client"; echo 'prev_dir="`pwd`"; for __dir in /proc/registry/HKEY_LOCAL_MACHINE/SYSTEM/CurrentControlSet/Control/Session\ Manager/Environment;do cd "$__dir"; for __var in `ls * | grep -v TEMP | grep -v TMP`;do __var=`echo $__var | tr "[a-z]" "[A-Z]"`; test -z "${!__var}" && export $__var="`cat $__var`" >/dev/null 2>&1; done; done; cd "$prev_dir"; /cygdrive/c/opscode/chef/bin/chef-client.bat $@' >> "$HOME/chef-client"; chmod 700 "$HOME/chef-client"; ( grep "^alias chef-client=" "$HOME/.bashrc" || echo 'alias chef-client="$HOME/chef-client"' >> "$HOME/.bashrc" ) ; ( grep "^alias mu-groom=" "$HOME/.bashrc" || echo 'alias mu-groom="powershell -File \"c:/Program Files/Amazon/Ec2ConfigService/Scripts/UserScript.ps1\""' >> "$HOME/.bashrc" )} + win_installer_check = %q{ls /proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows/CurrentVersion/Installer/} + lnx_installer_check = %q{ps auxww | awk '{print $11}' | egrep '(/usr/bin/yum|apt-get|dpkg)'} + lnx_updates_check = %q{( test -f /.mu-installer-ran-updates || ! test -d /var/lib/cloud/instance ) || echo "userdata still running"} + win_set_pw = nil + + if windows? 
and !@config['use_cloud_provider_windows_password'] + # This covers both the case where we have a windows password passed from a vault and where we need to use a a random Windows Admin password generated by MU::Cloud::Server.generateWindowsPassword + pw = @groomer.getSecret( + vault: @config['mu_name'], + item: "windows_credentials", + field: "password" + ) + win_check_for_pw = %Q{powershell -Command '& {Add-Type -AssemblyName System.DirectoryServices.AccountManagement; $Creds = (New-Object System.Management.Automation.PSCredential("#{@config["windows_admin_username"]}", (ConvertTo-SecureString "#{pw}" -AsPlainText -Force)));$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine); $DS.ValidateCredentials($Creds.GetNetworkCredential().UserName, $Creds.GetNetworkCredential().password); echo $Result}'} + win_set_pw = %Q{powershell -Command "& {(([adsi]('WinNT://./#{@config["windows_admin_username"]}, user')).psbase.invoke('SetPassword', '#{pw}'))}"} + end + + # There shouldn't be a use case where a domain joined computer goes through initialSSHTasks. Removing Active Directory specific computer rename. + set_hostname = true + hostname = nil + if !@config['active_directory'].nil? + if @config['active_directory']['node_type'] == "domain_controller" && @config['active_directory']['domain_controller_hostname'] + hostname = @config['active_directory']['domain_controller_hostname'] + @mu_windows_name = hostname + set_hostname = true + else + # Do we have an AD specific hostname? + hostname = @mu_windows_name + set_hostname = true + end + else + hostname = @mu_windows_name + end + win_check_for_hostname = %Q{powershell -Command '& {hostname}'} + win_set_hostname = %Q{powershell -Command "& {Rename-Computer -NewName '#{hostname}' -Force -PassThru -Restart; Restart-Computer -Force }"} + + begin + # Set our admin password first, if we need to + if windows? and !win_set_pw.nil? 
and !win_check_for_pw.nil? + output = ssh.exec!(win_check_for_pw) + raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? + if !output.match(/True/) + MU.log "Setting Windows password for user #{@config['windows_admin_username']}", details: ssh.exec!(win_set_pw) + end + end + if windows? + output = ssh.exec!(win_env_fix) + output += ssh.exec!(win_installer_check) + raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? + if output.match(/InProgress/) + raise MU::Cloud::BootstrapTempFail, "Windows Installer service is still doing something, need to wait" + end + if set_hostname and !@hostname_set and @mu_windows_name + output = ssh.exec!(win_check_for_hostname) + raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? + if !output.match(/#{@mu_windows_name}/) + MU.log "Setting Windows hostname to #{@mu_windows_name}", details: ssh.exec!(win_set_hostname) + @hostname_set = true + # Reboot from the API too, in case Windows is flailing + if !@cloudobj.nil? + @cloudobj.reboot + else + reboot + end + raise MU::Cloud::BootstrapTempFail, "Set hostname in Windows, waiting for reboot" + end + end + else + output = ssh.exec!(lnx_installer_check) + if !output.nil? and !output.empty? + raise MU::Cloud::BootstrapTempFail, "Linux package manager is still doing something, need to wait (#{output})" + end + if !@config['skipinitialupdates'] and + !@config['scrub_mu_isms'] and + !@config['userdata_script'] + output = ssh.exec!(lnx_updates_check) + if !output.nil? 
and output.match(/userdata still running/) + raise MU::Cloud::BootstrapTempFail, "Waiting for initial userdata system updates to complete" + end + end + end + rescue RuntimeError => e + raise MU::Cloud::BootstrapTempFail, "Got #{e.inspect} performing initial SSH connect tasks, will try again" + end + + end + + # @param max_retries [Integer]: Number of connection attempts to make before giving up + # @param retry_interval [Integer]: Number of seconds to wait between connection attempts + # @return [Net::SSH::Connection::Session] + def getSSHSession(max_retries = 12, retry_interval = 30) + ssh_keydir = Etc.getpwnam(@deploy.mu_user).dir+"/.ssh" + nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, _ssh_key_name = getSSHConfig + session = nil + retries = 0 + + vpc_class = Object.const_get("MU").const_get("Cloud").const_get(@cloud).const_get("VPC") + + # XXX WHY is this a thing + Thread.handle_interrupt(Errno::ECONNREFUSED => :never) { + } + + begin + MU::Cloud.handleNetSSHExceptions + if !nat_ssh_host.nil? 
+ proxy_cmd = "ssh -q -o StrictHostKeyChecking=no -W %h:%p #{nat_ssh_user}@#{nat_ssh_host}" + MU.log "Attempting SSH to #{canonical_ip} (#{@mu_name}) as #{ssh_user} with key #{@deploy.ssh_key_name} using proxy '#{proxy_cmd}'" if retries == 0 + proxy = Net::SSH::Proxy::Command.new(proxy_cmd) + session = Net::SSH.start( + canonical_ip, + ssh_user, + :config => false, + :keys_only => true, + :keys => [ssh_keydir+"/"+nat_ssh_key, ssh_keydir+"/"+@deploy.ssh_key_name], + :verify_host_key => false, + # :verbose => :info, + :host_key => "ssh-rsa", + :port => 22, + :auth_methods => ['publickey'], + :proxy => proxy + ) + else + + MU.log "Attempting SSH to #{canonical_ip} (#{@mu_name}) as #{ssh_user} with key #{ssh_keydir}/#{@deploy.ssh_key_name}" if retries == 0 + session = Net::SSH.start( + canonical_ip, + ssh_user, + :config => false, + :keys_only => true, + :keys => [ssh_keydir+"/"+@deploy.ssh_key_name], + :verify_host_key => false, + # :verbose => :info, + :host_key => "ssh-rsa", + :port => 22, + :auth_methods => ['publickey'] + ) + end + retries = 0 + rescue Net::SSH::HostKeyMismatch => e + MU.log("Remembering new key: #{e.fingerprint}") + e.remember_host! + session.close + retry +# rescue SystemCallError, Timeout::Error, Errno::ECONNRESET, Errno::EHOSTUNREACH, Net::SSH::Proxy::ConnectError, SocketError, Net::SSH::Disconnect, Net::SSH::AuthenticationFailed, IOError, Net::SSH::ConnectionTimeout, Net::SSH::Proxy::ConnectError, MU::Cloud::NetSSHFail => e + rescue SystemExit, Timeout::Error, Net::SSH::AuthenticationFailed, Net::SSH::Disconnect, Net::SSH::ConnectionTimeout, Net::SSH::Proxy::ConnectError, Net::SSH::Exception, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ECONNREFUSED, Errno::EPIPE, SocketError, IOError => e + begin + session.close if !session.nil? + rescue Net::SSH::Disconnect, IOError => e + if windows? + MU.log "Windows has probably closed the ssh session before we could. 
Waiting before trying again", MU::NOTICE + else + MU.log "ssh session was closed unexpectedly, waiting before trying again", MU::NOTICE + end + sleep 10 + end + + if retries < max_retries + retries = retries + 1 + msg = "ssh #{ssh_user}@#{@mu_name}: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" + if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0) + MU.log msg, MU::NOTICE + if !vpc_class.haveRouteToInstance?(cloud_desc, credentials: @credentials) and + canonical_ip.match(/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1$)|(^[fF][cCdD])/) and + !nat_ssh_host + MU.log "Node #{@mu_name} at #{canonical_ip} looks like it's in a private address space, and I don't appear to have a direct route to it. It may not be possible to connect with this routing!", MU::WARN + end + elsif retries/max_retries > 0.5 + MU.log msg, MU::WARN, details: e.inspect + end + sleep retry_interval + retry + else + raise MuError, "#{@mu_name}: #{e.inspect} trying to connect with SSH, max_retries exceeded", e.backtrace + end + end + return session + end + + end + +end diff --git a/modules/mu/cloud/winrm_sessions.rb b/modules/mu/cloud/winrm_sessions.rb new file mode 100644 index 000000000..9fb3d9ad7 --- /dev/null +++ b/modules/mu/cloud/winrm_sessions.rb @@ -0,0 +1,231 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +autoload :WinRM, "winrm" + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. + class Cloud + + # Gracefully message and attempt to accommodate the common transient errors peculiar to Windows nodes + # @param e [Exception]: The exception that we're handling + # @param retries [Integer]: The current number of retries, which we'll increment and pass back to the caller + # @param rebootable_fails [Integer]: The current number of reboot-worthy failures, which we'll increment and pass back to the caller + # @param max_retries [Integer]: Maximum number of retries to attempt; we'll raise an exception if this is exceeded + # @param reboot_on_problems [Boolean]: Whether we should try to reboot a "stuck" machine + # @param retry_interval [Integer]: How many seconds to wait before returning for another attempt + def handleWindowsFail(e, retries, rebootable_fails, max_retries: 30, reboot_on_problems: false, retry_interval: 45) + msg = "WinRM connection to https://"+@mu_name+":5986/wsman: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" + if e.class.name == "WinRM::WinRMAuthorizationError" or e.message.match(/execution expired/) and reboot_on_problems + if rebootable_fails > 0 and (rebootable_fails % 7) == 0 + MU.log "#{@mu_name} still misbehaving, forcing Stop and Start from API", MU::WARN + reboot(true) # vicious API stop/start + sleep retry_interval*3 + rebootable_fails = 0 + else + if rebootable_fails == 5 + MU.log "#{@mu_name} misbehaving, attempting to reboot from API", MU::WARN + reboot # graceful API restart + sleep retry_interval*2 + end + rebootable_fails = rebootable_fails + 1 + end + end + if retries < max_retries + if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0 and retries != 0) + MU.log msg, MU::NOTICE + elsif retries/max_retries > 0.5 + MU.log msg, MU::WARN, details: e.inspect + end + sleep retry_interval + retries = retries + 1 + else + raise 
MuError, "#{@mu_name}: #{e.inspect} trying to connect with WinRM, max_retries exceeded", e.backtrace + end + return [retries, rebootable_fails] + end + + def windowsRebootPending?(shell = nil) + if shell.nil? + shell = getWinRMSession(1, 30) + end +# if (Get-Item "HKLM:/SOFTWARE/Microsoft/Windows/CurrentVersion/WindowsUpdate/Auto Update/RebootRequired" -EA Ignore) { exit 1 } + cmd = %Q{ + if (Get-ChildItem "HKLM:/Software/Microsoft/Windows/CurrentVersion/Component Based Servicing/RebootPending" -EA Ignore) { + echo "Component Based Servicing/RebootPending is true" + exit 1 + } + if (Get-ItemProperty "HKLM:/SYSTEM/CurrentControlSet/Control/Session Manager" -Name PendingFileRenameOperations -EA Ignore) { + echo "Control/Session Manager/PendingFileRenameOperations is true" + exit 1 + } + try { + $util = [wmiclass]"\\\\.\\root\\ccm\\clientsdk:CCM_ClientUtilities" + $status = $util.DetermineIfRebootPending() + if(($status -ne $null) -and $status.RebootPending){ + echo "WMI says RebootPending is true" + exit 1 + } + } catch { + exit 0 + } + exit 0 + } + resp = shell.run(cmd) + returnval = resp.exitcode == 0 ? false : true + shell.close + returnval + end + + # Basic setup tasks performed on a new node during its first WinRM + # connection. Most of this is terrible Windows glue. + # @param shell [WinRM::Shells::Powershell]: An active Powershell session to the new node. 
+ def initialWinRMTasks(shell) + retries = 0 + rebootable_fails = 0 + begin + if !@config['use_cloud_provider_windows_password'] + pw = @groomer.getSecret( + vault: @config['mu_name'], + item: "windows_credentials", + field: "password" + ) + win_check_for_pw = %Q{Add-Type -AssemblyName System.DirectoryServices.AccountManagement; $Creds = (New-Object System.Management.Automation.PSCredential("#{@config["windows_admin_username"]}", (ConvertTo-SecureString "#{pw}" -AsPlainText -Force)));$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine); $DS.ValidateCredentials($Creds.GetNetworkCredential().UserName, $Creds.GetNetworkCredential().password); echo $Result} + resp = shell.run(win_check_for_pw) + if resp.stdout.chomp != "True" + win_set_pw = %Q{(([adsi]('WinNT://./#{@config["windows_admin_username"]}, user')).psbase.invoke('SetPassword', '#{pw}'))} + resp = shell.run(win_set_pw) + puts resp.stdout + MU.log "Resetting Windows host password", MU::NOTICE, details: resp.stdout + end + end + + # Install Cygwin here, because for some reason it breaks inside Chef + # XXX would love to not do this here + pkgs = ["bash", "mintty", "vim", "curl", "openssl", "wget", "lynx", "openssh"] + admin_home = "c:/bin/cygwin/home/#{@config["windows_admin_username"]}" + install_cygwin = %Q{ + If (!(Test-Path "c:/bin/cygwin/Cygwin.bat")){ + $WebClient = New-Object System.Net.WebClient + $WebClient.DownloadFile("http://cygwin.com/setup-x86_64.exe","$env:Temp/setup-x86_64.exe") + Start-Process -wait -FilePath $env:Temp/setup-x86_64.exe -ArgumentList "-q -n -l $env:Temp/cygwin -R c:/bin/cygwin -s http://mirror.cs.vt.edu/pub/cygwin/cygwin/ -P #{pkgs.join(',')}" + } + if(!(Test-Path #{admin_home})){ + New-Item -type directory -path #{admin_home} + } + if(!(Test-Path #{admin_home}/.ssh)){ + New-Item -type directory -path #{admin_home}/.ssh + } + if(!(Test-Path #{admin_home}/.ssh/authorized_keys)){ + New-Item 
#{admin_home}/.ssh/authorized_keys -type file -force -value "#{@deploy.ssh_public_key}" + } + } + resp = shell.run(install_cygwin) + if resp.exitcode != 0 + MU.log "Failed at installing Cygwin", MU::ERR, details: resp + end + + hostname = nil + if !@config['active_directory'].nil? + if @config['active_directory']['node_type'] == "domain_controller" && @config['active_directory']['domain_controller_hostname'] + hostname = @config['active_directory']['domain_controller_hostname'] + @mu_windows_name = hostname + else + # Do we have an AD specific hostname? + hostname = @mu_windows_name + end + else + hostname = @mu_windows_name + end + resp = shell.run(%Q{hostname}) + + if resp.stdout.chomp != hostname + resp = shell.run(%Q{Rename-Computer -NewName '#{hostname}' -Force -PassThru -Restart; Restart-Computer -Force}) + MU.log "Renaming Windows host to #{hostname}; this will trigger a reboot", MU::NOTICE, details: resp.stdout + reboot(true) + sleep 30 + end + rescue WinRM::WinRMError, HTTPClient::ConnectTimeoutError => e + retries, rebootable_fails = handleWindowsFail(e, retries, rebootable_fails, max_retries: 10, reboot_on_problems: true, retry_interval: 30) + retry + end + end + + # Get a privileged Powershell session on the server in question, using SSL-encrypted WinRM with certificate authentication. 
+ # @param max_retries [Integer]: + # @param retry_interval [Integer]: + # @param timeout [Integer]: + # @param winrm_retries [Integer]: + # @param reboot_on_problems [Boolean]: + def getWinRMSession(max_retries = 40, retry_interval = 60, timeout: 30, winrm_retries: 2, reboot_on_problems: false) + _nat_ssh_key, _nat_ssh_user, _nat_ssh_host, canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig + @mu_name ||= @config['mu_name'] + + shell = nil + opts = nil + # and now, a thing I really don't want to do + MU::Master.addInstanceToEtcHosts(canonical_ip, @mu_name) + + # catch exceptions that circumvent our regular call stack + Thread.abort_on_exception = false + Thread.handle_interrupt(WinRM::WinRMWSManFault => :never) { + begin + Thread.handle_interrupt(WinRM::WinRMWSManFault => :immediate) { + MU.log "(Probably harmless) Caught a WinRM::WinRMWSManFault in #{Thread.current.inspect}", MU::DEBUG, details: Thread.current.backtrace + } + ensure + # Reraise something useful + end + } + + retries = 0 + rebootable_fails = 0 + begin + loglevel = retries > 4 ? 
MU::NOTICE : MU::DEBUG + MU.log "Calling WinRM on #{@mu_name}", loglevel, details: opts + opts = { + retry_limit: winrm_retries, + no_ssl_peer_verification: true, # XXX this should not be necessary; we get 'hostname "foo" does not match the server certificate' even when it clearly does match + ca_trust_path: "#{MU.mySSLDir}/Mu_CA.pem", + transport: :ssl, + operation_timeout: timeout, + } + if retries % 2 == 0 # NTLM password over https + opts[:endpoint] = 'https://'+canonical_ip+':5986/wsman' + opts[:user] = @config['windows_admin_username'] + opts[:password] = getWindowsAdminPassword + else # certificate auth over https + opts[:endpoint] = 'https://'+@mu_name+':5986/wsman' + opts[:client_cert] = "#{MU.mySSLDir}/#{@mu_name}-winrm.crt" + opts[:client_key] = "#{MU.mySSLDir}/#{@mu_name}-winrm.key" + end + conn = WinRM::Connection.new(opts) + conn.logger.level = :debug if retries > 2 + MU.log "WinRM connection to #{@mu_name} created", MU::DEBUG, details: conn + shell = conn.shell(:powershell) + shell.run('ipconfig') # verify that we can do something + rescue Errno::EHOSTUNREACH, Errno::ECONNREFUSED, HTTPClient::ConnectTimeoutError, OpenSSL::SSL::SSLError, SocketError, WinRM::WinRMError, Timeout::Error => e + retries, rebootable_fails = handleWindowsFail(e, retries, rebootable_fails, max_retries: max_retries, reboot_on_problems: reboot_on_problems, retry_interval: retry_interval) + retry + ensure + MU::Master.removeInstanceFromEtcHosts(@mu_name) + end + + shell + end + + end + +end diff --git a/modules/mu/config.rb b/modules/mu/config.rb index 404659c49..45274e409 100644 --- a/modules/mu/config.rb +++ b/modules/mu/config.rb @@ -525,7 +525,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: # cloud-specific schema. 
schemaclass = Object.const_get("MU").const_get("Config").const_get(shortclass) myschema = Marshal.load(Marshal.dump(MU::Config.schema["properties"][cfg_plural]["items"])) - more_required, more_schema = Object.const_get("MU").const_get("Cloud").const_get(descriptor["cloud"]).const_get(shortclass.to_s).schema(self) + more_required, more_schema = MU::Cloud.resourceClass(descriptor["cloud"], type).schema(self) if more_schema MU::Config.schemaMerge(myschema["properties"], more_schema, descriptor["cloud"]) end @@ -544,7 +544,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: end # Make sure a sensible region has been targeted, if applicable - classobj = Object.const_get("MU").const_get("Cloud").const_get(descriptor["cloud"]) + classobj = MU::Cloud.cloudClass(descriptor["cloud"]) if descriptor["region"] valid_regions = classobj.listRegions if !valid_regions.include?(descriptor["region"]) @@ -665,7 +665,6 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: if (descriptor['ingress_rules'] or ["server", "server_pool", "database", "cache_cluster"].include?(cfg_name)) descriptor['ingress_rules'] ||= [] - fw_classobj = Object.const_get("MU").const_get("Cloud").const_get(descriptor["cloud"]).const_get("FirewallRule") acl = haveLitterMate?(fwname, "firewall_rules") already_exists = !acl.nil? @@ -676,7 +675,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: "region" => descriptor['region'], "credentials" => descriptor["credentials"] } - if !fw_classobj.isGlobal? + if !MU::Cloud.resourceClass(descriptor["cloud"], "FirewallRule").isGlobal? 
acl['region'] = descriptor['region'] acl['region'] ||= classobj.myRegion(acl['credentials']) else @@ -831,7 +830,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: # Run the cloud class's deeper validation, unless we've already failed # on stuff that will cause spurious alarms further in if ok - parser = Object.const_get("MU").const_get("Cloud").const_get(descriptor["cloud"]).const_get(shortclass.to_s) + parser = MU::Cloud.resourceClass(descriptor['cloud'], type) original_descriptor = MU::Config.stripConfig(descriptor) passed = parser.validateConfig(descriptor, self) @@ -841,7 +840,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: end # Make sure we've been configured with the right credentials - cloudbase = Object.const_get("MU").const_get("Cloud").const_get(descriptor['cloud']) + cloudbase = MU::Cloud.cloudClass(descriptor['cloud']) credcfg = cloudbase.credConfig(descriptor['credentials']) if !credcfg or credcfg.empty? 
raise ValidationError, "#{descriptor['cloud']} #{cfg_name} #{descriptor['name']} declares credential set #{descriptor['credentials']}, but no such credentials exist for that cloud provider" diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index 46614e9fc..e4c1e1303 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -420,8 +420,8 @@ def self.validate(db, configurator) db["dependencies"] ||= [] db["dependencies"] << { "type" => "database", - "name" => db["source"]["name"], - "phase" => "groom" + "name" => db["source"]["name"]#, +# "phase" => "groom" } end db["source"]["cloud"] ||= db["cloud"] diff --git a/modules/mu/config/doc_helpers.rb b/modules/mu/config/doc_helpers.rb index 4d7956127..f8bd1311d 100644 --- a/modules/mu/config/doc_helpers.rb +++ b/modules/mu/config/doc_helpers.rb @@ -29,8 +29,7 @@ def self.docSchema rescue LoadError next end - res_class = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(classname) - _required, res_schema = res_class.schema(self) + _required, res_schema = MU::Cloud.resourceClass(cloud, classname).schema(self) docschema["properties"][attrs[:cfg_plural]]["items"]["description"] ||= "" docschema["properties"][attrs[:cfg_plural]]["items"]["description"] += "\n#\n# `#{cloud}`: "+res_class.quality res_schema.each { |key, cfg| @@ -61,7 +60,7 @@ def self.prepend_descriptions(prefix, cfg) MU::Cloud.supportedClouds.each { |cloud| res_class = nil begin - res_class = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(classname) + res_class = MU::Cloud.resourceClass(cloud, classname) rescue MU::Cloud::MuCloudResourceNotImplemented next end diff --git a/modules/mu/config/firewall_rule.rb b/modules/mu/config/firewall_rule.rb index 088e2a43b..0e4f74a9c 100644 --- a/modules/mu/config/firewall_rule.rb +++ b/modules/mu/config/firewall_rule.rb @@ -180,8 +180,6 @@ def adminFirewallRuleset(vpc: nil, admin_ip: nil, region: nil, cloud: nil, crede ] end - resclass 
= Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get("FirewallRule") - if rules_only return rules end @@ -217,7 +215,7 @@ def adminFirewallRuleset(vpc: nil, admin_ip: nil, region: nil, cloud: nil, crede acl['project'] = acl["vpc"]["habitat"]["id"] || acl["vpc"]["habitat"]["name"] end acl.delete("vpc") if !acl["vpc"] - if !resclass.isGlobal? and !region.nil? and !region.empty? + if !MU::Cloud.resourceClass(cloud, "FirewallRule").isGlobal? and !region.nil? and !region.empty? acl["region"] = region end @admin_firewall_rules << acl if !@admin_firewall_rules.include?(acl) diff --git a/modules/mu/config/ref.rb b/modules/mu/config/ref.rb index da80b9421..b88c3f071 100644 --- a/modules/mu/config/ref.rb +++ b/modules/mu/config/ref.rb @@ -261,6 +261,7 @@ def cloud_id # @param mommacat [MU::MommaCat]: A deploy object which will be searched for the referenced resource if provided, before restoring to broader, less efficient searches. def kitten(mommacat = @mommacat, shallow: false, debug: false) return nil if !@cloud or !@type + loglevel = debug ? MU::NOTICE : MU::DEBUG if @obj @deploy_id ||= @obj.deploy_id @@ -270,6 +271,7 @@ def kitten(mommacat = @mommacat, shallow: false, debug: false) end if mommacat + MU.log "Looking for #{@type} #{@name} #{@id} in deploy #{mommacat.deploy_id}", loglevel @obj = mommacat.findLitterMate(type: @type, name: @name, cloud_id: @id, credentials: @credentials, debug: debug) if @obj # initialize missing attributes, if we can @id ||= @obj.cloud_id diff --git a/modules/mu/config/schema_helpers.rb b/modules/mu/config/schema_helpers.rb index 05564db28..1c571a48e 100644 --- a/modules/mu/config/schema_helpers.rb +++ b/modules/mu/config/schema_helpers.rb @@ -22,10 +22,9 @@ class Config def self.defaultCloud configured = {} MU::Cloud.supportedClouds.each { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) if $MU_CFG[cloud.downcase] and !$MU_CFG[cloud.downcase].empty? 
configured[cloud] = $MU_CFG[cloud.downcase].size - configured[cloud] += 0.5 if cloudclass.hosted? # tiebreaker + configured[cloud] += 0.5 if MU::Cloud.cloudClass(cloud).hosted? # tiebreaker end } if configured.size > 0 @@ -34,8 +33,7 @@ def self.defaultCloud }.first else MU::Cloud.supportedClouds.each { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) - return cloud if cloudclass.hosted? + return cloud if MU::Cloud.cloudClass(cloud).hosted? } return MU::Cloud.supportedClouds.first end @@ -83,9 +81,8 @@ def self.schemaMerge(orig, new, cloud) @@loadfails = [] MU::Cloud.availableClouds.each { |cloud| next if @@loadfails.include?(cloud) - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) begin - regions = cloudclass.listRegions() + regions = MU::Cloud.cloudClass(cloud).listRegions() @@allregions.concat(regions) if regions rescue MU::MuError => e @@loadfails << cloud @@ -100,7 +97,7 @@ def self.region_primitive @@allregions = [] MU::Cloud.availableClouds.each { |cloud| next if @@loadfails.include?(cloud) - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + cloudclass = MU::Cloud.cloudClass(cloud) begin return @@allregions if !cloudclass.listRegions() @@allregions.concat(cloudclass.listRegions()) @@ -244,7 +241,7 @@ def self.loadResourceSchema(type, cloud: nil) schema["title"] = type.to_s if cloud - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(shortclass) + cloudclass = MU::Cloud.resourceClass(cloud, type) if cloudclass.respond_to?(:schema) _reqd, cloudschema = cloudclass.schema @@ -298,8 +295,7 @@ def applySchemaDefaults(conf_chunk = config, schema_chunk = schema, depth = 0, s # schema information so that we set those defaults correctly. 
realschema = if type and schema_chunk["items"] and schema_chunk["items"]["properties"] and item["cloud"] and MU::Cloud.supportedClouds.include?(item['cloud']) - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(item["cloud"]).const_get(type) - _toplevel_required, cloudschema = cloudclass.schema(self) + _toplevel_required, cloudschema = MU::Cloud.resourceClass(item["cloud"], type).schema(self) newschema = schema_chunk["items"].dup MU::Config.schemaMerge(newschema["properties"], cloudschema, item["cloud"]) @@ -339,9 +335,9 @@ def applyInheritedDefaults(kitten, type) return end - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(kitten['cloud']) - shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type) - resclass = Object.const_get("MU").const_get("Cloud").const_get(kitten['cloud']).const_get(shortclass) + cloudclass = MU::Cloud.cloudClass(kitten['cloud']) + + resclass = MU::Cloud.resourceClass(kitten['cloud'], type) schema_fields = ["us_only", "scrub_mu_isms", "credentials", "billing_acct"] if !resclass.isGlobal? diff --git a/modules/mu/config/vpc.rb b/modules/mu/config/vpc.rb index c5fcdc45a..6c6fe1cf9 100644 --- a/modules/mu/config/vpc.rb +++ b/modules/mu/config/vpc.rb @@ -543,7 +543,7 @@ def self.validate(vpc, configurator) # Clouds that don't have some kind of native NAT gateway can also # leverage this host to honor "gateway" => "#NAT" situations. 
if !can_peer and !already_peered and have_public and vpc["create_bastion"] - serverclass = Object.const_get("MU").const_get("Cloud").const_get(vpc["cloud"]).const_get("Server") + serverclass = MU::Cloud.resourceClass(vpc["cloud"], "Server") bastion = serverclass.genericNAT.dup bastion["groomer_variables"] = { "nat_ip_block" => vpc["ip_block"].to_s @@ -600,7 +600,7 @@ def self.resolvePeers(vpc, configurator) MU.log "Skipping malformed VPC peer in #{vpc['name']}", MU::ERR, details: peer next end - peer["#MU_CLOUDCLASS"] = Object.const_get("MU").const_get("Cloud").const_get("VPC") + peer["#MU_CLOUDCLASS"] = MU::Cloud.loadBaseType("VPC") # We check for multiple siblings because some implementations # (Google) can split declared VPCs into parts to get the mimic the # routing behaviors we expect. diff --git a/modules/mu/deploy.rb b/modules/mu/deploy.rb index 5d7dda888..0bec21867 100644 --- a/modules/mu/deploy.rb +++ b/modules/mu/deploy.rb @@ -157,6 +157,7 @@ def initialize(environment, _shortclass, _cfg_name, _cfg_plural, classname = MU::Cloud.getResourceNames(data[:cfg_plural]) @main_config[data[:cfg_plural]].each { |resource| resource["#MU_CLOUDCLASS"] = classname +# resource["#MU_CLOUDCLASS"] = MU::Cloud.resourceClass(resource['cloud'], data[:cfg_plural]) } setThreadDependencies(@main_config[data[:cfg_plural]]) end @@ -265,7 +266,7 @@ def run # Run cloud provider-specific deploy meta-artifact creation (ssh keys, # resource groups, etc) @mommacat.cloudsUsed.each { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + cloudclass = MU::Cloud.cloudClass(cloud) cloudclass.initDeploy(@mommacat) } @@ -556,14 +557,7 @@ def setThreadDependencies(services) MU.log "Setting dependencies for #{name}", MU::DEBUG, details: resource["dependencies"] if resource["dependencies"] != nil then resource["dependencies"].each { |dependency| - parent_class = nil - MU::Cloud.resource_types.each_pair { |res_class, attrs| - if attrs[:cfg_name] == dependency['type'] 
or - attrs[:cfg_plural] == dependency['type'] - parent_class = Object.const_get("MU").const_get("Cloud").const_get(res_class) - break - end - } + parent_class = MU::Cloud.loadBaseType(dependency['type']) parent_type = parent_class.cfg_name @@ -575,7 +569,8 @@ def setThreadDependencies(services) if !resource["no_create_wait"] and (resource["#MU_CLOUDCLASS"].waits_on_parent_completion or dependency['phase'] == "create" or - (parent_class.deps_wait_on_my_creation and parent_type != res_type)) +# (parent_class.deps_wait_on_my_creation and parent_type != res_type)) + parent_class.deps_wait_on_my_creation) addDependentThread(parent, "#{name}_create") end @@ -651,8 +646,9 @@ def createResources(services, mode="create") run_this_method = myservice['#MUOBJECT'].method(mode) rescue StandardError => e MU::MommaCat.unlockAll - @main_thread.raise MuError, "Error invoking #{myservice["#MU_CLOUDCLASS"]}.#{mode} for #{myservice['name']} (#{e.inspect})", e.backtrace - raise e + @main_thread.raise MuError, "Error invoking #{myservice["#MUOBJECT"].class.name}.#{mode} for #{myservice['name']} (#{e.inspect})", e.backtrace + return +# raise e end begin MU.log "Checking whether to run #{myservice['#MUOBJECT']}.#{mode} (updating: #{@updating})", MU::DEBUG diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 9282c0bf7..5d828d640 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -318,7 +318,7 @@ def regionsUsed @original_config[type].each { |resource| if resource['cloud'] cloudclass = Object.const_get("MU").const_get("Cloud").const_get(resource['cloud']) - resclass = Object.const_get("MU").const_get("Cloud").const_get(resource['cloud']).const_get(res_type.to_s) + resclass = MU::Cloud.resourceClass(resource['cloud'], res_type) if resclass.isGlobal? 
# XXX why was I doing this, urgh next diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb index bb380732e..792f1e719 100644 --- a/modules/mu/mommacat/search.rb +++ b/modules/mu/mommacat/search.rb @@ -60,7 +60,7 @@ def self.findStray(cloud, type, ) _shortclass, _cfg_name, type, _classname, _attrs = MU::Cloud.getResourceNames(type, true) - cloudclass = MU::Cloud.assertSupportedCloud(cloud) + cloudclass = MU::Cloud.cloudClass(cloud) return nil if cloudclass.virtual? if (tag_key and !tag_value) or (!tag_key and tag_value) @@ -135,7 +135,7 @@ def self.findStray(cloud, type, # @param created_only [Boolean]: Only return the littermate if its cloud_id method returns a value # @param return_all [Boolean]: Return a Hash of matching objects indexed by their mu_name, instead of a single match. Only valid for resource types where has_multiples is true. # @return [MU::Cloud] - def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_only: false, return_all: false, credentials: nil, habitat: nil, **flags) + def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_only: false, return_all: false, credentials: nil, habitat: nil, debug: false, **flags) _shortclass, _cfg_name, type, _classname, attrs = MU::Cloud.getResourceNames(type) # If we specified a habitat, which we may also have done by its shorthand @@ -159,12 +159,14 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on } @kitten_semaphore.synchronize { + return nil if !@kittens.has_key?(type) matches = [] @kittens[type].each { |habitat_group, sib_classes| next if habitat and habitat_group and habitat_group != habitat sib_classes.each_pair { |sib_class, cloud_objs| + if attrs[:has_multiples] next if !name.nil? and name != sib_class or cloud_objs.empty? if !name.nil? 
@@ -213,7 +215,7 @@ def resolve_habitat(habitat, credentials: nil, debug: false) end def self.generate_dummy_object(type, cloud, name, mu_name, cloud_id, desc, region, habitat, tag_value, calling_deploy, credentials) - resourceclass = MU::Cloud.loadCloudType(cloud, type) + resourceclass = MU::Cloud.resourceClass(cloud, type) use_name = if (name.nil? or name.empty?) if !mu_name.nil? @@ -269,8 +271,8 @@ def self.generate_dummy_object(type, cloud, name, mu_name, cloud_id, desc, regio private_class_method :generate_dummy_object def self.search_cloud_provider(type, cloud, habitats, region, cloud_id: nil, tag_key: nil, tag_value: nil, credentials: nil, flags: nil) - cloudclass = MU::Cloud.assertSupportedCloud(cloud) - resourceclass = MU::Cloud.loadCloudType(cloud, type) + cloudclass = MU::Cloud.cloudClass(cloud) + resourceclass = MU::Cloud.resourceClass(cloud, type) # Decide what regions we'll search, if applicable for this resource # type. diff --git a/modules/mu/providers/aws.rb b/modules/mu/providers/aws.rb index e1ef49515..119e64819 100644 --- a/modules/mu/providers/aws.rb +++ b/modules/mu/providers/aws.rb @@ -1251,7 +1251,7 @@ def self.createTag(resource = nil, # Mu Master, if we're in AWS. 
# @return [void] def self.openFirewallForClients - MU::Cloud.loadCloudType("AWS", :FirewallRule) + MU::Cloud.resourceClass("AWS", :FirewallRule) begin if File.exist?(Etc.getpwuid(Process.uid).dir+"/.chef/knife.rb") ::Chef::Config.from_file(Etc.getpwuid(Process.uid).dir+"/.chef/knife.rb") diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index 60c398038..b1b265511 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -254,6 +254,7 @@ def createSubnetGroup # Finding subnets, creating security groups/adding holes, create subnet group subnet_ids = [] +dependencies raise MuError.new "Didn't find the VPC specified for #{@mu_name}", details: @config["vpc"].to_h unless @vpc mySubnets.each { |subnet| @@ -435,10 +436,9 @@ def notify def createNewSnapshot snap_id = @deploy.getResourceName(@config["name"]) + Time.new.strftime("%M%S").to_s src_ref = MU::Config::Ref.get(@config["source"]) - src_ref.kitten + src_ref.kitten(@deploy) if !src_ref.id - MU.log "Failed to get an id from reference for creating a snapshot", MU::ERR, details: @config['source'] - raise "Failed to get an id from reference for creating a snapshot" + raise MuError.new "#{@mu_name} failed to get an id from reference for creating a snapshot", details: @config['source'] end params = { :tags => @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } @@ -665,7 +665,7 @@ def self.schema(_config) "type" => "string", "enum" => ["license-included", "bring-your-own-license", "general-public-license", "postgresql-license"] }, - "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema + "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema } [toplevel_required, schema] end @@ -1020,7 +1020,6 @@ def add_basic def add_cluster_node cluster = MU::Config::Ref.get(@config["member_of_cluster"]).kitten(@deploy, debug: true) if cluster.nil? or cluster.cloud_id.nil? 
-puts @deploy.findLitterMate(type: "database", name: @config['member_of_cluster']['name']).class.name raise MuError.new "Failed to resolve parent cluster of #{@mu_name}", details: @config["member_of_cluster"].to_h end @@ -1380,7 +1379,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M return if db.nil? } - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_id, target: (cluster ? db.endpoint : db.endpoint.address), cloudclass: MU::Cloud::Database, delete: true) if !noop + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: cloud_id, target: (cluster ? db.endpoint : db.endpoint.address), cloudclass: MU::Cloud::Database, delete: true) if !noop if %w{deleting deleted}.include?(cluster ? db.status : db.db_instance_status) MU.log "#{cloud_id} has already been terminated", MU::WARN @@ -1445,7 +1444,7 @@ def self.purge_rds_sgs(cloud_id, region, credentials, noop) secgroup = MU::Cloud::AWS.rds(region: region, credentials: credentials).describe_db_security_groups(db_security_group_name: cloud_id) rdssecgroups << cloud_id if !secgroup.nil? 
rescue Aws::RDS::Errors::DBSecurityGroupNotFound - MU.log "No such RDS security group #{sg} to purge", MU::DEBUG + MU.log "No such RDS security group #{cloud_id} to purge", MU::DEBUG end # RDS security groups can depend on EC2 security groups, do these last diff --git a/modules/mu/providers/aws/habitat.rb b/modules/mu/providers/aws/habitat.rb index 96bbbcc6b..9fc4cf867 100644 --- a/modules/mu/providers/aws/habitat.rb +++ b/modules/mu/providers/aws/habitat.rb @@ -144,7 +144,7 @@ def self.isLive?(_account_number, _credentials = nil) def self.orgMasterCreds?(credentials = nil) acct_num = MU::Cloud::AWS.iam(credentials: credentials).list_users.users.first.arn.split(/:/)[4] - parentorg = MU::Cloud::AWS::Folder.find(credentials: credentials).values.first + parentorg = MU::Cloud.resourceClass("AWS", "Folder").find(credentials: credentials).values.first acct_num == parentorg.master_account_id end diff --git a/modules/mu/providers/aws/role.rb b/modules/mu/providers/aws/role.rb index b87535f73..7bc70e23a 100644 --- a/modules/mu/providers/aws/role.rb +++ b/modules/mu/providers/aws/role.rb @@ -925,7 +925,7 @@ def self.schema(_config) toplevel_required = [] aws_resource_types = MU::Cloud.resource_types.keys.reject { |t| begin - MU::Cloud.loadCloudType("AWS", t) + MU::Cloud.resourceClass("AWS", t) false rescue MuCloudResourceNotImplemented true diff --git a/modules/mu/providers/aws/vpc.rb b/modules/mu/providers/aws/vpc.rb index d7646911d..5fc965eb4 100644 --- a/modules/mu/providers/aws/vpc.rb +++ b/modules/mu/providers/aws/vpc.rb @@ -1777,7 +1777,7 @@ def self.purge_vpcs(noop = false, tagfilters = [{name: "tag:MU-ID", values: [MU. } on_retry = Proc.new { - MU::Cloud::AWS::FirewallRule.cleanup( + MU::Cloud.resourceClass("AWS", "FirewallRule").cleanup( noop: noop, region: region, credentials: credentials, @@ -1794,7 +1794,7 @@ def self.purge_vpcs(noop = false, tagfilters = [{name: "tag:MU-ID", values: [MU. 
if !MU::Cloud::AWS.isGovCloud?(region) mu_zone = MU::Cloud::DNSZone.find(cloud_id: "platform-mu", region: region, credentials: credentials).values.first if !mu_zone.nil? - MU::Cloud::AWS::DNSZone.toggleVPCAccess(id: mu_zone.id, vpc_id: vpc.vpc_id, remove: true, credentials: credentials) + MU::Cloud.resourceClass("AWS", "DNSZone").toggleVPCAccess(id: mu_zone.id, vpc_id: vpc.vpc_id, remove: true, credentials: credentials) end end } From bac19c9216c3d8c31ab26936d210ef064d67bc2a Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 7 Apr 2020 12:25:31 -0400 Subject: [PATCH 054/124] Config: detect trivial dependency loops; AWS::ContainerCluster: validate availability of EKS in a given region more efficiently --- modules/mu/config.rb | 83 +++++++++---------- modules/mu/deploy.rb | 3 +- modules/mu/providers/aws/container_cluster.rb | 13 +-- 3 files changed, 48 insertions(+), 51 deletions(-) diff --git a/modules/mu/config.rb b/modules/mu/config.rb index 45274e409..bcb087538 100644 --- a/modules/mu/config.rb +++ b/modules/mu/config.rb @@ -485,7 +485,8 @@ def removeKitten(name, type) # @param ignore_duplicates [Boolean]: Do not raise an exception if we attempt to insert a resource with a +name+ field that's already in use def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: false, overwrite: false) append = false -# start = Time.now + start = Time.now + shortclass, cfg_name, cfg_plural, classname = MU::Cloud.getResourceNames(type) MU.log "insertKitten on #{cfg_name} #{descriptor['name']} (delay_validation: #{delay_validation.to_s})", MU::DEBUG, details: caller[0] @@ -856,60 +857,56 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: @kittens[cfg_plural] << descriptor if append } + MU.log "insertKitten completed #{cfg_name} #{descriptor['name']} in #{sprintf("%.2fs", Time.now-start)}", MU::DEBUG + ok end # For our resources which specify intra-stack dependencies, make sure those # dependencies are actually declared. 
- # TODO check for loops - def self.check_dependencies(config) + def check_dependencies ok = true - config.each_pair { |type, values| - if values.instance_of?(Array) - values.each { |resource| - if resource.kind_of?(Hash) and !resource["dependencies"].nil? - append = [] - delete = [] - resource["dependencies"].each { |dependency| - _shortclass, cfg_name, cfg_plural, _classname = MU::Cloud.getResourceNames(dependency["type"]) - found = false - names_seen = [] - if !config[cfg_plural].nil? - config[cfg_plural].each { |service| - names_seen << service["name"].to_s - found = true if service["name"].to_s == dependency["name"].to_s - if service["virtual_name"] - names_seen << service["virtual_name"].to_s - if service["virtual_name"].to_s == dependency["name"].to_s - found = true - append_me = dependency.dup - append_me['name'] = service['name'] - append << append_me - delete << dependency - end - end - } - end - if !found - MU.log "Missing dependency: #{type}{#{resource['name']}} needs #{cfg_name}{#{dependency['name']}}", MU::ERR, details: names_seen + @config.each_pair { |type, values| + next if !values.instance_of?(Array) + _shortclass, cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type, false) + values.each { |resource| + next if !resource.kind_of?(Hash) or resource["dependencies"].nil? + + resource["dependencies"].each { |dependency| + # make sure the thing we depend on really exists + sibling = haveLitterMate?(dependency['name'], dependency['type']) + if !sibling + MU.log "Missing dependency: #{type}{#{resource['name']}} needs #{cfg_name}{#{dependency['name']}}", MU::ERR + ok = false + next + end + + # Fudge dependency declarations to account for virtual_names + if sibling['virtual_name'] == dependency['name'] + dependency['name'] = sibling['name'] + end + + next if dependency['no_create_wait'] + + # Check for a circular relationship. This only goes one layer deep, + # but more is a lot to ask. 
+ if sibling['dependencies'] + sibling['dependencies'].each { |sib_dep| + next if sib_dep['type'] != cfg_name or sib_dep['no_create_wait'] + cousin = haveLitterMate?(sib_dep['name'], sib_dep['type']) + if cousin and cousin['name'] == resource['name'] + MU.log "Circular dependency #{type} #{resource['name']} => #{dependency['name']} => #{sib_dep['name']}", MU::ERR ok = false end } - if append.size > 0 - append.uniq! - resource["dependencies"].concat(append) - end - if delete.size > 0 - delete.each { |delete_me| - resource["dependencies"].delete(delete_me) - } - end end } - end + + } } - return ok + + ok end # Ugly text-manipulation to recursively resolve some placeholder strings @@ -1213,7 +1210,7 @@ def validate(config = @config) types.each { |type| config[type] = @kittens[type] if @kittens[type].size > 0 } - ok = false if !MU::Config.check_dependencies(config) + ok = false if !check_dependencies # TODO enforce uniqueness of resource names raise ValidationError if !ok diff --git a/modules/mu/deploy.rb b/modules/mu/deploy.rb index 0bec21867..71f90157c 100644 --- a/modules/mu/deploy.rb +++ b/modules/mu/deploy.rb @@ -566,10 +566,9 @@ def setThreadDependencies(services) addDependentThread(parent, "#{name}_groom") # should our creation thread also wait on our parent's create? 
- if !resource["no_create_wait"] and + if !dependency["no_create_wait"] and (resource["#MU_CLOUDCLASS"].waits_on_parent_completion or dependency['phase'] == "create" or -# (parent_class.deps_wait_on_my_creation and parent_type != res_type)) parent_class.deps_wait_on_my_creation) addDependentThread(parent, "#{name}_create") end diff --git a/modules/mu/providers/aws/container_cluster.rb b/modules/mu/providers/aws/container_cluster.rb index 50743eb82..45ee6b40a 100644 --- a/modules/mu/providers/aws/container_cluster.rb +++ b/modules/mu/providers/aws/container_cluster.rb @@ -377,9 +377,10 @@ def self.getStandardImage(flavor = "ECS", region = MU.myRegion, version: nil, gp end # Return the list of regions where we know EKS is supported. - def self.EKSRegions(credentials = nil) + def self.EKSRegions(credentials = nil, region: nil) eks_regions = [] - MU::Cloud::AWS.listRegions(credentials: credentials).each { |r| + check_regions = region ? [region] : MU::Cloud::AWS.listRegions(credentials: credentials) + check_regions.each { |r| ami = getStandardImage("EKS", r) eks_regions << r if ami } @@ -416,7 +417,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent end def self.purge_eks_clusters(noop: false, region: MU.curRegion, credentials: nil) - return if !MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(region) + return if !MU::Cloud::AWS::ContainerCluster.EKSRegions(credentials, region: region).include?(region) resp = begin MU::Cloud::AWS.eks(credentials: credentials, region: region).list_clusters rescue Aws::EKS::Errors::AccessDeniedException @@ -1214,18 +1215,18 @@ def self.schema(_config) # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(cluster, configurator) ok = true - +start = Time.now cluster['size'] = MU::Cloud::AWS::Server.validateInstanceType(cluster["instance_type"], cluster["region"]) ok = false if cluster['size'].nil? 
cluster["flavor"] = "EKS" if cluster["flavor"].match(/^Kubernetes$/i) - if cluster["flavor"] == "ECS" and cluster["kubernetes"] and !MU::Cloud::AWS.isGovCloud?(cluster["region"]) and !cluster["containers"] and MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(cluster['region']) + if cluster["flavor"] == "ECS" and cluster["kubernetes"] and !MU::Cloud::AWS.isGovCloud?(cluster["region"]) and !cluster["containers"] and MU::Cloud::AWS::ContainerCluster.EKSRegions(cluster['credentials'], region: cluster['region']).include?(cluster['region']) cluster["flavor"] = "EKS" MU.log "Setting flavor of ContainerCluster '#{cluster['name']}' to EKS ('kubernetes' stanza was specified)", MU::NOTICE end - if cluster["flavor"] == "EKS" and !MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(cluster['region']) + if cluster["flavor"] == "EKS" and !MU::Cloud::AWS::ContainerCluster.EKSRegions(cluster['credentials'], region: cluster['region']).include?(cluster['region']) MU.log "EKS is only available in some regions", MU::ERR, details: MU::Cloud::AWS::ContainerCluster.EKSRegions ok = false end From 2a9ddb1ea7b14841c0875f6e2560e19d4d5b9e28 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 7 Apr 2020 13:33:39 -0400 Subject: [PATCH 055/124] AWS: convert all hardcoded AWS resource class cross-references to use MU::Cloud.resourceClass --- modules/mu/providers/aws/bucket.rb | 4 ++-- modules/mu/providers/aws/cache_cluster.rb | 14 +++++------ modules/mu/providers/aws/collection.rb | 6 ++--- modules/mu/providers/aws/container_cluster.rb | 4 ++-- modules/mu/providers/aws/database.rb | 6 ++--- modules/mu/providers/aws/firewall_rule.rb | 4 ++-- modules/mu/providers/aws/group.rb | 16 ++++++------- modules/mu/providers/aws/loadbalancer.rb | 8 +++---- modules/mu/providers/aws/log.rb | 4 ++-- modules/mu/providers/aws/search_domain.rb | 10 ++++---- modules/mu/providers/aws/server.rb | 24 +++++++++---------- modules/mu/providers/aws/server_pool.rb | 16 ++++++------- modules/mu/providers/aws/user.rb | 
14 +++++------ modules/mu/providers/aws/vpc.rb | 4 ++-- 14 files changed, 67 insertions(+), 67 deletions(-) diff --git a/modules/mu/providers/aws/bucket.rb b/modules/mu/providers/aws/bucket.rb index 8a1042372..d0affb698 100644 --- a/modules/mu/providers/aws/bucket.rb +++ b/modules/mu/providers/aws/bucket.rb @@ -97,7 +97,7 @@ def groom ] } - policy_docs = MU::Cloud::AWS::Role.genPolicyDocument(@config['policies'], deploy_obj: @deploy, bucket_style: true) + policy_docs = MU::Cloud.resourceClass("AWS", "Role").genPolicyDocument(@config['policies'], deploy_obj: @deploy, bucket_style: true) policy_docs.each { |doc| MU.log "Applying S3 bucket policy #{doc.keys.first} to bucket #{@cloud_id}", MU::NOTICE, details: JSON.pretty_generate(doc.values.first) MU::Cloud::AWS.s3(credentials: @config['credentials'], region: @config['region']).put_bucket_policy( @@ -309,7 +309,7 @@ def self.find(**args) def self.schema(_config) toplevel_required = [] schema = { - "policies" => MU::Cloud::AWS::Role.condition_schema, + "policies" => MU::Cloud.resourceClass("AWS", "Role").condition_schema, "acl" => { "type" => "string", "enum" => ["private", "public-read", "public-read-write", "authenticated-read"], diff --git a/modules/mu/providers/aws/cache_cluster.rb b/modules/mu/providers/aws/cache_cluster.rb index dedc3ad75..5c35f5118 100644 --- a/modules/mu/providers/aws/cache_cluster.rb +++ b/modules/mu/providers/aws/cache_cluster.rb @@ -199,7 +199,7 @@ def create addStandardTags(member, "cluster", region: @config['region']) } - MU::Cloud::AWS::DNSZone.genericMuDNSEntry( + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry( name: resp.replication_group_id, target: "#{resp.node_groups.first.primary_endpoint.address}.", cloudclass: MU::Cloud::CacheCluster, @@ -207,7 +207,7 @@ def create ) resp.node_groups.first.node_group_members.each { |member| - MU::Cloud::AWS::DNSZone.genericMuDNSEntry( + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry( name: member.cache_cluster_id, target: 
"#{member.read_endpoint.address}.", cloudclass: MU::Cloud::CacheCluster, @@ -413,7 +413,7 @@ def notify } end # XXX this should be a call to @deploy.nameKitten - MU::Cloud::AWS::DNSZone.createRecordsFromConfig(@config['dns_records'], target: repl_group.node_groups.first.primary_endpoint.address) + MU::Cloud.resourceClass("AWS", "DNSZone").createRecordsFromConfig(@config['dns_records'], target: repl_group.node_groups.first.primary_endpoint.address) deploy_struct = { "identifier" => repl_group.replication_group_id, @@ -686,7 +686,7 @@ def self.schema(_config) "type" => "boolean", "description" => "Create a replication group; will be set automatically if +engine+ is +redis+ and +node_count+ is greated than one." }, - "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema + "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema } [toplevel_required, schema] end @@ -787,7 +787,7 @@ def self.terminate_cache_cluster(cluster, noop: false, skipsnapshots: false, reg end # The API is broken, cluster.cache_nodes is returnning an empty array, and the only URL we can get is the config one with cluster.configuration_endpoint.address. - # MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cluster_id, target: , cloudclass: MU::Cloud::CacheCluster, delete: true) + # MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: cluster_id, target: , cloudclass: MU::Cloud::CacheCluster, delete: true) if %w{deleting deleted}.include?(cluster.cache_cluster_status) MU.log "#{cluster_id} has already been terminated", MU::WARN @@ -889,10 +889,10 @@ def self.terminate_replication_group(repl_group, noop: false, skipsnapshots: fal end # What's the likelihood of having more than one node group? maybe iterate over node_groups instead of assuming there is only one? 
- MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: repl_group_id, target: repl_group.node_groups.first.primary_endpoint.address, cloudclass: MU::Cloud::CacheCluster, delete: true) + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: repl_group_id, target: repl_group.node_groups.first.primary_endpoint.address, cloudclass: MU::Cloud::CacheCluster, delete: true) # Assuming we also created DNS records for each of our cluster's read endpoint. repl_group.node_groups.first.node_group_members.each { |member| - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: member.cache_cluster_id, target: member.read_endpoint.address, cloudclass: MU::Cloud::CacheCluster, delete: true) + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: member.cache_cluster_id, target: member.read_endpoint.address, cloudclass: MU::Cloud::CacheCluster, delete: true) } if %w{deleting deleted}.include?(repl_group.status) diff --git a/modules/mu/providers/aws/collection.rb b/modules/mu/providers/aws/collection.rb index f7b8198ee..96b6014c8 100644 --- a/modules/mu/providers/aws/collection.rb +++ b/modules/mu/providers/aws/collection.rb @@ -152,7 +152,7 @@ def create instance_name = MU.deploy_id+"-"+@config['name']+"-"+resource.logical_resource_id MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", instance_name, credentials: @config['credentials']) - instance = MU::Cloud::AWS::Server.notifyDeploy( + instance = MU::Cloud.resourceClass("AWS", "Server").notifyDeploy( @config['name']+"-"+resource.logical_resource_id, resource.physical_resource_id ) @@ -170,7 +170,7 @@ def create mu_zone, _junk = MU::Cloud::DNSZone.find(name: "mu") if !mu_zone.nil? 
- MU::Cloud::AWS::DNSZone.genericMuDNSEntry(instance_name, instance["private_ip_address"], MU::Cloud::Server) + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(instance_name, instance["private_ip_address"], MU::Cloud::Server) else MU::Master.addInstanceToEtcHosts(instance["public_ip_address"], instance_name) end @@ -178,7 +178,7 @@ def create when "AWS::EC2::SecurityGroup" MU::Cloud::AWS.createStandardTags(resource.physical_resource_id) MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @config['credentials']) - MU::Cloud::AWS::FirewallRule.notifyDeploy( + MU::Cloud.resourceClass("AWS", "FirewallRule").notifyDeploy( @config['name']+"-"+resource.logical_resource_id, resource.physical_resource_id ) diff --git a/modules/mu/providers/aws/container_cluster.rb b/modules/mu/providers/aws/container_cluster.rb index 45ee6b40a..0052503a0 100644 --- a/modules/mu/providers/aws/container_cluster.rb +++ b/modules/mu/providers/aws/container_cluster.rb @@ -467,7 +467,7 @@ def self.purge_eks_clusters(noop: false, region: MU.curRegion, credentials: nil) MU.log "Waiting for EKS cluster #{cluster} to finish deleting (status #{status})", MU::NOTICE end } -# MU::Cloud::AWS::Server.removeIAMProfile(cluster) +# MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(cluster) end } end @@ -1216,7 +1216,7 @@ def self.schema(_config) def self.validateConfig(cluster, configurator) ok = true start = Time.now - cluster['size'] = MU::Cloud::AWS::Server.validateInstanceType(cluster["instance_type"], cluster["region"]) + cluster['size'] = MU::Cloud.resourceClass("AWS", "Server").validateInstanceType(cluster["instance_type"], cluster["region"]) ok = false if cluster['size'].nil? 
cluster["flavor"] = "EKS" if cluster["flavor"].match(/^Kubernetes$/i) diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index b1b265511..caa22be20 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -886,7 +886,7 @@ def self.validate_network_cfg(db) ok = true if !db['vpc'] - db["vpc"] = MU::Cloud::AWS::VPC.defaultVpc(db['region'], db['credentials']) + db["vpc"] = MU::Cloud.resourceClass("AWS", "VPC").defaultVpc(db['region'], db['credentials']) if db['vpc'] and !(db['engine'].match(/sqlserver/) and db['create_read_replica']) MU.log "Using default VPC for database '#{db['name']}; this sets 'publicly_accessible' to true.", MU::WARN db['publicly_accessible'] = true @@ -1161,10 +1161,10 @@ def wait_until_available def do_naming if @config["create_cluster"] - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_desc.db_cluster_identifier, target: "#{cloud_desc.endpoint}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: cloud_desc.db_cluster_identifier, target: "#{cloud_desc.endpoint}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) MU.log "Database cluster #{@config['name']} is at #{cloud_desc.endpoint}", MU::SUMMARY else - MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: cloud_desc.db_instance_identifier, target: "#{cloud_desc.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: cloud_desc.db_instance_identifier, target: "#{cloud_desc.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait']) MU.log "Database #{@config['name']} is at #{cloud_desc.endpoint.address}", MU::SUMMARY end if @config['auth_vault'] diff --git a/modules/mu/providers/aws/firewall_rule.rb b/modules/mu/providers/aws/firewall_rule.rb index 6d9806038..89a6f2d4a 100644 --- 
a/modules/mu/providers/aws/firewall_rule.rb +++ b/modules/mu/providers/aws/firewall_rule.rb @@ -398,7 +398,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent # Some services create sneaky rogue ENIs which then block removal of # associated security groups. Find them and fry them. - MU::Cloud::AWS::VPC.purge_interfaces(noop, filters, region: region, credentials: credentials) + MU::Cloud.resourceClass("AWS", "VPC").purge_interfaces(noop, filters, region: region, credentials: credentials) resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_security_groups( filters: filters @@ -421,7 +421,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent # try to get out from under loose network interfaces with which # we're associated if sg.vpc_id - default_sg = MU::Cloud::AWS::VPC.getDefaultSg(sg.vpc_id, region: region, credentials: credentials) + default_sg = MU::Cloud.resourceClass("AWS", "VPC").getDefaultSg(sg.vpc_id, region: region, credentials: credentials) if default_sg eni_resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_network_interfaces( filters: [ {name: "group-id", values: [sg.group_id]} ] diff --git a/modules/mu/providers/aws/group.rb b/modules/mu/providers/aws/group.rb index 71273c23e..1e9be64b2 100644 --- a/modules/mu/providers/aws/group.rb +++ b/modules/mu/providers/aws/group.rb @@ -60,7 +60,7 @@ def groom userid = user userdesc = @deploy.findLitterMate(name: user, type: "users") userid = userdesc.cloud_id if userdesc - found = MU::Cloud::AWS::User.find(cloud_id: userid) + found = MU::Cloud.resourceClass("AWS", "User").find(cloud_id: userid) if found.size == 1 userdesc = found.values.first MU.log "Adding IAM user #{userdesc.path}#{userdesc.user_name} to group #{@mu_name}", MU::NOTICE @@ -88,7 +88,7 @@ def groom # Create these if necessary, then append them to the list of # attachable_policies if @config['raw_policies'] - pol_arns = 
MU::Cloud::AWS::Role.manageRawPolicies( + pol_arns = MU::Cloud.resourceClass("AWS", "Role").manageRawPolicies( @config['raw_policies'], basename: @deploy.getResourceName(@config['name']), credentials: @credentials @@ -114,7 +114,7 @@ def groom attached_policies.each { |a| if !configured_policies.include?(a.policy_arn) MU.log "Removing IAM policy #{a.policy_arn} from group #{@mu_name}", MU::NOTICE - MU::Cloud::AWS::Role.purgePolicy(a.policy_arn, @credentials) + MU::Cloud.resourceClass("AWS", "Role").purgePolicy(a.policy_arn, @credentials) else configured_policies.delete(a.policy_arn) end @@ -131,7 +131,7 @@ def groom end if @config['inline_policies'] - docs = MU::Cloud::AWS::Role.genPolicyDocument(@config['inline_policies'], deploy_obj: @deploy) + docs = MU::Cloud.resourceClass("AWS", "Role").genPolicyDocument(@config['inline_policies'], deploy_obj: @deploy) docs.each { |doc| MU.log "Putting user policy #{doc.keys.first} to group #{@cloud_id} " MU::Cloud::AWS.iam(credentials: @credentials).put_group_policy( @@ -291,7 +291,7 @@ def toKitten(**_args) resp.policy_names.each { |pol_name| pol = MU::Cloud::AWS.iam(credentials: @credentials).get_group_policy(group_name: @cloud_id, policy_name: pol_name) doc = JSON.parse(URI.decode(pol.policy_document)) - bok["inline_policies"] = MU::Cloud::AWS::Role.doc2MuPolicies(pol.policy_name, doc, bok["inline_policies"]) + bok["inline_policies"] = MU::Cloud.resourceClass("AWS", "Role").doc2MuPolicies(pol.policy_name, doc, bok["inline_policies"]) } end @@ -324,7 +324,7 @@ def toKitten(**_args) def self.schema(_config) toplevel_required = [] polschema = MU::Config::Role.schema["properties"]["policies"] - polschema.deep_merge!(MU::Cloud::AWS::Role.condition_schema) + polschema.deep_merge!(MU::Cloud.resourceClass("AWS", "Role").condition_schema) schema = { "inline_policies" => polschema, @@ -364,7 +364,7 @@ def self.validateConfig(group, configurator) # If we're attaching some managed policies, make sure all of the ones # that should 
already exist do indeed exist if group['attachable_policies'] - ok = false if !MU::Cloud::AWS::Role.validateAttachablePolicies( + ok = false if !MU::Cloud.resourceClass("AWS", "Role").validateAttachablePolicies( group['attachable_policies'], credentials: group['credentials'], region: group['region'] @@ -384,7 +384,7 @@ def self.validateConfig(group, configurator) "name" => user } else - found = MU::Cloud::AWS::User.find(cloud_id: user) + found = MU::Cloud.resourceClass("AWS", "User").find(cloud_id: user) if found.nil? or found.empty? MU.log "Error in members for group #{group['name']}: No such user #{user}", MU::ERR ok = false diff --git a/modules/mu/providers/aws/loadbalancer.rb b/modules/mu/providers/aws/loadbalancer.rb index f184dc473..494cb91c5 100644 --- a/modules/mu/providers/aws/loadbalancer.rb +++ b/modules/mu/providers/aws/loadbalancer.rb @@ -163,7 +163,7 @@ def create dnsthread = Thread.new { if !MU::Cloud::AWS.isGovCloud? MU.dupGlobals(parent_thread_id) - generic_mu_dns = MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: @mu_name, target: "#{lb.dns_name}.", cloudclass: MU::Cloud::LoadBalancer, sync_wait: @config['dns_sync_wait']) + generic_mu_dns = MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: @mu_name, target: "#{lb.dns_name}.", cloudclass: MU::Cloud::LoadBalancer, sync_wait: @config['dns_sync_wait']) end } @@ -536,7 +536,7 @@ def create } end if !MU::Cloud::AWS.isGovCloud? - MU::Cloud::AWS::DNSZone.createRecordsFromConfig(@config['dns_records'], target: cloud_desc.dns_name) + MU::Cloud.resourceClass("AWS", "DNSZone").createRecordsFromConfig(@config['dns_records'], target: cloud_desc.dns_name) end end @@ -706,7 +706,7 @@ def self.checkForTagMatch(arn, region, ignoremaster, credentials, classic = fals end if matched if !MU::Cloud::AWS.isGovCloud? 
- MU::Cloud::AWS::DNSZone.genericMuDNSEntry(name: lb.load_balancer_name, target: lb.dns_name, cloudclass: MU::Cloud::LoadBalancer, delete: true) if !noop + MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: lb.load_balancer_name, target: lb.dns_name, cloudclass: MU::Cloud::LoadBalancer, delete: true) if !noop end if classic MU.log "Removing Elastic Load Balancer #{lb.load_balancer_name}" @@ -793,7 +793,7 @@ def self.schema(_config) } } }, - "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema + "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema } [toplevel_required, schema] end diff --git a/modules/mu/providers/aws/log.rb b/modules/mu/providers/aws/log.rb index d9b28df0d..7556f1292 100644 --- a/modules/mu/providers/aws/log.rb +++ b/modules/mu/providers/aws/log.rb @@ -233,8 +233,8 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent # unless noop # MU::Cloud::AWS.iam(credentials: credentials).list_roles.roles.each{ |role| # match_string = "#{MU.deploy_id}.*CloudTrail" - # Maybe we should have a more generic way to delete IAM profiles and policies. The call itself should be moved from MU::Cloud::AWS::Server. -# MU::Cloud::AWS::Server.removeIAMProfile(role.role_name) if role.role_name.match(match_string) + # Maybe we should have a more generic way to delete IAM profiles and policies. The call itself should be moved from MU::Cloud.resourceClass("AWS", "Server"). 
+# MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(role.role_name) if role.role_name.match(match_string) # } # end end diff --git a/modules/mu/providers/aws/search_domain.rb b/modules/mu/providers/aws/search_domain.rb index 9658e3c36..e817c9725 100644 --- a/modules/mu/providers/aws/search_domain.rb +++ b/modules/mu/providers/aws/search_domain.rb @@ -156,8 +156,8 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent begin resp = MU::Cloud::AWS.iam(credentials: credentials).list_roles(marker: marker) resp.roles.each{ |role| - # XXX Maybe we should have a more generic way to delete IAM profiles and policies. The call itself should be moved from MU::Cloud::AWS::Server. -# MU::Cloud::AWS::Server.removeIAMProfile(role.role_name) if role.role_name.match(/^#{Regexp.quote(MU.deploy_id)}/) + # XXX Maybe we should have a more generic way to delete IAM profiles and policies. The call itself should be moved from MU::Cloud.resourceClass("AWS", "Server"). +# MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(role.role_name) if role.role_name.match(/^#{Regexp.quote(MU.deploy_id)}/) } marker = resp.marker end while resp.is_truncated @@ -380,7 +380,7 @@ def self.validateConfig(dom, configurator) if configurator.haveLitterMate?(dom['slow_logs'], "log") dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" } else - log_group = MU::Cloud::AWS::Log.find(cloud_id: dom['slow_logs'], region: dom['region']).values.first + log_group = MU::Cloud.resourceClass("AWS", "Log").find(cloud_id: dom['slow_logs'], region: dom['region']).values.first if !log_group MU.log "Specified slow_logs CloudWatch log group '#{dom['slow_logs']}' in SearchDomain '#{dom['name']}' doesn't appear to exist", MU::ERR ok = false @@ -525,7 +525,7 @@ def genParams(ext = nil) arn = @config['slow_logs'] else log_group = @deploy.findLitterMate(type: "log", name: @config['slow_logs']) - log_group = MU::Cloud::AWS::Log.find(cloud_id: log_group.mu_name, region: 
log_group.cloudobj.config['region']).values.first + log_group = MU::Cloud.resourceClass("AWS", "Log").find(cloud_id: log_group.mu_name, region: log_group.cloudobj.config['region']).values.first if log_group.nil? or log_group.arn.nil? raise MuError, "Failed to retrieve ARN of sibling LogGroup '#{@config['slow_logs']}'" end @@ -552,7 +552,7 @@ def genParams(ext = nil) params[:log_publishing_options]["SEARCH_SLOW_LOGS"] = {} params[:log_publishing_options]["SEARCH_SLOW_LOGS"][:enabled] = true params[:log_publishing_options]["SEARCH_SLOW_LOGS"][:cloud_watch_logs_log_group_arn] = arn - MU::Cloud::AWS::Log.allowService("es.amazonaws.com", arn, @config['region']) + MU::Cloud.resourceClass("AWS", "Log").allowService("es.amazonaws.com", arn, @config['region']) end end diff --git a/modules/mu/providers/aws/server.rb b/modules/mu/providers/aws/server.rb index cc3b6d8cf..58b8cf91d 100644 --- a/modules/mu/providers/aws/server.rb +++ b/modules/mu/providers/aws/server.rb @@ -405,7 +405,7 @@ def getSSHConfig return nil if @config.nil? or @deploy.nil? nat_ssh_key = nat_ssh_user = nat_ssh_host = nil - if !@config["vpc"].nil? and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) + if !@config["vpc"].nil? and !MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) if !@nat.nil? if @nat.is_a?(Struct) && @nat.nat_gateway_id && @nat.nat_gateway_id.start_with?("nat-") raise MuError, "Configured to use NAT Gateway, but I have no route to instance. 
Either use Bastion, or configure VPC peering" @@ -615,7 +615,7 @@ def toKitten(**_args) return nil end - asgs = MU::Cloud::AWS::ServerPool.find( + asgs = MU::Cloud.resourceClass("AWS", "ServerPool").find( instance_id: @cloud_id, region: @config['region'], credentials: @credentials @@ -726,14 +726,14 @@ def toKitten(**_args) if int.groups.size > 0 require 'mu/providers/aws/firewall_rule' - ifaces = MU::Cloud::AWS::FirewallRule.getAssociatedInterfaces(int.groups.map { |sg| sg.group_id }, credentials: @credentials, region: @config['region']) + ifaces = MU::Cloud.resourceClass("AWS", "FirewallRule").getAssociatedInterfaces(int.groups.map { |sg| sg.group_id }, credentials: @credentials, region: @config['region']) done_local_rules = false int.groups.each { |sg| if !done_local_rules and ifaces[sg.group_id].size == 1 - sg_desc = MU::Cloud::AWS::FirewallRule.find(cloud_id: sg.group_id, credentials: @credentials, region: @config['region']).values.first + sg_desc = MU::Cloud.resourceClass("AWS", "FirewallRule").find(cloud_id: sg.group_id, credentials: @credentials, region: @config['region']).values.first if sg_desc - bok["ingress_rules"] = MU::Cloud::AWS::FirewallRule.rulesToBoK(sg_desc.ip_permissions) - bok["ingress_rules"].concat(MU::Cloud::AWS::FirewallRule.rulesToBoK(sg_desc.ip_permissions_egress, egress: true)) + bok["ingress_rules"] = MU::Cloud.resourceClass("AWS", "FirewallRule").rulesToBoK(sg_desc.ip_permissions) + bok["ingress_rules"].concat(MU::Cloud.resourceClass("AWS", "FirewallRule").rulesToBoK(sg_desc.ip_permissions_egress, egress: true)) done_local_rules = true next end @@ -943,7 +943,7 @@ def canonicalIP # Our deploydata gets corrupted often with server pools, this will cause us to use the wrong IP to identify a node # which will cause us to create certificates, DNS records and other artifacts with incorrect information which will cause our deploy to fail. 
# The cloud_id is always correct so lets use 'cloud_desc' to get the correct IPs - if MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) or @deploydata["public_ip_address"].nil? + if MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) or @deploydata["public_ip_address"].nil? @config['canonical_ip'] = cloud_desc.private_ip_address @deploydata["private_ip_address"] = cloud_desc.private_ip_address return cloud_desc.private_ip_address @@ -1672,7 +1672,7 @@ def self.schema(_config) "type" => "object" } }, - "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema, + "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema, "ssh_user" => { "type" => "string", "default" => "root", @@ -2114,7 +2114,7 @@ def configureNetworking subnet = @vpc.getSubnet(cloud_id: cloud_desc.subnet_id) _nat_ssh_key, _nat_ssh_user, nat_ssh_host, _canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig - if subnet.private? and !nat_ssh_host and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) + if subnet.private? 
and !nat_ssh_host and !MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) raise MuError, "#{@mu_name} is in a private subnet (#{subnet}), but has no bastion host configured, and I have no other route to it" end @@ -2206,15 +2206,15 @@ def setAlarms alarm["dimensions"] = [{:name => "InstanceId", :value => @cloud_id}] if alarm["enable_notifications"] - topic_arn = MU::Cloud::AWS::Notification.createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials']) - MU::Cloud::AWS::Notification.subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"], credentials: @config["credentials"]) + topic_arn = MU::Cloud.resourceClass("AWS", "Notification").createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials']) + MU::Cloud.resourceClass("AWS", "Notification").subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"], credentials: @config["credentials"]) alarm["alarm_actions"] = [topic_arn] alarm["ok_actions"] = [topic_arn] end alarm_name = alarm_obj ? 
alarm_obj.cloud_id : "#{@mu_name}-#{alarm['name']}".upcase - MU::Cloud::AWS::Alarm.setAlarm( + MU::Cloud.resourceClass("AWS", "Alarm").setAlarm( name: alarm_name, ok_actions: alarm["ok_actions"], alarm_actions: alarm["alarm_actions"], diff --git a/modules/mu/providers/aws/server_pool.rb b/modules/mu/providers/aws/server_pool.rb index 718fa6e6b..cd60754ce 100644 --- a/modules/mu/providers/aws/server_pool.rb +++ b/modules/mu/providers/aws/server_pool.rb @@ -120,7 +120,7 @@ def create if !@deploy.nocleanup Thread.new { MU.dupGlobals(parent_thread_id) - MU::Cloud::AWS::Server.terminateInstance(id: member.instance_id) + MU::Cloud.resourceClass("AWS", "Server").terminateInstance(id: member.instance_id) } end end @@ -813,7 +813,7 @@ def self.schema(_config) } } }, - "ingress_rules" => MU::Cloud::AWS::FirewallRule.ingressRuleAddtlSchema + "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema } [toplevel_required, schema] end @@ -886,7 +886,7 @@ def self.validateConfig(pool, configurator) launch = pool["basis"]["launch_config"] launch['iam_policies'] ||= pool['iam_policies'] - launch['size'] = MU::Cloud::AWS::Server.validateInstanceType(launch["size"], pool["region"]) + launch['size'] = MU::Cloud.resourceClass("AWS", "Server").validateInstanceType(launch["size"], pool["region"]) ok = false if launch['size'].nil? if !launch['generate_iam_role'] if !launch['iam_role'] and pool['cloud'] != "CloudFormation" @@ -1104,7 +1104,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent end end -# MU::Cloud::AWS::Server.removeIAMProfile(resource_id) +# MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(resource_id) # Generally there should be a launch_configuration of the same name # XXX search for these independently, too? 
@@ -1145,14 +1145,14 @@ def createUpdateLaunchConfig @config['basis']['launch_config']["ami_id"] = @deploy.deployment["images"][@config['basis']['launch_config']["server"]]["image_id"] MU.log "Using AMI '#{@config['basis']['launch_config']["ami_id"]}' from sibling server #{@config['basis']['launch_config']["server"]} in ServerPool #{@mu_name}" elsif !@config['basis']['launch_config']["instance_id"].nil? - @config['basis']['launch_config']["ami_id"] = MU::Cloud::AWS::Server.createImage( + @config['basis']['launch_config']["ami_id"] = MU::Cloud.resourceClass("AWS", "Server").createImage( name: @mu_name, instance_id: @config['basis']['launch_config']["instance_id"], credentials: @config['credentials'], region: @config['region'] )[@config['region']] end - MU::Cloud::AWS::Server.waitForAMI(@config['basis']['launch_config']["ami_id"], credentials: @config['credentials']) + MU::Cloud.resourceClass("AWS", "Server").waitForAMI(@config['basis']['launch_config']["ami_id"], credentials: @config['credentials']) oldlaunch = MU::Cloud::AWS.autoscale(region: @config['region'], credentials: @config['credentials']).describe_launch_configurations( launch_configuration_names: [@mu_name] @@ -1207,12 +1207,12 @@ def createUpdateLaunchConfig vol.delete("encrypted") end end - mapping, _cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol) + mapping, _cfm_mapping = MU::Cloud.resourceClass("AWS", "Server").convertBlockDeviceMapping(vol) storage << mapping } end - storage.concat(MU::Cloud::AWS::Server.ephemeral_mappings) + storage.concat(MU::Cloud.resourceClass("AWS", "Server").ephemeral_mappings) if @config['basis']['launch_config']['generate_iam_role'] role = @deploy.findLitterMate(name: @config['name'], type: "roles") diff --git a/modules/mu/providers/aws/user.rb b/modules/mu/providers/aws/user.rb index 364ae2b8c..d6bdc4cad 100644 --- a/modules/mu/providers/aws/user.rb +++ b/modules/mu/providers/aws/user.rb @@ -109,7 +109,7 @@ def groom # Create these if necessary, then 
append them to the list of # attachable_policies if @config['raw_policies'] - pol_arns = MU::Cloud::AWS::Role.manageRawPolicies( + pol_arns = MU::Cloud.resourceClass("AWS", "Role").manageRawPolicies( @config['raw_policies'], basename: @deploy.getResourceName(@config['name']), credentials: @credentials @@ -135,7 +135,7 @@ def groom attached_policies.each { |a| if !configured_policies.include?(a.policy_arn) MU.log "Removing IAM policy #{a.policy_arn} from user #{@mu_name}", MU::NOTICE - MU::Cloud::AWS::Role.purgePolicy(a.policy_arn, @credentials) + MU::Cloud.resourceClass("AWS", "Role").purgePolicy(a.policy_arn, @credentials) else configured_policies.delete(a.policy_arn) end @@ -151,7 +151,7 @@ def groom end if @config['inline_policies'] - docs = MU::Cloud::AWS::Role.genPolicyDocument(@config['inline_policies'], deploy_obj: @deploy) + docs = MU::Cloud.resourceClass("AWS", "Role").genPolicyDocument(@config['inline_policies'], deploy_obj: @deploy) docs.each { |doc| MU.log "Putting user policy #{doc.keys.first} to user #{@cloud_id} " MU::Cloud::AWS.iam(credentials: @credentials).put_user_policy( @@ -431,7 +431,7 @@ def toKitten(**_args) resp.policy_names.each { |pol_name| pol = MU::Cloud::AWS.iam(credentials: @credentials).get_user_policy(user_name: @cloud_id, policy_name: pol_name) doc = JSON.parse(URI.decode(pol.policy_document)) - bok["inline_policies"] = MU::Cloud::AWS::Role.doc2MuPolicies(pol.policy_name, doc, bok["inline_policies"]) + bok["inline_policies"] = MU::Cloud.resourceClass("AWS", "Role").doc2MuPolicies(pol.policy_name, doc, bok["inline_policies"]) } end @@ -465,7 +465,7 @@ def toKitten(**_args) def self.schema(_config) toplevel_required = [] polschema = MU::Config::Role.schema["properties"]["policies"] - polschema.deep_merge!(MU::Cloud::AWS::Role.condition_schema) + polschema.deep_merge!(MU::Cloud.resourceClass("AWS", "Role").condition_schema) schema = { "inline_policies" => polschema, @@ -517,7 +517,7 @@ def self.validateConfig(user, configurator) # If 
we're attaching some managed policies, make sure all of the ones # that should already exist do indeed exist if user['attachable_policies'] - ok = false if !MU::Cloud::AWS::Role.validateAttachablePolicies( + ok = false if !MU::Cloud.resourceClass("AWS", "Role").validateAttachablePolicies( user['attachable_policies'], credentials: user['credentials'], region: user['region'] @@ -530,7 +530,7 @@ def self.validateConfig(user, configurator) if configurator.haveLitterMate?(group, "groups") need_dependency = true else - found = MU::Cloud::AWS::Group.find(cloud_id: group) + found = MU::Cloud.resourceClass("AWS", "Group").find(cloud_id: group) if found.nil? or found.empty? or (configurator.updating and found.values.first.group.path == "/"+configurator.updating+"/") groupdesc = { diff --git a/modules/mu/providers/aws/vpc.rb b/modules/mu/providers/aws/vpc.rb index 5fc965eb4..017a1069d 100644 --- a/modules/mu/providers/aws/vpc.rb +++ b/modules/mu/providers/aws/vpc.rb @@ -209,7 +209,7 @@ def create if !MU::Cloud::AWS.isGovCloud?(@config['region']) mu_zone = MU::Cloud::DNSZone.find(cloud_id: "platform-mu", credentials: @config['credentials']).values.first if !mu_zone.nil? 
- MU::Cloud::AWS::DNSZone.toggleVPCAccess(id: mu_zone.id, vpc_id: @cloud_id, region: @config['region'], credentials: @config['credentials']) + MU::Cloud.resourceClass("AWS", "DNSZone").toggleVPCAccess(id: mu_zone.id, vpc_id: @cloud_id, region: @config['region'], credentials: @config['credentials']) end end loadSubnets @@ -1225,7 +1225,7 @@ def self.purge_interfaces(noop = false, filters = [{name: "tag:MU-ID", values: [ # suits me just fine rescue Aws::EC2::Errors::AuthFailure => e if !tried_lbs and iface.attachment.instance_owner_id == "amazon-elb" - MU::Cloud::AWS::LoadBalancer.cleanup( + MU::Cloud.resourceClass("AWS", "LoadBalancer").cleanup( noop: noop, region: region, credentials: credentials, From cebf2a7da9bfd46f70549aee12a6f6364e9010c9 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 7 Apr 2020 14:07:28 -0400 Subject: [PATCH 056/124] move ssh and winrm sessions back into the correct part of the class hierarchy --- modules/mu/cloud/resource_base.rb | 6 +- modules/mu/cloud/ssh_sessions.rb | 335 ++++++++++++------------ modules/mu/cloud/winrm_sessions.rb | 392 +++++++++++++++-------------- 3 files changed, 372 insertions(+), 361 deletions(-) diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 562c39ff9..7fbae5698 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -1011,9 +1011,6 @@ def windows? end false end - - require 'mu/cloud/winrm_sessions' - require 'mu/cloud/ssh_sessions' end # Wrap the instance methods that this cloud resource type has to @@ -1084,6 +1081,9 @@ def windows? 
} # end dynamic class generation block } # end resource type iteration + require 'mu/cloud/winrm_sessions' + require 'mu/cloud/ssh_sessions' + end end diff --git a/modules/mu/cloud/ssh_sessions.rb b/modules/mu/cloud/ssh_sessions.rb index 9211bd6f3..884755597 100644 --- a/modules/mu/cloud/ssh_sessions.rb +++ b/modules/mu/cloud/ssh_sessions.rb @@ -37,188 +37,193 @@ def self.handleNetSSHExceptions } end + [:Server, :ServerPool].each { |name| + Object.const_get("MU").const_get("Cloud").const_get(name).class_eval { + + # Basic setup tasks performed on a new node during its first initial + # ssh connection. Most of this is terrible Windows glue. + # @param ssh [Net::SSH::Connection::Session]: The active SSH session to the new node. + def initialSSHTasks(ssh) + win_env_fix = %q{echo 'export PATH="$PATH:/cygdrive/c/opscode/chef/embedded/bin"' > "$HOME/chef-client"; echo 'prev_dir="`pwd`"; for __dir in /proc/registry/HKEY_LOCAL_MACHINE/SYSTEM/CurrentControlSet/Control/Session\ Manager/Environment;do cd "$__dir"; for __var in `ls * | grep -v TEMP | grep -v TMP`;do __var=`echo $__var | tr "[a-z]" "[A-Z]"`; test -z "${!__var}" && export $__var="`cat $__var`" >/dev/null 2>&1; done; done; cd "$prev_dir"; /cygdrive/c/opscode/chef/bin/chef-client.bat $@' >> "$HOME/chef-client"; chmod 700 "$HOME/chef-client"; ( grep "^alias chef-client=" "$HOME/.bashrc" || echo 'alias chef-client="$HOME/chef-client"' >> "$HOME/.bashrc" ) ; ( grep "^alias mu-groom=" "$HOME/.bashrc" || echo 'alias mu-groom="powershell -File \"c:/Program Files/Amazon/Ec2ConfigService/Scripts/UserScript.ps1\""' >> "$HOME/.bashrc" )} + win_installer_check = %q{ls /proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows/CurrentVersion/Installer/} + lnx_installer_check = %q{ps auxww | awk '{print $11}' | egrep '(/usr/bin/yum|apt-get|dpkg)'} + lnx_updates_check = %q{( test -f /.mu-installer-ran-updates || ! test -d /var/lib/cloud/instance ) || echo "userdata still running"} + win_set_pw = nil + + if windows? 
and !@config['use_cloud_provider_windows_password'] + # This covers both the case where we have a windows password passed from a vault and where we need to use a a random Windows Admin password generated by MU::Cloud::Server.generateWindowsPassword + pw = @groomer.getSecret( + vault: @config['mu_name'], + item: "windows_credentials", + field: "password" + ) + win_check_for_pw = %Q{powershell -Command '& {Add-Type -AssemblyName System.DirectoryServices.AccountManagement; $Creds = (New-Object System.Management.Automation.PSCredential("#{@config["windows_admin_username"]}", (ConvertTo-SecureString "#{pw}" -AsPlainText -Force)));$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine); $DS.ValidateCredentials($Creds.GetNetworkCredential().UserName, $Creds.GetNetworkCredential().password); echo $Result}'} + win_set_pw = %Q{powershell -Command "& {(([adsi]('WinNT://./#{@config["windows_admin_username"]}, user')).psbase.invoke('SetPassword', '#{pw}'))}"} + end - # Basic setup tasks performed on a new node during its first initial - # ssh connection. Most of this is terrible Windows glue. - # @param ssh [Net::SSH::Connection::Session]: The active SSH session to the new node. 
- def initialSSHTasks(ssh) - win_env_fix = %q{echo 'export PATH="$PATH:/cygdrive/c/opscode/chef/embedded/bin"' > "$HOME/chef-client"; echo 'prev_dir="`pwd`"; for __dir in /proc/registry/HKEY_LOCAL_MACHINE/SYSTEM/CurrentControlSet/Control/Session\ Manager/Environment;do cd "$__dir"; for __var in `ls * | grep -v TEMP | grep -v TMP`;do __var=`echo $__var | tr "[a-z]" "[A-Z]"`; test -z "${!__var}" && export $__var="`cat $__var`" >/dev/null 2>&1; done; done; cd "$prev_dir"; /cygdrive/c/opscode/chef/bin/chef-client.bat $@' >> "$HOME/chef-client"; chmod 700 "$HOME/chef-client"; ( grep "^alias chef-client=" "$HOME/.bashrc" || echo 'alias chef-client="$HOME/chef-client"' >> "$HOME/.bashrc" ) ; ( grep "^alias mu-groom=" "$HOME/.bashrc" || echo 'alias mu-groom="powershell -File \"c:/Program Files/Amazon/Ec2ConfigService/Scripts/UserScript.ps1\""' >> "$HOME/.bashrc" )} - win_installer_check = %q{ls /proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows/CurrentVersion/Installer/} - lnx_installer_check = %q{ps auxww | awk '{print $11}' | egrep '(/usr/bin/yum|apt-get|dpkg)'} - lnx_updates_check = %q{( test -f /.mu-installer-ran-updates || ! test -d /var/lib/cloud/instance ) || echo "userdata still running"} - win_set_pw = nil - - if windows? 
and !@config['use_cloud_provider_windows_password'] - # This covers both the case where we have a windows password passed from a vault and where we need to use a a random Windows Admin password generated by MU::Cloud::Server.generateWindowsPassword - pw = @groomer.getSecret( - vault: @config['mu_name'], - item: "windows_credentials", - field: "password" - ) - win_check_for_pw = %Q{powershell -Command '& {Add-Type -AssemblyName System.DirectoryServices.AccountManagement; $Creds = (New-Object System.Management.Automation.PSCredential("#{@config["windows_admin_username"]}", (ConvertTo-SecureString "#{pw}" -AsPlainText -Force)));$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine); $DS.ValidateCredentials($Creds.GetNetworkCredential().UserName, $Creds.GetNetworkCredential().password); echo $Result}'} - win_set_pw = %Q{powershell -Command "& {(([adsi]('WinNT://./#{@config["windows_admin_username"]}, user')).psbase.invoke('SetPassword', '#{pw}'))}"} - end - - # There shouldn't be a use case where a domain joined computer goes through initialSSHTasks. Removing Active Directory specific computer rename. - set_hostname = true - hostname = nil - if !@config['active_directory'].nil? - if @config['active_directory']['node_type'] == "domain_controller" && @config['active_directory']['domain_controller_hostname'] - hostname = @config['active_directory']['domain_controller_hostname'] - @mu_windows_name = hostname - set_hostname = true - else - # Do we have an AD specific hostname? - hostname = @mu_windows_name + # There shouldn't be a use case where a domain joined computer goes through initialSSHTasks. Removing Active Directory specific computer rename. 
set_hostname = true - end - else - hostname = @mu_windows_name - end - win_check_for_hostname = %Q{powershell -Command '& {hostname}'} - win_set_hostname = %Q{powershell -Command "& {Rename-Computer -NewName '#{hostname}' -Force -PassThru -Restart; Restart-Computer -Force }"} - - begin - # Set our admin password first, if we need to - if windows? and !win_set_pw.nil? and !win_check_for_pw.nil? - output = ssh.exec!(win_check_for_pw) - raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? - if !output.match(/True/) - MU.log "Setting Windows password for user #{@config['windows_admin_username']}", details: ssh.exec!(win_set_pw) - end - end - if windows? - output = ssh.exec!(win_env_fix) - output += ssh.exec!(win_installer_check) - raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? - if output.match(/InProgress/) - raise MU::Cloud::BootstrapTempFail, "Windows Installer service is still doing something, need to wait" + hostname = nil + if !@config['active_directory'].nil? + if @config['active_directory']['node_type'] == "domain_controller" && @config['active_directory']['domain_controller_hostname'] + hostname = @config['active_directory']['domain_controller_hostname'] + @mu_windows_name = hostname + set_hostname = true + else + # Do we have an AD specific hostname? + hostname = @mu_windows_name + set_hostname = true + end + else + hostname = @mu_windows_name end - if set_hostname and !@hostname_set and @mu_windows_name - output = ssh.exec!(win_check_for_hostname) - raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? - if !output.match(/#{@mu_windows_name}/) - MU.log "Setting Windows hostname to #{@mu_windows_name}", details: ssh.exec!(win_set_hostname) - @hostname_set = true - # Reboot from the API too, in case Windows is flailing - if !@cloudobj.nil? 
- @cloudobj.reboot - else - reboot + win_check_for_hostname = %Q{powershell -Command '& {hostname}'} + win_set_hostname = %Q{powershell -Command "& {Rename-Computer -NewName '#{hostname}' -Force -PassThru -Restart; Restart-Computer -Force }"} + + begin + # Set our admin password first, if we need to + if windows? and !win_set_pw.nil? and !win_check_for_pw.nil? + output = ssh.exec!(win_check_for_pw) + raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? + if !output.match(/True/) + MU.log "Setting Windows password for user #{@config['windows_admin_username']}", details: ssh.exec!(win_set_pw) end - raise MU::Cloud::BootstrapTempFail, "Set hostname in Windows, waiting for reboot" end - end - else - output = ssh.exec!(lnx_installer_check) - if !output.nil? and !output.empty? - raise MU::Cloud::BootstrapTempFail, "Linux package manager is still doing something, need to wait (#{output})" - end - if !@config['skipinitialupdates'] and - !@config['scrub_mu_isms'] and - !@config['userdata_script'] - output = ssh.exec!(lnx_updates_check) - if !output.nil? and output.match(/userdata still running/) - raise MU::Cloud::BootstrapTempFail, "Waiting for initial userdata system updates to complete" + if windows? + output = ssh.exec!(win_env_fix) + output += ssh.exec!(win_installer_check) + raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? + if output.match(/InProgress/) + raise MU::Cloud::BootstrapTempFail, "Windows Installer service is still doing something, need to wait" + end + if set_hostname and !@hostname_set and @mu_windows_name + output = ssh.exec!(win_check_for_hostname) + raise MU::Cloud::BootstrapTempFail, "Got nil output from ssh session, waiting and retrying" if output.nil? 
+ if !output.match(/#{@mu_windows_name}/) + MU.log "Setting Windows hostname to #{@mu_windows_name}", details: ssh.exec!(win_set_hostname) + @hostname_set = true + # Reboot from the API too, in case Windows is flailing + if !@cloudobj.nil? + @cloudobj.reboot + else + reboot + end + raise MU::Cloud::BootstrapTempFail, "Set hostname in Windows, waiting for reboot" + end + end + else + output = ssh.exec!(lnx_installer_check) + if !output.nil? and !output.empty? + raise MU::Cloud::BootstrapTempFail, "Linux package manager is still doing something, need to wait (#{output})" + end + if !@config['skipinitialupdates'] and + !@config['scrub_mu_isms'] and + !@config['userdata_script'] + output = ssh.exec!(lnx_updates_check) + if !output.nil? and output.match(/userdata still running/) + raise MU::Cloud::BootstrapTempFail, "Waiting for initial userdata system updates to complete" + end + end end + rescue RuntimeError => e + raise MU::Cloud::BootstrapTempFail, "Got #{e.inspect} performing initial SSH connect tasks, will try again" end - end - rescue RuntimeError => e - raise MU::Cloud::BootstrapTempFail, "Got #{e.inspect} performing initial SSH connect tasks, will try again" - end - end + end - # @param max_retries [Integer]: Number of connection attempts to make before giving up - # @param retry_interval [Integer]: Number of seconds to wait between connection attempts - # @return [Net::SSH::Connection::Session] - def getSSHSession(max_retries = 12, retry_interval = 30) - ssh_keydir = Etc.getpwnam(@deploy.mu_user).dir+"/.ssh" - nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, _ssh_key_name = getSSHConfig - session = nil - retries = 0 + # @param max_retries [Integer]: Number of connection attempts to make before giving up + # @param retry_interval [Integer]: Number of seconds to wait between connection attempts + # @return [Net::SSH::Connection::Session] + def getSSHSession(max_retries = 12, retry_interval = 30) + ssh_keydir = 
Etc.getpwnam(@deploy.mu_user).dir+"/.ssh" + nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, _ssh_key_name = getSSHConfig + session = nil + retries = 0 - vpc_class = Object.const_get("MU").const_get("Cloud").const_get(@cloud).const_get("VPC") + vpc_class = Object.const_get("MU").const_get("Cloud").const_get(@cloud).const_get("VPC") - # XXX WHY is this a thing - Thread.handle_interrupt(Errno::ECONNREFUSED => :never) { - } + # XXX WHY is this a thing + Thread.handle_interrupt(Errno::ECONNREFUSED => :never) { + } - begin - MU::Cloud.handleNetSSHExceptions - if !nat_ssh_host.nil? - proxy_cmd = "ssh -q -o StrictHostKeyChecking=no -W %h:%p #{nat_ssh_user}@#{nat_ssh_host}" - MU.log "Attempting SSH to #{canonical_ip} (#{@mu_name}) as #{ssh_user} with key #{@deploy.ssh_key_name} using proxy '#{proxy_cmd}'" if retries == 0 - proxy = Net::SSH::Proxy::Command.new(proxy_cmd) - session = Net::SSH.start( - canonical_ip, - ssh_user, - :config => false, - :keys_only => true, - :keys => [ssh_keydir+"/"+nat_ssh_key, ssh_keydir+"/"+@deploy.ssh_key_name], - :verify_host_key => false, - # :verbose => :info, - :host_key => "ssh-rsa", - :port => 22, - :auth_methods => ['publickey'], - :proxy => proxy - ) - else - - MU.log "Attempting SSH to #{canonical_ip} (#{@mu_name}) as #{ssh_user} with key #{ssh_keydir}/#{@deploy.ssh_key_name}" if retries == 0 - session = Net::SSH.start( - canonical_ip, - ssh_user, - :config => false, - :keys_only => true, - :keys => [ssh_keydir+"/"+@deploy.ssh_key_name], - :verify_host_key => false, - # :verbose => :info, - :host_key => "ssh-rsa", - :port => 22, - :auth_methods => ['publickey'] - ) - end - retries = 0 - rescue Net::SSH::HostKeyMismatch => e - MU.log("Remembering new key: #{e.fingerprint}") - e.remember_host! - session.close - retry + begin + MU::Cloud.handleNetSSHExceptions + if !nat_ssh_host.nil? 
+ proxy_cmd = "ssh -q -o StrictHostKeyChecking=no -W %h:%p #{nat_ssh_user}@#{nat_ssh_host}" + MU.log "Attempting SSH to #{canonical_ip} (#{@mu_name}) as #{ssh_user} with key #{@deploy.ssh_key_name} using proxy '#{proxy_cmd}'" if retries == 0 + proxy = Net::SSH::Proxy::Command.new(proxy_cmd) + session = Net::SSH.start( + canonical_ip, + ssh_user, + :config => false, + :keys_only => true, + :keys => [ssh_keydir+"/"+nat_ssh_key, ssh_keydir+"/"+@deploy.ssh_key_name], + :verify_host_key => false, + # :verbose => :info, + :host_key => "ssh-rsa", + :port => 22, + :auth_methods => ['publickey'], + :proxy => proxy + ) + else + + MU.log "Attempting SSH to #{canonical_ip} (#{@mu_name}) as #{ssh_user} with key #{ssh_keydir}/#{@deploy.ssh_key_name}" if retries == 0 + session = Net::SSH.start( + canonical_ip, + ssh_user, + :config => false, + :keys_only => true, + :keys => [ssh_keydir+"/"+@deploy.ssh_key_name], + :verify_host_key => false, + # :verbose => :info, + :host_key => "ssh-rsa", + :port => 22, + :auth_methods => ['publickey'] + ) + end + retries = 0 + rescue Net::SSH::HostKeyMismatch => e + MU.log("Remembering new key: #{e.fingerprint}") + e.remember_host! + session.close + retry # rescue SystemCallError, Timeout::Error, Errno::ECONNRESET, Errno::EHOSTUNREACH, Net::SSH::Proxy::ConnectError, SocketError, Net::SSH::Disconnect, Net::SSH::AuthenticationFailed, IOError, Net::SSH::ConnectionTimeout, Net::SSH::Proxy::ConnectError, MU::Cloud::NetSSHFail => e - rescue SystemExit, Timeout::Error, Net::SSH::AuthenticationFailed, Net::SSH::Disconnect, Net::SSH::ConnectionTimeout, Net::SSH::Proxy::ConnectError, Net::SSH::Exception, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ECONNREFUSED, Errno::EPIPE, SocketError, IOError => e - begin - session.close if !session.nil? - rescue Net::SSH::Disconnect, IOError => e - if windows? - MU.log "Windows has probably closed the ssh session before we could. 
Waiting before trying again", MU::NOTICE - else - MU.log "ssh session was closed unexpectedly, waiting before trying again", MU::NOTICE - end - sleep 10 - end + rescue SystemExit, Timeout::Error, Net::SSH::AuthenticationFailed, Net::SSH::Disconnect, Net::SSH::ConnectionTimeout, Net::SSH::Proxy::ConnectError, Net::SSH::Exception, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ECONNREFUSED, Errno::EPIPE, SocketError, IOError => e + begin + session.close if !session.nil? + rescue Net::SSH::Disconnect, IOError => e + if windows? + MU.log "Windows has probably closed the ssh session before we could. Waiting before trying again", MU::NOTICE + else + MU.log "ssh session was closed unexpectedly, waiting before trying again", MU::NOTICE + end + sleep 10 + end - if retries < max_retries - retries = retries + 1 - msg = "ssh #{ssh_user}@#{@mu_name}: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" - if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0) - MU.log msg, MU::NOTICE - if !vpc_class.haveRouteToInstance?(cloud_desc, credentials: @credentials) and - canonical_ip.match(/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1$)|(^[fF][cCdD])/) and - !nat_ssh_host - MU.log "Node #{@mu_name} at #{canonical_ip} looks like it's in a private address space, and I don't appear to have a direct route to it. 
It may not be possible to connect with this routing!", MU::WARN + if retries < max_retries + retries = retries + 1 + msg = "ssh #{ssh_user}@#{@mu_name}: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" + if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0) + MU.log msg, MU::NOTICE + if !vpc_class.haveRouteToInstance?(cloud_desc, credentials: @credentials) and + canonical_ip.match(/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1$)|(^[fF][cCdD])/) and + !nat_ssh_host + MU.log "Node #{@mu_name} at #{canonical_ip} looks like it's in a private address space, and I don't appear to have a direct route to it. It may not be possible to connect with this routing!", MU::WARN + end + elsif retries/max_retries > 0.5 + MU.log msg, MU::WARN, details: e.inspect + end + sleep retry_interval + retry + else + raise MuError, "#{@mu_name}: #{e.inspect} trying to connect with SSH, max_retries exceeded", e.backtrace end - elsif retries/max_retries > 0.5 - MU.log msg, MU::WARN, details: e.inspect end - sleep retry_interval - retry - else - raise MuError, "#{@mu_name}: #{e.inspect} trying to connect with SSH, max_retries exceeded", e.backtrace + return session end - end - return session - end + } + + } end diff --git a/modules/mu/cloud/winrm_sessions.rb b/modules/mu/cloud/winrm_sessions.rb index 9fb3d9ad7..c799ca8f7 100644 --- a/modules/mu/cloud/winrm_sessions.rb +++ b/modules/mu/cloud/winrm_sessions.rb @@ -19,212 +19,218 @@ module MU # other provisioning layers. 
class Cloud - # Gracefully message and attempt to accommodate the common transient errors peculiar to Windows nodes - # @param e [Exception]: The exception that we're handling - # @param retries [Integer]: The current number of retries, which we'll increment and pass back to the caller - # @param rebootable_fails [Integer]: The current number of reboot-worthy failures, which we'll increment and pass back to the caller - # @param max_retries [Integer]: Maximum number of retries to attempt; we'll raise an exception if this is exceeded - # @param reboot_on_problems [Boolean]: Whether we should try to reboot a "stuck" machine - # @param retry_interval [Integer]: How many seconds to wait before returning for another attempt - def handleWindowsFail(e, retries, rebootable_fails, max_retries: 30, reboot_on_problems: false, retry_interval: 45) - msg = "WinRM connection to https://"+@mu_name+":5986/wsman: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" - if e.class.name == "WinRM::WinRMAuthorizationError" or e.message.match(/execution expired/) and reboot_on_problems - if rebootable_fails > 0 and (rebootable_fails % 7) == 0 - MU.log "#{@mu_name} still misbehaving, forcing Stop and Start from API", MU::WARN - reboot(true) # vicious API stop/start - sleep retry_interval*3 - rebootable_fails = 0 - else - if rebootable_fails == 5 - MU.log "#{@mu_name} misbehaving, attempting to reboot from API", MU::WARN - reboot # graceful API restart - sleep retry_interval*2 + [:Server, :ServerPool].each { |name| + Object.const_get("MU").const_get("Cloud").const_get(name).class_eval { + + # Gracefully message and attempt to accommodate the common transient errors peculiar to Windows nodes + # @param e [Exception]: The exception that we're handling + # @param retries [Integer]: The current number of retries, which we'll increment and pass back to the caller + # @param rebootable_fails [Integer]: The current number of reboot-worthy failures, which we'll increment 
and pass back to the caller + # @param max_retries [Integer]: Maximum number of retries to attempt; we'll raise an exception if this is exceeded + # @param reboot_on_problems [Boolean]: Whether we should try to reboot a "stuck" machine + # @param retry_interval [Integer]: How many seconds to wait before returning for another attempt + def handleWindowsFail(e, retries, rebootable_fails, max_retries: 30, reboot_on_problems: false, retry_interval: 45) + msg = "WinRM connection to https://"+@mu_name+":5986/wsman: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" + if e.class.name == "WinRM::WinRMAuthorizationError" or e.message.match(/execution expired/) and reboot_on_problems + if rebootable_fails > 0 and (rebootable_fails % 7) == 0 + MU.log "#{@mu_name} still misbehaving, forcing Stop and Start from API", MU::WARN + reboot(true) # vicious API stop/start + sleep retry_interval*3 + rebootable_fails = 0 + else + if rebootable_fails == 5 + MU.log "#{@mu_name} misbehaving, attempting to reboot from API", MU::WARN + reboot # graceful API restart + sleep retry_interval*2 + end + rebootable_fails = rebootable_fails + 1 + end end - rebootable_fails = rebootable_fails + 1 - end - end - if retries < max_retries - if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0 and retries != 0) - MU.log msg, MU::NOTICE - elsif retries/max_retries > 0.5 - MU.log msg, MU::WARN, details: e.inspect - end - sleep retry_interval - retries = retries + 1 - else - raise MuError, "#{@mu_name}: #{e.inspect} trying to connect with WinRM, max_retries exceeded", e.backtrace - end - return [retries, rebootable_fails] - end - - def windowsRebootPending?(shell = nil) - if shell.nil? 
- shell = getWinRMSession(1, 30) - end -# if (Get-Item "HKLM:/SOFTWARE/Microsoft/Windows/CurrentVersion/WindowsUpdate/Auto Update/RebootRequired" -EA Ignore) { exit 1 } - cmd = %Q{ - if (Get-ChildItem "HKLM:/Software/Microsoft/Windows/CurrentVersion/Component Based Servicing/RebootPending" -EA Ignore) { - echo "Component Based Servicing/RebootPending is true" - exit 1 - } - if (Get-ItemProperty "HKLM:/SYSTEM/CurrentControlSet/Control/Session Manager" -Name PendingFileRenameOperations -EA Ignore) { - echo "Control/Session Manager/PendingFileRenameOperations is true" - exit 1 - } - try { - $util = [wmiclass]"\\\\.\\root\\ccm\\clientsdk:CCM_ClientUtilities" - $status = $util.DetermineIfRebootPending() - if(($status -ne $null) -and $status.RebootPending){ - echo "WMI says RebootPending is true" - exit 1 - } - } catch { - exit 0 - } - exit 0 - } - resp = shell.run(cmd) - returnval = resp.exitcode == 0 ? false : true - shell.close - returnval - end - - # Basic setup tasks performed on a new node during its first WinRM - # connection. Most of this is terrible Windows glue. - # @param shell [WinRM::Shells::Powershell]: An active Powershell session to the new node. 
- def initialWinRMTasks(shell) - retries = 0 - rebootable_fails = 0 - begin - if !@config['use_cloud_provider_windows_password'] - pw = @groomer.getSecret( - vault: @config['mu_name'], - item: "windows_credentials", - field: "password" - ) - win_check_for_pw = %Q{Add-Type -AssemblyName System.DirectoryServices.AccountManagement; $Creds = (New-Object System.Management.Automation.PSCredential("#{@config["windows_admin_username"]}", (ConvertTo-SecureString "#{pw}" -AsPlainText -Force)));$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine); $DS.ValidateCredentials($Creds.GetNetworkCredential().UserName, $Creds.GetNetworkCredential().password); echo $Result} - resp = shell.run(win_check_for_pw) - if resp.stdout.chomp != "True" - win_set_pw = %Q{(([adsi]('WinNT://./#{@config["windows_admin_username"]}, user')).psbase.invoke('SetPassword', '#{pw}'))} - resp = shell.run(win_set_pw) - puts resp.stdout - MU.log "Resetting Windows host password", MU::NOTICE, details: resp.stdout + if retries < max_retries + if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0 and retries != 0) + MU.log msg, MU::NOTICE + elsif retries/max_retries > 0.5 + MU.log msg, MU::WARN, details: e.inspect + end + sleep retry_interval + retries = retries + 1 + else + raise MuError, "#{@mu_name}: #{e.inspect} trying to connect with WinRM, max_retries exceeded", e.backtrace end + return [retries, rebootable_fails] end - # Install Cygwin here, because for some reason it breaks inside Chef - # XXX would love to not do this here - pkgs = ["bash", "mintty", "vim", "curl", "openssl", "wget", "lynx", "openssh"] - admin_home = "c:/bin/cygwin/home/#{@config["windows_admin_username"]}" - install_cygwin = %Q{ - If (!(Test-Path "c:/bin/cygwin/Cygwin.bat")){ - $WebClient = New-Object System.Net.WebClient - $WebClient.DownloadFile("http://cygwin.com/setup-x86_64.exe","$env:Temp/setup-x86_64.exe") - Start-Process 
-wait -FilePath $env:Temp/setup-x86_64.exe -ArgumentList "-q -n -l $env:Temp/cygwin -R c:/bin/cygwin -s http://mirror.cs.vt.edu/pub/cygwin/cygwin/ -P #{pkgs.join(',')}" - } - if(!(Test-Path #{admin_home})){ - New-Item -type directory -path #{admin_home} - } - if(!(Test-Path #{admin_home}/.ssh)){ - New-Item -type directory -path #{admin_home}/.ssh - } - if(!(Test-Path #{admin_home}/.ssh/authorized_keys)){ - New-Item #{admin_home}/.ssh/authorized_keys -type file -force -value "#{@deploy.ssh_public_key}" + def windowsRebootPending?(shell = nil) + if shell.nil? + shell = getWinRMSession(1, 30) + end +# if (Get-Item "HKLM:/SOFTWARE/Microsoft/Windows/CurrentVersion/WindowsUpdate/Auto Update/RebootRequired" -EA Ignore) { exit 1 } + cmd = %Q{ + if (Get-ChildItem "HKLM:/Software/Microsoft/Windows/CurrentVersion/Component Based Servicing/RebootPending" -EA Ignore) { + echo "Component Based Servicing/RebootPending is true" + exit 1 + } + if (Get-ItemProperty "HKLM:/SYSTEM/CurrentControlSet/Control/Session Manager" -Name PendingFileRenameOperations -EA Ignore) { + echo "Control/Session Manager/PendingFileRenameOperations is true" + exit 1 + } + try { + $util = [wmiclass]"\\\\.\\root\\ccm\\clientsdk:CCM_ClientUtilities" + $status = $util.DetermineIfRebootPending() + if(($status -ne $null) -and $status.RebootPending){ + echo "WMI says RebootPending is true" + exit 1 + } + } catch { + exit 0 + } + exit 0 } - } - resp = shell.run(install_cygwin) - if resp.exitcode != 0 - MU.log "Failed at installing Cygwin", MU::ERR, details: resp + resp = shell.run(cmd) + returnval = resp.exitcode == 0 ? false : true + shell.close + returnval end - hostname = nil - if !@config['active_directory'].nil? - if @config['active_directory']['node_type'] == "domain_controller" && @config['active_directory']['domain_controller_hostname'] - hostname = @config['active_directory']['domain_controller_hostname'] - @mu_windows_name = hostname - else - # Do we have an AD specific hostname? 
- hostname = @mu_windows_name + # Basic setup tasks performed on a new node during its first WinRM + # connection. Most of this is terrible Windows glue. + # @param shell [WinRM::Shells::Powershell]: An active Powershell session to the new node. + def initialWinRMTasks(shell) + retries = 0 + rebootable_fails = 0 + begin + if !@config['use_cloud_provider_windows_password'] + pw = @groomer.getSecret( + vault: @config['mu_name'], + item: "windows_credentials", + field: "password" + ) + win_check_for_pw = %Q{Add-Type -AssemblyName System.DirectoryServices.AccountManagement; $Creds = (New-Object System.Management.Automation.PSCredential("#{@config["windows_admin_username"]}", (ConvertTo-SecureString "#{pw}" -AsPlainText -Force)));$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine); $DS.ValidateCredentials($Creds.GetNetworkCredential().UserName, $Creds.GetNetworkCredential().password); echo $Result} + resp = shell.run(win_check_for_pw) + if resp.stdout.chomp != "True" + win_set_pw = %Q{(([adsi]('WinNT://./#{@config["windows_admin_username"]}, user')).psbase.invoke('SetPassword', '#{pw}'))} + resp = shell.run(win_set_pw) + puts resp.stdout + MU.log "Resetting Windows host password", MU::NOTICE, details: resp.stdout + end + end + + # Install Cygwin here, because for some reason it breaks inside Chef + # XXX would love to not do this here + pkgs = ["bash", "mintty", "vim", "curl", "openssl", "wget", "lynx", "openssh"] + admin_home = "c:/bin/cygwin/home/#{@config["windows_admin_username"]}" + install_cygwin = %Q{ + If (!(Test-Path "c:/bin/cygwin/Cygwin.bat")){ + $WebClient = New-Object System.Net.WebClient + $WebClient.DownloadFile("http://cygwin.com/setup-x86_64.exe","$env:Temp/setup-x86_64.exe") + Start-Process -wait -FilePath $env:Temp/setup-x86_64.exe -ArgumentList "-q -n -l $env:Temp/cygwin -R c:/bin/cygwin -s http://mirror.cs.vt.edu/pub/cygwin/cygwin/ -P #{pkgs.join(',')}" + } + 
if(!(Test-Path #{admin_home})){ + New-Item -type directory -path #{admin_home} + } + if(!(Test-Path #{admin_home}/.ssh)){ + New-Item -type directory -path #{admin_home}/.ssh + } + if(!(Test-Path #{admin_home}/.ssh/authorized_keys)){ + New-Item #{admin_home}/.ssh/authorized_keys -type file -force -value "#{@deploy.ssh_public_key}" + } + } + resp = shell.run(install_cygwin) + if resp.exitcode != 0 + MU.log "Failed at installing Cygwin", MU::ERR, details: resp + end + + hostname = nil + if !@config['active_directory'].nil? + if @config['active_directory']['node_type'] == "domain_controller" && @config['active_directory']['domain_controller_hostname'] + hostname = @config['active_directory']['domain_controller_hostname'] + @mu_windows_name = hostname + else + # Do we have an AD specific hostname? + hostname = @mu_windows_name + end + else + hostname = @mu_windows_name + end + resp = shell.run(%Q{hostname}) + + if resp.stdout.chomp != hostname + resp = shell.run(%Q{Rename-Computer -NewName '#{hostname}' -Force -PassThru -Restart; Restart-Computer -Force}) + MU.log "Renaming Windows host to #{hostname}; this will trigger a reboot", MU::NOTICE, details: resp.stdout + reboot(true) + sleep 30 + end + rescue WinRM::WinRMError, HTTPClient::ConnectTimeoutError => e + retries, rebootable_fails = handleWindowsFail(e, retries, rebootable_fails, max_retries: 10, reboot_on_problems: true, retry_interval: 30) + retry end - else - hostname = @mu_windows_name end - resp = shell.run(%Q{hostname}) - if resp.stdout.chomp != hostname - resp = shell.run(%Q{Rename-Computer -NewName '#{hostname}' -Force -PassThru -Restart; Restart-Computer -Force}) - MU.log "Renaming Windows host to #{hostname}; this will trigger a reboot", MU::NOTICE, details: resp.stdout - reboot(true) - sleep 30 - end - rescue WinRM::WinRMError, HTTPClient::ConnectTimeoutError => e - retries, rebootable_fails = handleWindowsFail(e, retries, rebootable_fails, max_retries: 10, reboot_on_problems: true, retry_interval: 30) - 
retry - end - end - - # Get a privileged Powershell session on the server in question, using SSL-encrypted WinRM with certificate authentication. - # @param max_retries [Integer]: - # @param retry_interval [Integer]: - # @param timeout [Integer]: - # @param winrm_retries [Integer]: - # @param reboot_on_problems [Boolean]: - def getWinRMSession(max_retries = 40, retry_interval = 60, timeout: 30, winrm_retries: 2, reboot_on_problems: false) - _nat_ssh_key, _nat_ssh_user, _nat_ssh_host, canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig - @mu_name ||= @config['mu_name'] - - shell = nil - opts = nil - # and now, a thing I really don't want to do - MU::Master.addInstanceToEtcHosts(canonical_ip, @mu_name) - - # catch exceptions that circumvent our regular call stack - Thread.abort_on_exception = false - Thread.handle_interrupt(WinRM::WinRMWSManFault => :never) { - begin - Thread.handle_interrupt(WinRM::WinRMWSManFault => :immediate) { - MU.log "(Probably harmless) Caught a WinRM::WinRMWSManFault in #{Thread.current.inspect}", MU::DEBUG, details: Thread.current.backtrace + # Get a privileged Powershell session on the server in question, using SSL-encrypted WinRM with certificate authentication. 
+ # @param max_retries [Integer]: + # @param retry_interval [Integer]: + # @param timeout [Integer]: + # @param winrm_retries [Integer]: + # @param reboot_on_problems [Boolean]: + def getWinRMSession(max_retries = 40, retry_interval = 60, timeout: 30, winrm_retries: 2, reboot_on_problems: false) + _nat_ssh_key, _nat_ssh_user, _nat_ssh_host, canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig + @mu_name ||= @config['mu_name'] + + shell = nil + opts = nil + # and now, a thing I really don't want to do + MU::Master.addInstanceToEtcHosts(canonical_ip, @mu_name) + + # catch exceptions that circumvent our regular call stack + Thread.abort_on_exception = false + Thread.handle_interrupt(WinRM::WinRMWSManFault => :never) { + begin + Thread.handle_interrupt(WinRM::WinRMWSManFault => :immediate) { + MU.log "(Probably harmless) Caught a WinRM::WinRMWSManFault in #{Thread.current.inspect}", MU::DEBUG, details: Thread.current.backtrace + } + ensure + # Reraise something useful + end } - ensure - # Reraise something useful - end - } - retries = 0 - rebootable_fails = 0 - begin - loglevel = retries > 4 ? MU::NOTICE : MU::DEBUG - MU.log "Calling WinRM on #{@mu_name}", loglevel, details: opts - opts = { - retry_limit: winrm_retries, - no_ssl_peer_verification: true, # XXX this should not be necessary; we get 'hostname "foo" does not match the server certificate' even when it clearly does match - ca_trust_path: "#{MU.mySSLDir}/Mu_CA.pem", - transport: :ssl, - operation_timeout: timeout, - } - if retries % 2 == 0 # NTLM password over https - opts[:endpoint] = 'https://'+canonical_ip+':5986/wsman' - opts[:user] = @config['windows_admin_username'] - opts[:password] = getWindowsAdminPassword - else # certificate auth over https - opts[:endpoint] = 'https://'+@mu_name+':5986/wsman' - opts[:client_cert] = "#{MU.mySSLDir}/#{@mu_name}-winrm.crt" - opts[:client_key] = "#{MU.mySSLDir}/#{@mu_name}-winrm.key" + retries = 0 + rebootable_fails = 0 + begin + loglevel = retries > 4 ? 
MU::NOTICE : MU::DEBUG + MU.log "Calling WinRM on #{@mu_name}", loglevel, details: opts + opts = { + retry_limit: winrm_retries, + no_ssl_peer_verification: true, # XXX this should not be necessary; we get 'hostname "foo" does not match the server certificate' even when it clearly does match + ca_trust_path: "#{MU.mySSLDir}/Mu_CA.pem", + transport: :ssl, + operation_timeout: timeout, + } + if retries % 2 == 0 # NTLM password over https + opts[:endpoint] = 'https://'+canonical_ip+':5986/wsman' + opts[:user] = @config['windows_admin_username'] + opts[:password] = getWindowsAdminPassword + else # certificate auth over https + opts[:endpoint] = 'https://'+@mu_name+':5986/wsman' + opts[:client_cert] = "#{MU.mySSLDir}/#{@mu_name}-winrm.crt" + opts[:client_key] = "#{MU.mySSLDir}/#{@mu_name}-winrm.key" + end + conn = WinRM::Connection.new(opts) + conn.logger.level = :debug if retries > 2 + MU.log "WinRM connection to #{@mu_name} created", MU::DEBUG, details: conn + shell = conn.shell(:powershell) + shell.run('ipconfig') # verify that we can do something + rescue Errno::EHOSTUNREACH, Errno::ECONNREFUSED, HTTPClient::ConnectTimeoutError, OpenSSL::SSL::SSLError, SocketError, WinRM::WinRMError, Timeout::Error => e + retries, rebootable_fails = handleWindowsFail(e, retries, rebootable_fails, max_retries: max_retries, reboot_on_problems: reboot_on_problems, retry_interval: retry_interval) + retry + ensure + MU::Master.removeInstanceFromEtcHosts(@mu_name) + end + + shell end - conn = WinRM::Connection.new(opts) - conn.logger.level = :debug if retries > 2 - MU.log "WinRM connection to #{@mu_name} created", MU::DEBUG, details: conn - shell = conn.shell(:powershell) - shell.run('ipconfig') # verify that we can do something - rescue Errno::EHOSTUNREACH, Errno::ECONNREFUSED, HTTPClient::ConnectTimeoutError, OpenSSL::SSL::SSLError, SocketError, WinRM::WinRMError, Timeout::Error => e - retries, rebootable_fails = handleWindowsFail(e, retries, rebootable_fails, max_retries: max_retries, 
reboot_on_problems: reboot_on_problems, retry_interval: retry_interval) - retry - ensure - MU::Master.removeInstanceFromEtcHosts(@mu_name) - end - - shell - end + + } + } end From 79731916d958d6de03b65ef9716a29409d16f6b2 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 7 Apr 2020 18:11:58 -0400 Subject: [PATCH 057/124] Azure, Google: do cross-class references correctly; Config: refine that dependency loop detection a bit --- modules/mu/config.rb | 15 +++++-- modules/mu/config/schema_helpers.rb | 40 +++++++++---------- modules/mu/config/vpc.rb | 9 +++-- modules/mu/providers/azure/server.rb | 8 ++-- modules/mu/providers/azure/user.rb | 2 +- modules/mu/providers/google/bucket.rb | 2 +- .../mu/providers/google/container_cluster.rb | 16 ++++---- modules/mu/providers/google/firewall_rule.rb | 4 +- modules/mu/providers/google/function.rb | 6 +-- modules/mu/providers/google/group.rb | 12 +++--- modules/mu/providers/google/habitat.rb | 2 +- modules/mu/providers/google/loadbalancer.rb | 2 +- modules/mu/providers/google/role.rb | 18 ++++----- modules/mu/providers/google/server.rb | 16 ++++---- modules/mu/providers/google/server_pool.rb | 20 +++++----- modules/mu/providers/google/user.rb | 18 ++++----- modules/mu/providers/google/vpc.rb | 4 +- 17 files changed, 102 insertions(+), 92 deletions(-) diff --git a/modules/mu/config.rb b/modules/mu/config.rb index bcb087538..57f3d7f84 100644 --- a/modules/mu/config.rb +++ b/modules/mu/config.rb @@ -872,6 +872,7 @@ def check_dependencies _shortclass, cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type, false) values.each { |resource| next if !resource.kind_of?(Hash) or resource["dependencies"].nil? + resource['dependencies'].uniq! 
resource["dependencies"].each { |dependency| # make sure the thing we depend on really exists @@ -887,16 +888,22 @@ def check_dependencies dependency['name'] = sibling['name'] end - next if dependency['no_create_wait'] + # Check for a circular relationship that will lead to a deadlock + # when creating resource. This only goes one layer deep, and does + # not consider groom-phase deadlocks. + if dependency['phase'] == "groom" or dependency['no_create_wait'] or ( + !MU::Cloud.resourceClass(sibling['cloud'], type).deps_wait_on_my_creation and + !MU::Cloud.resourceClass(resource['cloud'], type).waits_on_parent_completion + ) + next + end - # Check for a circular relationship. This only goes one layer deep, - # but more is a lot to ask. if sibling['dependencies'] sibling['dependencies'].each { |sib_dep| next if sib_dep['type'] != cfg_name or sib_dep['no_create_wait'] cousin = haveLitterMate?(sib_dep['name'], sib_dep['type']) if cousin and cousin['name'] == resource['name'] - MU.log "Circular dependency #{type} #{resource['name']} => #{dependency['name']} => #{sib_dep['name']}", MU::ERR + MU.log "Circular dependency between #{type} #{resource['name']} <=> #{dependency['name']}", MU::ERR, details: [ resource['name'] => dependency, sibling['name'] => sib_dep ] ok = false end } diff --git a/modules/mu/config/schema_helpers.rb b/modules/mu/config/schema_helpers.rb index 1c571a48e..ccacf6d50 100644 --- a/modules/mu/config/schema_helpers.rb +++ b/modules/mu/config/schema_helpers.rb @@ -175,27 +175,27 @@ def self.dependencies_primitive { "type" => "array", "items" => { - "type" => "object", - "description" => "Declare other objects which this resource requires. 
This resource will wait until the others are available to create itself.", - "required" => ["name", "type"], - "additionalProperties" => false, - "properties" => { - "name" => {"type" => "string"}, - "type" => { - "type" => "string", - "enum" => MU::Cloud.resource_types.values.map { |v| v[:cfg_name] } - }, - "phase" => { - "type" => "string", - "description" => "Which part of the creation process of the resource we depend on should we wait for before starting our own creation? Defaults are usually sensible, but sometimes you want, say, a Server to wait on another Server to be completely ready (through its groom phase) before starting up.", - "enum" => ["create", "groom"] - }, - "no_create_wait" => { - "type" => "boolean", - "default" => false, - "description" => "By default, it's assumed that we want to wait on our parents' creation phase, in addition to whatever is declared in this stanza. Setting this flag will bypass waiting on our parent resource's creation, so that our create or groom phase can instead depend only on the parent's groom phase. " - } + "type" => "object", + "description" => "Declare other objects which this resource requires. This resource will wait until the others are available to create itself.", + "required" => ["name", "type"], + "additionalProperties" => false, + "properties" => { + "name" => {"type" => "string"}, + "type" => { + "type" => "string", + "enum" => MU::Cloud.resource_types.values.map { |v| v[:cfg_name] } + }, + "phase" => { + "type" => "string", + "description" => "Which part of the creation process of the resource we depend on should we wait for before starting our own creation? 
Defaults are usually sensible, but sometimes you want, say, a Server to wait on another Server to be completely ready (through its groom phase) before starting up.", + "enum" => ["create", "groom"] + }, + "no_create_wait" => { + "type" => "boolean", + "default" => false, + "description" => "By default, it's assumed that we want to wait on our parents' creation phase, in addition to whatever is declared in this stanza. Setting this flag will bypass waiting on our parent resource's creation, so that our create or groom phase can instead depend only on the parent's groom phase. " } + } } } end diff --git a/modules/mu/config/vpc.rb b/modules/mu/config/vpc.rb index 6c6fe1cf9..1e6724675 100644 --- a/modules/mu/config/vpc.rb +++ b/modules/mu/config/vpc.rb @@ -564,7 +564,8 @@ def self.validate(vpc, configurator) } vpc["dependencies"] << { "type" => "server", - "name" => bastion['name'], + "name" => bastion['name'], + "no_create_wait" => true } vpc["bastion"] = MU::Config::Ref.get( name: bastion['name'], @@ -619,14 +620,16 @@ def self.resolvePeers(vpc, configurator) append << append_me vpc["dependencies"] << { "type" => "vpc", - "name" => sib['name'] + "name" => sib['name'], + "phase" => "groom" } end delete << peer else vpc["dependencies"] << { "type" => "vpc", - "name" => peer['vpc']["name"] + "name" => peer['vpc']["name"], + "phase" => "groom" } end delete << peer if sib['name'] == vpc['name'] diff --git a/modules/mu/providers/azure/server.rb b/modules/mu/providers/azure/server.rb index dd9e2a842..db5a05777 100644 --- a/modules/mu/providers/azure/server.rb +++ b/modules/mu/providers/azure/server.rb @@ -146,7 +146,7 @@ def getSSHConfig return nil if @config.nil? or @deploy.nil? nat_ssh_key = nat_ssh_user = nat_ssh_host = nil - if !@config["vpc"].nil? and !MU::Cloud::Azure::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) + if !@config["vpc"].nil? 
and !MU::Cloud.resourceClass("Azure", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) if !@nat.nil? and @nat.mu_name != @mu_name if @nat.cloud_desc.nil? @@ -189,7 +189,7 @@ def postBoot(instance_id = nil) end _nat_ssh_key, _nat_ssh_user, nat_ssh_host, _canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig - if !nat_ssh_host and !MU::Cloud::Azure::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) + if !nat_ssh_host and !MU::Cloud.resourceClass("Azure", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) # XXX check if canonical_ip is in the private ranges # raise MuError, "#{node} has no NAT host configured, and I have no other route to it" end @@ -384,7 +384,7 @@ def canonicalIP # Our deploydata gets corrupted often with server pools, this will cause us to use the wrong IP to identify a node # which will cause us to create certificates, DNS records and other artifacts with incorrect information which will cause our deploy to fail. 
# The cloud_id is always correct so lets use 'cloud_desc' to get the correct IPs - if MU::Cloud::Azure::VPC.haveRouteToInstance?(cloud_desc, credentials: @config['credentials']) or public_ips.size == 0 + if MU::Cloud.resourceClass("Azure", "VPC").haveRouteToInstance?(cloud_desc, credentials: @config['credentials']) or public_ips.size == 0 @config['canonical_ip'] = private_ips.first return private_ips.first else @@ -463,7 +463,7 @@ def self.schema(config) hosts_schema = MU::Config::CIDR_PRIMITIVE hosts_schema["pattern"] = "^(\\d+\\.\\d+\\.\\d+\\.\\d+\/[0-9]{1,2}|\\*)$" schema = { - "roles" => MU::Cloud::Azure::User.schema(config)[1]["roles"], + "roles" => MU::Cloud.resourceClass("Azure", "User").schema(config)[1]["roles"], "ingress_rules" => { "items" => { "properties" => { diff --git a/modules/mu/providers/azure/user.rb b/modules/mu/providers/azure/user.rb index e9a9f3c1f..479ef64ec 100644 --- a/modules/mu/providers/azure/user.rb +++ b/modules/mu/providers/azure/user.rb @@ -107,7 +107,7 @@ def getSecret def groom if @config['roles'] @config['roles'].each { |role| - MU::Cloud::Azure::Role.assignTo(cloud_desc.principal_id, role_name: role, credentials: @config['credentials']) + MU::Cloud.resourceClass("Azure", "Role").assignTo(cloud_desc.principal_id, role_name: role, credentials: @config['credentials']) } end end diff --git a/modules/mu/providers/google/bucket.rb b/modules/mu/providers/google/bucket.rb index 29626ca0e..e1d8cc963 100644 --- a/modules/mu/providers/google/bucket.rb +++ b/modules/mu/providers/google/bucket.rb @@ -243,7 +243,7 @@ def toKitten(**_args) grantees[binding.role] << { "id" => grantee } elsif grantee.match(/^serviceAccount:(.*)/) sa_name = Regexp.last_match[1] - if MU::Cloud::Google::User.cannedServiceAcctName?(sa_name) + if MU::Cloud.resourceClass("Google", "User").cannedServiceAcctName?(sa_name) grantees[binding.role] << { "id" => grantee } else grantees[binding.role] << MU::Config::Ref.get( diff --git 
a/modules/mu/providers/google/container_cluster.rb b/modules/mu/providers/google/container_cluster.rb index aeb8e9a5a..3aaab2475 100644 --- a/modules/mu/providers/google/container_cluster.rb +++ b/modules/mu/providers/google/container_cluster.rb @@ -657,7 +657,7 @@ def toKitten(**_args) end if bok['service_account'] - found = MU::Cloud::Google::User.find( + found = MU::Cloud.resourceClass("Google", "User").find( credentials: bok['credentials'], project: bok['project'], cloud_id: bok['service_account'] @@ -747,7 +747,7 @@ def self.quality def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) + return if !MU::Cloud.resourceClass("Google", "Habitat").isLive?(flags["habitat"], credentials) clusters = [] # Make sure we catch regional *and* zone clusters @@ -817,10 +817,10 @@ def self.schema(config) "type" => "integer", "description" => "The number of local SSD disks to be attached to workers. 
See https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits" }, - "ssh_user" => MU::Cloud::Google::Server.schema(config)[1]["ssh_user"], - "metadata" => MU::Cloud::Google::Server.schema(config)[1]["metadata"], - "service_account" => MU::Cloud::Google::Server.schema(config)[1]["service_account"], - "scopes" => MU::Cloud::Google::Server.schema(config)[1]["scopes"], + "ssh_user" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["ssh_user"], + "metadata" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["metadata"], + "service_account" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["service_account"], + "scopes" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["scopes"], "private_cluster" => { "description" => "Set a GKE cluster to be private, that is segregated into its own hidden VPC.", "type" => "object", @@ -1045,7 +1045,7 @@ def self.validateConfig(cluster, configurator) ok = false end else - cluster = MU::Cloud::Google::User.genericServiceAccount(cluster, configurator) + cluster = MU::Cloud.resourceClass("Google", "User").genericServiceAccount(cluster, configurator) end if cluster['dependencies'] @@ -1141,7 +1141,7 @@ def self.validateConfig(cluster, configurator) end end - cluster['instance_type'] = MU::Cloud::Google::Server.validateInstanceType(cluster["instance_type"], cluster["region"], project: cluster['project'], credentials: cluster['credentials']) + cluster['instance_type'] = MU::Cloud.resourceClass("Google", "Server").validateInstanceType(cluster["instance_type"], cluster["region"], project: cluster['project'], credentials: cluster['credentials']) ok = false if cluster['instance_type'].nil? 
if !MU::Master.kubectl diff --git a/modules/mu/providers/google/firewall_rule.rb b/modules/mu/providers/google/firewall_rule.rb index 1a963d0a2..59bf5600e 100644 --- a/modules/mu/providers/google/firewall_rule.rb +++ b/modules/mu/providers/google/firewall_rule.rb @@ -209,7 +209,7 @@ def self.quality # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) + return if !MU::Cloud.resourceClass("Google", "Habitat").isLive?(flags["habitat"], credentials) filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} if !ignoremaster and MU.mu_public_ip filter += %Q{ AND (labels.mu-master-ip = "#{MU.mu_public_ip.gsub(/\./, "_")}")} @@ -440,7 +440,7 @@ def self.validateConfig(acl, config) elsif acl['vpc']['habitat'] and acl['vpc']['habitat']['name'] acl['vpc']['project'] = acl['vpc']['habitat']['name'] end - correct_vpc = MU::Cloud::Google::VPC.pickVPC( + correct_vpc = MU::Cloud.resourceClass("Google", "VPC").pickVPC( acl['vpc'], acl, "firewall_rule", diff --git a/modules/mu/providers/google/function.rb b/modules/mu/providers/google/function.rb index d57af699d..1ee04b2c9 100644 --- a/modules/mu/providers/google/function.rb +++ b/modules/mu/providers/google/function.rb @@ -235,7 +235,7 @@ def self.quality # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) + return if !MU::Cloud.resourceClass("Google", "Habitat").isLive?(flags["habitat"], credentials) # Make sure we catch regional *and* zone functions found = MU::Cloud::Google::Function.find(credentials: credentials, region: region, project: flags["habitat"]) found.each_pair { |cloud_id, desc| @@ -373,7 +373,7 @@ def self.schema(config) 
} } }, - "service_account" => MU::Cloud::Google::Server.schema(config)[1]["service_account"], + "service_account" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["service_account"], "runtime" => { "type" => "string", "enum" => %w{nodejs go python nodejs8 nodejs10 python37 go111 go113}, @@ -524,7 +524,7 @@ def self.validateConfig(function, configurator) ok = false end else - function = MU::Cloud::Google::User.genericServiceAccount(function, configurator) + function = MU::Cloud.resourceClass("Google", "User").genericServiceAccount(function, configurator) end # siblings = configurator.haveLitterMate?(nil, "vpcs", has_multiple: true) diff --git a/modules/mu/providers/google/group.rb b/modules/mu/providers/google/group.rb index 679731e36..810531f20 100644 --- a/modules/mu/providers/google/group.rb +++ b/modules/mu/providers/google/group.rb @@ -44,7 +44,7 @@ def create resp = MU::Cloud::Google.admin_directory(credentials: @credentials).insert_group(group_obj) @cloud_id = resp.email - MU::Cloud::Google::Role.bindFromConfig("group", @cloud_id, @config['roles'], credentials: @config['credentials']) + MU::Cloud.resourceClass("Google", "Role").bindFromConfig("group", @cloud_id, @config['roles'], credentials: @config['credentials']) else @cloud_id = @config['name'].sub(/@.*/, "")+"@"+@config['domain'] end @@ -52,7 +52,7 @@ def create # Called automatically by {MU::Deploy#createResources} def groom - MU::Cloud::Google::Role.bindFromConfig("group", @cloud_id, @config['roles'], credentials: @config['credentials'], debug: true) + MU::Cloud.resourceClass("Google", "Role").bindFromConfig("group", @cloud_id, @config['roles'], credentials: @config['credentials'], debug: true) if @config['members'] resolved_desired = [] @@ -166,7 +166,7 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) if flags['known'] flags['known'].each { |group| - MU::Cloud::Google::Role.removeBindings("group", group, credentials: credentials, noop: noop) + 
MU::Cloud.resourceClass("Google", "Role").removeBindings("group", group, credentials: credentials, noop: noop) } end end @@ -222,10 +222,10 @@ def toKitten(**_args) # type: "users" # ) # } - group_roles = MU::Cloud::Google::Role.getAllBindings(@config['credentials'])["by_entity"] + group_roles = MU::Cloud.resourceClass("Google", "Role").getAllBindings(@config['credentials'])["by_entity"] if group_roles["group"] and group_roles["group"][bok['cloud_id']] and group_roles["group"][bok['cloud_id']].size > 0 - bok['roles'] = MU::Cloud::Google::Role.entityBindingsToSchema(group_roles["group"][bok['cloud_id']], credentials: @config['credentials']) + bok['roles'] = MU::Cloud.resourceClass("Google", "Role").entityBindingsToSchema(group_roles["group"][bok['cloud_id']], credentials: @config['credentials']) end bok @@ -264,7 +264,7 @@ def self.schema(_config) "roles" => { "type" => "array", - "items" => MU::Cloud::Google::Role.ref_schema + "items" => MU::Cloud.resourceClass("Google", "Role").ref_schema } } [toplevel_required, schema] diff --git a/modules/mu/providers/google/habitat.rb b/modules/mu/providers/google/habitat.rb index cb42a7562..d1bf11876 100644 --- a/modules/mu/providers/google/habitat.rb +++ b/modules/mu/providers/google/habitat.rb @@ -61,7 +61,7 @@ def create if @config['parent']['name'] and !@config['parent']['id'] @config['parent']['deploy_id'] = @deploy.deploy_id end - parent = MU::Cloud::Google::Folder.resolveParent(@config['parent'], credentials: @config['credentials']) + parent = MU::Cloud.resourceClass("Google", "Folder").resolveParent(@config['parent'], credentials: @config['credentials']) if !parent MU.log "Unable to resolve parent resource of Google Project #{@config['name']}", MU::ERR, details: @config['parent'] raise "Unable to resolve parent resource of Google Project #{@config['name']}" diff --git a/modules/mu/providers/google/loadbalancer.rb b/modules/mu/providers/google/loadbalancer.rb index 3ce30df34..3a4a91e72 100644 --- 
a/modules/mu/providers/google/loadbalancer.rb +++ b/modules/mu/providers/google/loadbalancer.rb @@ -148,7 +148,7 @@ def self.quality # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: nil, credentials: nil, flags: {}) flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) + return if !MU::Cloud.resourceClass("Google", "Habitat").isLive?(flags["habitat"], credentials) filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} if !ignoremaster and MU.mu_public_ip filter += %Q{ AND (labels.mu-master-ip = "#{MU.mu_public_ip.gsub(/\./, "_")}")} diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index 221e60511..1c6449e07 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -271,13 +271,13 @@ def self.removeBindings(entity_type, entity_id, credentials: nil, noop: false) my_org = MU::Cloud::Google.getOrg(credentials) if my_org scopes["organizations"] = [my_org.name] - folders = MU::Cloud::Google::Folder.find(credentials: credentials) + folders = MU::Cloud.resourceClass("Google", "Folder").find(credentials: credentials) if folders and folders.size > 0 scopes["folders"] = folders.keys end end - projects = MU::Cloud::Google::Habitat.find(credentials: credentials) + projects = MU::Cloud.resourceClass("Google", "Habitat").find(credentials: credentials) if projects and projects.size > 0 scopes["projects"] = projects.keys end @@ -407,12 +407,12 @@ def self.bindFromConfig(entity_type, entity_id, cfg, credentials: nil, deploy: n # email field (which is the "real" id most of the time) real_id = nil if entity_type == "group" - found = MU::Cloud::Google::Group.find(cloud_id: entity_id, credentials: credentials) + found = MU::Cloud.resourceClass("Google", "Group").find(cloud_id: entity_id, credentials: credentials) if found[entity_id] real_id = found[entity_id].id end elsif entity_type == "user" - 
found = MU::Cloud::Google::User.find(cloud_id: entity_id, credentials: credentials) + found = MU::Cloud.resourceClass("Google", "User").find(cloud_id: entity_id, credentials: credentials) if found[entity_id] real_id = found[entity_id].id end @@ -563,7 +563,7 @@ def self.find(**args) if args[:project] canned = Hash[MU::Cloud::Google.iam(credentials: args[:credentials]).list_roles.roles.map { |r| [r.name, r] }] begin - MU::Cloud::Google::Habitat.bindings(args[:project], credentials: args[:credentials]).each { |binding| + MU::Cloud.resourceClass("Google", "Habitat").bindings(args[:project], credentials: args[:credentials]).each { |binding| found[binding.role] = canned[binding.role] } rescue ::Google::Apis::ClientError => e @@ -908,15 +908,15 @@ def self.insertBinding(scopetype, scope, binding = nil, member_type: nil, member insertBinding("organizations", my_org.name, binding) } - MU::Cloud::Google::Folder.find(credentials: credentials).keys.each { |folder| - MU::Cloud::Google::Folder.bindings(folder, credentials: credentials).each { |binding| + MU::Cloud.resourceClass("Google", "Folder").find(credentials: credentials).keys.each { |folder| + MU::Cloud.resourceClass("Google", "Folder").bindings(folder, credentials: credentials).each { |binding| insertBinding("folders", folder, binding) } } end - MU::Cloud::Google::Habitat.find(credentials: credentials).keys.each { |project| + MU::Cloud.resourceClass("Google", "Habitat").find(credentials: credentials).keys.each { |project| begin - MU::Cloud::Google::Habitat.bindings(project, credentials: credentials).each { |binding| + MU::Cloud.resourceClass("Google", "Habitat").bindings(project, credentials: credentials).each { |binding| insertBinding("projects", project, binding) } rescue ::Google::Apis::ClientError => e diff --git a/modules/mu/providers/google/server.rb b/modules/mu/providers/google/server.rb index eb24d1c18..eca8f401d 100644 --- a/modules/mu/providers/google/server.rb +++ b/modules/mu/providers/google/server.rb @@ 
-492,7 +492,7 @@ def getSSHConfig return nil if @config.nil? or @deploy.nil? nat_ssh_key = nat_ssh_user = nat_ssh_host = nil - if !@config["vpc"].nil? and !MU::Cloud::Google::VPC.haveRouteToInstance?(cloud_desc, credentials: @config['credentials']) + if !@config["vpc"].nil? and !MU::Cloud.resourceClass("Google", "VPC").haveRouteToInstance?(cloud_desc, credentials: @config['credentials']) if !@nat.nil? if @nat.cloud_desc.nil? @@ -623,7 +623,7 @@ def postBoot(instance_id = nil) end _nat_ssh_key, _nat_ssh_user, nat_ssh_host, _canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig - if !nat_ssh_host and !MU::Cloud::Google::VPC.haveRouteToInstance?(cloud_desc, credentials: @config['credentials']) + if !nat_ssh_host and !MU::Cloud.resourceClass("Google", "VPC").haveRouteToInstance?(cloud_desc, credentials: @config['credentials']) # XXX check if canonical_ip is in the private ranges # raise MuError, "#{node} has no NAT host configured, and I have no other route to it" end @@ -992,7 +992,7 @@ def canonicalIP # Our deploydata gets corrupted often with server pools, this will cause us to use the wrong IP to identify a node # which will cause us to create certificates, DNS records and other artifacts with incorrect information which will cause our deploy to fail. 
# The cloud_id is always correct so lets use 'cloud_desc' to get the correct IPs - if MU::Cloud::Google::VPC.haveRouteToInstance?(cloud_desc, credentials: @config['credentials']) or public_ips.size == 0 + if MU::Cloud.resourceClass("Google", "VPC").haveRouteToInstance?(cloud_desc, credentials: @config['credentials']) or public_ips.size == 0 @config['canonical_ip'] = private_ips.first return private_ips.first else @@ -1292,7 +1292,7 @@ def self.quality # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) + return if !MU::Cloud.resourceClass("Google", "Habitat").isLive?(flags["habitat"], credentials) # XXX make damn sure MU.deploy_id is set filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} @@ -1356,7 +1356,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent def self.schema(config) toplevel_required = [] schema = { - "roles" => MU::Cloud::Google::User.schema(config)[1]["roles"], + "roles" => MU::Cloud.resourceClass("Google", "User").schema(config)[1]["roles"], "windows_admin_username" => { "type" => "string", "default" => "muadmin" @@ -1534,12 +1534,12 @@ def self.validateConfig(server, configurator) ok = false end else - server = MU::Cloud::Google::User.genericServiceAccount(server, configurator) + server = MU::Cloud.resourceClass("Google", "User").genericServiceAccount(server, configurator) end subnets = nil if !server['vpc'] - vpcs = MU::Cloud::Google::VPC.find(credentials: server['credentials']) + vpcs = MU::Cloud.resourceClass("Google", "VPC").find(credentials: server['credentials']) if vpcs["default"] server["vpc"] ||= {} server["vpc"]["vpc_id"] = vpcs["default"].self_link @@ -1554,7 +1554,7 @@ def self.validateConfig(server, configurator) if !server['vpc']['subnet_id'] and server['vpc']['subnet_name'].nil? 
if !subnets if server["vpc"]["vpc_id"] - vpcs = MU::Cloud::Google::VPC.find(cloud_id: server["vpc"]["vpc_id"]) + vpcs = MU::Cloud.resourceClass("Google", "VPC").find(cloud_id: server["vpc"]["vpc_id"]) subnets = vpcs["default"].subnetworks.sample end end diff --git a/modules/mu/providers/google/server_pool.rb b/modules/mu/providers/google/server_pool.rb index 0551282a8..3ae7fb757 100644 --- a/modules/mu/providers/google/server_pool.rb +++ b/modules/mu/providers/google/server_pool.rb @@ -89,8 +89,8 @@ def create machine_type: size, service_accounts: [@service_acct], labels: labels, - disks: MU::Cloud::Google::Server.diskConfig(@config, false, false, credentials: @config['credentials']), - network_interfaces: MU::Cloud::Google::Server.interfaceConfig(@config, @vpc), + disks: MU::Cloud.resourceClass("Google", "Server").diskConfig(@config, false, false, credentials: @config['credentials']), + network_interfaces: MU::Cloud.resourceClass("Google", "Server").interfaceConfig(@config, @vpc), metadata: metadata, tags: MU::Cloud::Google.compute(:Tags).new(items: [MU::Cloud::Google.nameStr(@mu_name)]) ) @@ -324,11 +324,11 @@ def toKitten(**_args) def self.schema(config) toplevel_required = [] schema = { - "ssh_user" => MU::Cloud::Google::Server.schema(config)[1]["ssh_user"], - "metadata" => MU::Cloud::Google::Server.schema(config)[1]["metadata"], - "service_account" => MU::Cloud::Google::Server.schema(config)[1]["service_account"], - "scopes" => MU::Cloud::Google::Server.schema(config)[1]["scopes"], - "network_tags" => MU::Cloud::Google::Server.schema(config)[1]["network_tags"], + "ssh_user" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["ssh_user"], + "metadata" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["metadata"], + "service_account" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["service_account"], + "scopes" => MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["scopes"], + "network_tags" => 
MU::Cloud.resourceClass("Google", "Server").schema(config)[1]["network_tags"], "availability_zone" => { "type" => "string", "description" => "Target a specific availability zone for this pool, which will create zonal instance managers and scalers instead of regional ones." @@ -382,7 +382,7 @@ def self.validateConfig(pool, configurator) if pool['basis']['launch_config'] launch = pool["basis"]["launch_config"] - launch['size'] = MU::Cloud::Google::Server.validateInstanceType(launch["size"], pool["region"]) + launch['size'] = MU::Cloud.resourceClass("Google", "Server").validateInstanceType(launch["size"], pool["region"]) ok = false if launch['size'].nil? if launch['image_id'].nil? @@ -397,7 +397,7 @@ def self.validateConfig(pool, configurator) real_image = nil begin - real_image = MU::Cloud::Google::Server.fetchImage(launch['image_id'].to_s, credentials: pool['credentials']) + real_image = MU::Cloud.resourceClass("Google", "Server").fetchImage(launch['image_id'].to_s, credentials: pool['credentials']) rescue ::Google::Apis::ClientError => e MU.log e.inspect, MU::WARN end @@ -433,7 +433,7 @@ def self.quality # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) + return if !MU::Cloud.resourceClass("Google", "Habitat").isLive?(flags["habitat"], credentials) filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} if !ignoremaster and MU.mu_public_ip filter += %Q{ AND (labels.mu-master-ip = "#{MU.mu_public_ip.gsub(/\./, "_")}")} diff --git a/modules/mu/providers/google/user.rb b/modules/mu/providers/google/user.rb index 139876058..acda02adf 100644 --- a/modules/mu/providers/google/user.rb +++ b/modules/mu/providers/google/user.rb @@ -90,7 +90,7 @@ def create end elsif @config['external'] @cloud_id = @config['email'] - MU::Cloud::Google::Role.bindFromConfig("user", 
@cloud_id, @config['roles'], credentials: @config['credentials']) + MU::Cloud.resourceClass("Google", "Role").bindFromConfig("user", @cloud_id, @config['roles'], credentials: @config['credentials']) else if !@config['email'] domains = MU::Cloud::Google.admin_directory(credentials: @credentials).list_domains(@customer) @@ -122,10 +122,10 @@ def create # Called automatically by {MU::Deploy#createResources} def groom if @config['external'] - MU::Cloud::Google::Role.bindFromConfig("user", @cloud_id, @config['roles'], credentials: @config['credentials']) + MU::Cloud.resourceClass("Google", "Role").bindFromConfig("user", @cloud_id, @config['roles'], credentials: @config['credentials']) elsif @config['type'] == "interactive" need_update = false - MU::Cloud::Google::Role.bindFromConfig("user", @cloud_id, @config['roles'], credentials: @config['credentials']) + MU::Cloud.resourceClass("Google", "Role").bindFromConfig("user", @cloud_id, @config['roles'], credentials: @config['credentials']) if @config['force_password_change'] and !cloud_desc.change_password_at_next_login MU.log "Forcing #{@mu_name} to change their password at next login", MU::NOTICE @@ -170,7 +170,7 @@ def groom end else - MU::Cloud::Google::Role.bindFromConfig("serviceAccount", @cloud_id.gsub(/.*?\/([^\/]+)$/, '\1'), @config['roles'], credentials: @config['credentials']) + MU::Cloud.resourceClass("Google", "Role").bindFromConfig("serviceAccount", @cloud_id.gsub(/.*?\/([^\/]+)$/, '\1'), @config['roles'], credentials: @config['credentials']) if @config['create_api_key'] resp = MU::Cloud::Google.iam(credentials: @config['credentials']).list_project_service_account_keys( cloud_desc.name @@ -275,7 +275,7 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) next if user_email.nil? 
next if !user_email.match(/^[^\/]+@[^\/]+$/) - MU::Cloud::Google::Role.removeBindings("user", user_email, credentials: credentials, noop: noop) + MU::Cloud.resourceClass("Google", "Role").removeBindings("user", user_email, credentials: credentials, noop: noop) } end @@ -416,7 +416,7 @@ def toKitten(**_args) return nil end - user_roles = MU::Cloud::Google::Role.getAllBindings(@config['credentials'])["by_entity"] + user_roles = MU::Cloud.resourceClass("Google", "Role").getAllBindings(@config['credentials'])["by_entity"] if cloud_desc.nil? MU.log "FAILED TO FIND CLOUD DESCRIPTOR FOR #{self}", MU::ERR, details: @config @@ -439,13 +439,13 @@ def toKitten(**_args) if user_roles["serviceAccount"] and user_roles["serviceAccount"][bok['cloud_id']] and user_roles["serviceAccount"][bok['cloud_id']].size > 0 - bok['roles'] = MU::Cloud::Google::Role.entityBindingsToSchema(user_roles["serviceAccount"][bok['cloud_id']]) + bok['roles'] = MU::Cloud.resourceClass("Google", "Role").entityBindingsToSchema(user_roles["serviceAccount"][bok['cloud_id']]) end else if user_roles["user"] and user_roles["user"][bok['cloud_id']] and user_roles["user"][bok['cloud_id']].size > 0 - bok['roles'] = MU::Cloud::Google::Role.entityBindingsToSchema(user_roles["user"][bok['cloud_id']], credentials: @config['credentials']) + bok['roles'] = MU::Cloud.resourceClass("Google", "Role").entityBindingsToSchema(user_roles["user"][bok['cloud_id']], credentials: @config['credentials']) end bok['given_name'] = cloud_desc.name.given_name if cloud_desc.name.given_name and !cloud_desc.name.given_name.empty? bok['family_name'] = cloud_desc.name.family_name if cloud_desc.name.family_name and !cloud_desc.name.family_name.empty? 
@@ -528,7 +528,7 @@ def self.schema(_config) "roles" => { "type" => "array", "description" => "One or more Google IAM roles to associate with this user.", - "items" => MU::Cloud::Google::Role.ref_schema + "items" => MU::Cloud.resourceClass("Google", "Role").ref_schema } } [toplevel_required, schema] diff --git a/modules/mu/providers/google/vpc.rb b/modules/mu/providers/google/vpc.rb index 38549a371..226982a05 100644 --- a/modules/mu/providers/google/vpc.rb +++ b/modules/mu/providers/google/vpc.rb @@ -539,7 +539,7 @@ def self.quality # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) + return if !MU::Cloud.resourceClass("Google", "Habitat").isLive?(flags["habitat"], credentials) filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} if !ignoremaster and MU.mu_public_ip filter += %Q{ AND (labels.mu-master-ip = "#{MU.mu_public_ip.gsub(/\./, "_")}")} @@ -565,7 +565,7 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) MU.log e.message, MU::WARN if e.message.match(/Failed to delete network (.+)/) network_name = Regexp.last_match[1] - fwrules = MU::Cloud::Google::FirewallRule.find(project: flags['habitat'], credentials: credentials) + fwrules = MU::Cloud.resourceClass("Google", "FirewallRule").find(project: flags['habitat'], credentials: credentials) fwrules.reject! 
{ |_name, desc| !desc.network.match(/.*?\/#{Regexp.quote(network_name)}$/) } From f17db257845c064edb97c57910503cfbc9e9edf6 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 7 Apr 2020 20:18:38 -0400 Subject: [PATCH 058/124] Config, Adoption: more alignment with new class referencing regime --- bin/mu-gen-docs | 6 ++---- modules/mu/adoption.rb | 11 +++++------ modules/mu/config/doc_helpers.rb | 2 +- modules/mu/config/ref.rb | 2 +- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/bin/mu-gen-docs b/bin/mu-gen-docs index d0f9c399b..ac8f3d4d8 100755 --- a/bin/mu-gen-docs +++ b/bin/mu-gen-docs @@ -79,8 +79,7 @@ EOF impl_counts[type] ||= 0 [a, b].each { |cloud| begin - myclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(type) - case myclass.quality + case MU::Cloud.resourceClass(cloud, type).quality when MU::Cloud::RELEASE cloud_is_useful[cloud] = true counts[cloud] += 4 @@ -114,8 +113,7 @@ EOF cloudlist.each { |cloud| readme += "
" begin - myclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(type) - case myclass.quality + case MU::Cloud.resourceClass(cloud, type).quality when MU::Cloud::RELEASE readme += "[Release Quality]" when MU::Cloud::BETA diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index ac41bdf35..786cf4f1e 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -53,7 +53,7 @@ def scrapeClouds() @default_parent = nil @clouds.each { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + cloudclass = MU::Cloud.cloudClass(cloud) next if cloudclass.listCredentials.nil? if cloud == "Google" and !@parent and @target_creds @@ -90,7 +90,7 @@ def scrapeClouds() @types.each { |type| begin - resclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(type) + resclass = MU::Cloud.resourceClass(cloud, type) rescue ::MU::Cloud::MuCloudResourceNotImplemented next end @@ -372,8 +372,7 @@ def scrubSchemaDefaults(conf_chunk, schema_chunk, depth = 0, type: nil) # theory realschema = if type and schema_chunk["items"] and schema_chunk["items"]["properties"] and item["cloud"] and MU::Cloud.supportedClouds.include?(item['cloud']) - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(item["cloud"]).const_get(type) - _toplevel_required, cloudschema = cloudclass.schema(self) + _toplevel_required, cloudschema = MU::Cloud.resourceClass(item['cloud'], type).schema(self) newschema = schema_chunk["items"].dup newschema["properties"].merge!(cloudschema) @@ -422,7 +421,7 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) raise Incomplete if obj.nil? 
new_cfg = resolveReferences(resource, deploy, obj) new_cfg.delete("cloud_id") - cred_cfg = MU::Cloud.const_get(obj.cloud).credConfig(obj.credentials) + cred_cfg = MU::Cloud.cloudClass(obj.cloud).credConfig(obj.credentials) if cred_cfg['region'] == new_cfg['region'] new_cfg.delete('region') end @@ -522,7 +521,7 @@ def resolveReferences(cfg, deploy, parent) hashcfg.delete("deploy_id") if hashcfg['deploy_id'] == deploy.deploy_id if parent and parent.config - cred_cfg = MU::Cloud.const_get(parent.cloud).credConfig(parent.credentials) + cred_cfg = MU::Cloud.cloudClass(parent.cloud).credConfig(parent.credentials) if parent.config['region'] == hashcfg['region'] or cred_cfg['region'] == hashcfg['region'] diff --git a/modules/mu/config/doc_helpers.rb b/modules/mu/config/doc_helpers.rb index f8bd1311d..bba243e54 100644 --- a/modules/mu/config/doc_helpers.rb +++ b/modules/mu/config/doc_helpers.rb @@ -31,7 +31,7 @@ def self.docSchema end _required, res_schema = MU::Cloud.resourceClass(cloud, classname).schema(self) docschema["properties"][attrs[:cfg_plural]]["items"]["description"] ||= "" - docschema["properties"][attrs[:cfg_plural]]["items"]["description"] += "\n#\n# `#{cloud}`: "+res_class.quality + docschema["properties"][attrs[:cfg_plural]]["items"]["description"] += "\n#\n# `#{cloud}`: "+MU::Cloud.resourceClass(cloud, classname).quality res_schema.each { |key, cfg| if !docschema["properties"][attrs[:cfg_plural]]["items"]["properties"][key] only_children[attrs[:cfg_plural]] ||= {} diff --git a/modules/mu/config/ref.rb b/modules/mu/config/ref.rb index b88c3f071..05ab6b609 100644 --- a/modules/mu/config/ref.rb +++ b/modules/mu/config/ref.rb @@ -295,7 +295,7 @@ def kitten(mommacat = @mommacat, shallow: false, debug: false) end end - if !@obj and !(@cloud == "Google" and @id and @type == "users" and MU::Cloud::Google::User.cannedServiceAcctName?(@id)) and !shallow + if !@obj and !(@cloud == "Google" and @id and @type == "users" and MU::Cloud.resourceClass("Google", 
"User").cannedServiceAcctName?(@id)) and !shallow try_deploy_id = @deploy_id begin From 59b85700fbdf537cb298219d8020a7ba84d18bfb Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 8 Apr 2020 02:47:39 -0400 Subject: [PATCH 059/124] Config: a better mousetrap for injecting dependencies --- modules/mu/config.rb | 80 ++++++++++++++++---------- modules/mu/config/firewall_rule.rb | 16 +----- modules/mu/config/server.rb | 14 +---- modules/mu/config/tail.rb | 10 ++++ modules/mu/config/vpc.rb | 18 +----- modules/mu/providers/google/habitat.rb | 6 +- modules/mu/providers/google/role.rb | 13 +---- modules/mu/providers/google/user.rb | 12 +--- 8 files changed, 73 insertions(+), 96 deletions(-) diff --git a/modules/mu/config.rb b/modules/mu/config.rb index 57f3d7f84..827f61619 100644 --- a/modules/mu/config.rb +++ b/modules/mu/config.rb @@ -430,6 +430,39 @@ def resolveTails(tree, indent= "") @config.freeze end + # Insert a dependency into the config hash of a resource, with sensible + # error checking and de-duplication. 
+ # @param resource [Hash] + # @param name [String] + # @param type [String] + # @param phase [String] + # @param no_create_wait [Boolean] + def self.addDependency(resource, name, type, phase: nil, no_create_wait: false) + if ![nil, "create", "groom"].include?(phase) + raise MuError, "Invalid phase '#{phase}' while adding dependency #{type} #{name} to #{resource['name']}" + end + resource['dependencies'] ||= [] + _shortclass, cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type) + + resource['dependencies'].each { |dep| + if dep['type'] == cfg_name and dep['name'].to_s == name.to_s + dep["no_create_wait"] = no_create_wait + dep["phase"] = phase if phase + return + end + } + + newdep = { + "type" => cfg_name, + "name" => name.to_s, + "no_create_wait" => no_create_wait + } + newdep["phase"] = phase if phase + + resource['dependencies'] << newdep + + end + # See if a given resource is configured in the current stack # @param name [String]: The name of the resource being checked # @param type [String]: The type of resource being checked @@ -558,11 +591,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: if descriptor['project'].nil? descriptor.delete('project') elsif haveLitterMate?(descriptor['project'], "habitats") - descriptor['dependencies'] ||= [] - descriptor['dependencies'] << { - "type" => "habitat", - "name" => descriptor['project'] - } + MU::Config.addDependency(descriptor, descriptor['project'], "habitat") end end @@ -591,20 +620,14 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: haveLitterMate?(descriptor["vpc"]["name"], "vpcs") and descriptor["vpc"]['deploy_id'].nil? and descriptor["vpc"]['id'].nil? 
- descriptor["dependencies"] << { - "type" => "vpc", - "name" => descriptor["vpc"]["name"], - } + MU::Config.addDependency(descriptor, descriptor['vpc']['name'], "vpc") siblingvpc = haveLitterMate?(descriptor["vpc"]["name"], "vpcs") if siblingvpc and siblingvpc['bastion'] and ["server", "server_pool", "container_cluster"].include?(cfg_name) and !descriptor['bastion'] - if descriptor['name'] != siblingvpc['bastion'].to_h['name'] - descriptor["dependencies"] << { - "type" => "server", - "name" => siblingvpc['bastion'].to_h['name'] - } + if descriptor['name'] != siblingvpc['bastion']['name'] + MU::Config.addDependency(descriptor, siblingvpc['bastion']['name'], "server") end end @@ -702,10 +725,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: if !descriptor["loadbalancers"].nil? descriptor["loadbalancers"].each { |lb| if !lb["concurrent_load_balancer"].nil? - descriptor["dependencies"] << { - "type" => "loadbalancer", - "name" => lb["concurrent_load_balancer"] - } + MU::Config.addDependency(descriptor, lb["concurrent_load_balancer"], "loadbalancer") end } end @@ -714,10 +734,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: if !descriptor["storage_pools"].nil? 
descriptor["storage_pools"].each { |sp| if sp["name"] - descriptor["dependencies"] << { - "type" => "storage_pool", - "name" => sp["name"] - } + MU::Config.addDependency(descriptor, sp["name"], "storage_pool") end } end @@ -728,10 +745,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: next if !acl_include["name"] and !acl_include["rule_name"] acl_include["name"] ||= acl_include["rule_name"] if haveLitterMate?(acl_include["name"], "firewall_rules") - descriptor["dependencies"] << { - "type" => "firewall_rule", - "name" => acl_include["name"] - } + MU::Config.addDependency(descriptor, acl_include["name"], "firewall_rule") elsif acl_include["name"] MU.log shortclass.to_s+" #{descriptor['name']} depends on FirewallRule #{acl_include["name"]}, but no such rule declared.", MU::ERR ok = false @@ -872,7 +886,6 @@ def check_dependencies _shortclass, cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type, false) values.each { |resource| next if !resource.kind_of?(Hash) or resource["dependencies"].nil? - resource['dependencies'].uniq! resource["dependencies"].each { |dependency| # make sure the thing we depend on really exists @@ -883,9 +896,17 @@ def check_dependencies next end - # Fudge dependency declarations to account for virtual_names + # Fudge dependency declarations to quash virtual_names that we know + # are extraneous. Note that wee can't do all virtual names here; we + # have no way to guess which of a collection of resources is the + # real correct one. if sibling['virtual_name'] == dependency['name'] - dependency['name'] = sibling['name'] + resource["dependencies"].each { |dep_again| + if dep_again['type'] == dependency['type'] and sibling['name'] == dep_again['name'] + dependency['name'] = sibling['name'] + break + end + } end # Check for a circular relationship that will lead to a deadlock @@ -909,6 +930,7 @@ def check_dependencies } end } + resource["dependencies"].uniq! 
} } diff --git a/modules/mu/config/firewall_rule.rb b/modules/mu/config/firewall_rule.rb index 0e4f74a9c..7c5c98529 100644 --- a/modules/mu/config/firewall_rule.rb +++ b/modules/mu/config/firewall_rule.rb @@ -119,21 +119,7 @@ def resolveIntraStackFirewallRefs(acl, delay_validation = false) if acl_include['sgs'] acl_include['sgs'].each { |sg_ref| if haveLitterMate?(sg_ref, "firewall_rules") - acl["dependencies"] ||= [] - found = false - acl["dependencies"].each { |dep| - if dep["type"] == "firewall_rule" and dep["name"] == sg_ref - dep["no_create_wait"] = true - found = true - end - } - if !found - acl["dependencies"] << { - "type" => "firewall_rule", - "name" => sg_ref, - "no_create_wait" => true - } - end + MU::Config.addDependency(acl, sg_ref, "firewall_rule", no_create_wait: true) siblingfw = haveLitterMate?(sg_ref, "firewall_rules") if !siblingfw["#MU_VALIDATED"] # XXX raise failure somehow diff --git a/modules/mu/config/server.rb b/modules/mu/config/server.rb index 29b530301..98be4f00d 100644 --- a/modules/mu/config/server.rb +++ b/modules/mu/config/server.rb @@ -650,20 +650,12 @@ def self.validate(server, configurator) end if !server["vpc"]["subnet_name"].nil? and configurator.nat_routes.has_key?(server["vpc"]["subnet_name"]) and !configurator.nat_routes[server["vpc"]["subnet_name"]].empty? - server["dependencies"] << { - "type" => "server", - "name" => configurator.nat_routes[server["vpc"]["subnet_name"]], - "phase" => "groom" - } + MU::Config.addDependency(server, configurator.nat_routes[server["vpc"]["subnet_name"]], "server", phase: "groom") elsif !server["vpc"]["name"].nil? 
siblingvpc = configurator.haveLitterMate?(server["vpc"]["name"], "vpcs") if siblingvpc and siblingvpc['bastion'] and - server['name'] != siblingvpc['bastion'].to_h['name'] - server["dependencies"] << { - "type" => "server", - "name" => siblingvpc['bastion'].to_h['name'], - "phase" => "groom" - } + server['name'] != siblingvpc['bastion']['name'] + MU::Config.addDependency(server, siblingvpc['bastion']['name'], "server", phase: "groom") end end end diff --git a/modules/mu/config/tail.rb b/modules/mu/config/tail.rb index 852a7cc26..5368ade60 100644 --- a/modules/mu/config/tail.rb +++ b/modules/mu/config/tail.rb @@ -117,6 +117,16 @@ def +(o) def gsub(*args) to_s.gsub(*args) end + + # Lets callers access us like a {Hash} + # @param attribute [String,Symbol] + def [](attribute) + if respond_to?(attribute.to_sym) + send(attribute.to_sym) + else + nil + end + end end # Wrapper method for creating a {MU::Config::Tail} object as a reference to diff --git a/modules/mu/config/vpc.rb b/modules/mu/config/vpc.rb index 1e6724675..44e56c8c5 100644 --- a/modules/mu/config/vpc.rb +++ b/modules/mu/config/vpc.rb @@ -562,11 +562,7 @@ def self.validate(vpc, configurator) "name" => vpc["name"], "subnet_pref" => "public" } - vpc["dependencies"] << { - "type" => "server", - "name" => bastion['name'], - "no_create_wait" => true - } + MU::Config.addDependency(vpc, bastion['name'], "server", no_create_wait: true) vpc["bastion"] = MU::Config::Ref.get( name: bastion['name'], cloud: vpc['cloud'], @@ -618,19 +614,11 @@ def self.resolvePeers(vpc, configurator) append_me = { "vpc" => peer["vpc"].dup } append_me['vpc']['name'] = sib['name'] append << append_me - vpc["dependencies"] << { - "type" => "vpc", - "name" => sib['name'], - "phase" => "groom" - } + MU::Config.addDependency(vpc, sib['name'], "vpc", phase: "groom", no_create_wait: true) end delete << peer else - vpc["dependencies"] << { - "type" => "vpc", - "name" => peer['vpc']["name"], - "phase" => "groom" - } + MU::Config.addDependency(vpc, 
peer['vpc']['name'], "vpc", phase: "groom", no_create_wait: true) end delete << peer if sib['name'] == vpc['name'] } diff --git a/modules/mu/providers/google/habitat.rb b/modules/mu/providers/google/habitat.rb index d1bf11876..b1d386b88 100644 --- a/modules/mu/providers/google/habitat.rb +++ b/modules/mu/providers/google/habitat.rb @@ -376,11 +376,7 @@ def self.validateConfig(habitat, configurator) end if habitat['parent'] and habitat['parent']['name'] and !habitat['parent']['deploy_id'] and configurator.haveLitterMate?(habitat['parent']['name'], "folders") - habitat["dependencies"] ||= [] - habitat["dependencies"] << { - "type" => "folder", - "name" => habitat['parent']['name'] - } + MU::Config.addDependency(habitat, habitat['parent']['name'], "folder") end ok diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index 1c6449e07..3d024f5c2 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -1114,11 +1114,7 @@ def self.validateConfig(role, configurator) if role['role_source'] == "project" role['project'] ||= MU::Cloud::Google.defaultProject(role['credentials']) if configurator.haveLitterMate?(role['project'], "habitats") - role['dependencies'] ||= [] - role['dependencies'] << { - "type" => "habitat", - "name" => role['project'] - } + MU::Config.addDependency(role, role['project'], "habitat") end end @@ -1126,12 +1122,7 @@ def self.validateConfig(role, configurator) role['bindings'].each { |binding| if binding['entity'] and binding['entity']['name'] and configurator.haveLitterMate?(binding['entity']['name'], binding['entity']['type']) - role['dependencies'] ||= [] - role['dependencies'] << { - "type" => binding['entity']['type'].sub(/s$/, ''), - "name" => binding['entity']['name'] - } - + MU::Config.addDependency(role, binding['entity']['name'], binding['entity']['type']) end } end diff --git a/modules/mu/providers/google/user.rb b/modules/mu/providers/google/user.rb index 
acda02adf..91e1fdc88 100644 --- a/modules/mu/providers/google/user.rb +++ b/modules/mu/providers/google/user.rb @@ -614,15 +614,11 @@ def self.validateConfig(user, _configurator) ok = false end - user['dependencies'] ||= [] if user['roles'] user['roles'].each { |r| if r['role'] and r['role']['name'] and (!r['role']['deploy_id'] and !r['role']['id']) - user['dependencies'] << { - "type" => "role", - "name" => r['role']['name'] - } + MU::Config.addDependency(user, r['role']['name'], "role") end if !r["projects"] and !r["organizations"] and !r["folders"] @@ -661,7 +657,6 @@ def self.genericServiceAccount(parent, configurator) user['roles'] = parent['roles'].dup end configurator.insertKitten(user, "users", true) - parent['dependencies'] ||= [] parent['service_account'] = MU::Config::Ref.get( type: "users", cloud: "Google", @@ -669,10 +664,7 @@ def self.genericServiceAccount(parent, configurator) project: user["project"], credentials: user["credentials"] ) - parent['dependencies'] << { - "type" => "user", - "name" => user["name"] - } + MU::Config.addDependency(parent, user['name'], "user") parent end From 66a11b2bd71fd6e308e968ff271c27e1328db3e8 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 8 Apr 2020 13:31:52 -0400 Subject: [PATCH 060/124] Config: have check_depdendencies fix up dependencies that reference virtual_name resources --- modules/mu/config.rb | 20 ++++++++++++++++++++ modules/mu/config/server.rb | 4 ++-- modules/mu/deploy.rb | 2 +- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/modules/mu/config.rb b/modules/mu/config.rb index 827f61619..fce702114 100644 --- a/modules/mu/config.rb +++ b/modules/mu/config.rb @@ -886,6 +886,8 @@ def check_dependencies _shortclass, cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type, false) values.each { |resource| next if !resource.kind_of?(Hash) or resource["dependencies"].nil? 
+ addme = [] + deleteme = [] resource["dependencies"].each { |dependency| # make sure the thing we depend on really exists @@ -901,12 +903,28 @@ def check_dependencies # have no way to guess which of a collection of resources is the # real correct one. if sibling['virtual_name'] == dependency['name'] + real_resources = [] + found_exact = false resource["dependencies"].each { |dep_again| if dep_again['type'] == dependency['type'] and sibling['name'] == dep_again['name'] dependency['name'] = sibling['name'] + found_exact = true break end } + if !found_exact + all_siblings = haveLitterMate?(dependency['name'], dependency['type'], has_multiple: true) + if all_siblings.size > 0 + all_siblings.each { |s| + newguy = dependency.clone + newguy['name'] = s['name'] + addme << newguy + } + deleteme << dependency + MU.log "Expanding dependency which maps to virtual resources to all matching real resources", MU::NOTICE, details: { sibling['virtual_name'] => addme } + next + end + end end # Check for a circular relationship that will lead to a deadlock @@ -930,6 +948,8 @@ def check_dependencies } end } + resource["dependencies"].reject! { |dep| deleteme.include?(dep) } + resource["dependencies"].concat(addme) resource["dependencies"].uniq! } diff --git a/modules/mu/config/server.rb b/modules/mu/config/server.rb index 98be4f00d..f9c7fb0d6 100644 --- a/modules/mu/config/server.rb +++ b/modules/mu/config/server.rb @@ -650,12 +650,12 @@ def self.validate(server, configurator) end if !server["vpc"]["subnet_name"].nil? and configurator.nat_routes.has_key?(server["vpc"]["subnet_name"]) and !configurator.nat_routes[server["vpc"]["subnet_name"]].empty? - MU::Config.addDependency(server, configurator.nat_routes[server["vpc"]["subnet_name"]], "server", phase: "groom") + MU::Config.addDependency(server, configurator.nat_routes[server["vpc"]["subnet_name"]], "server", phase: "groom", no_create_wait: true) elsif !server["vpc"]["name"].nil? 
siblingvpc = configurator.haveLitterMate?(server["vpc"]["name"], "vpcs") if siblingvpc and siblingvpc['bastion'] and server['name'] != siblingvpc['bastion']['name'] - MU::Config.addDependency(server, siblingvpc['bastion']['name'], "server", phase: "groom") + MU::Config.addDependency(server, siblingvpc['bastion']['name'], "server", phase: "groom", no_create_wait: true) end end end diff --git a/modules/mu/deploy.rb b/modules/mu/deploy.rb index 71f90157c..f3680fae0 100644 --- a/modules/mu/deploy.rb +++ b/modules/mu/deploy.rb @@ -555,7 +555,7 @@ def setThreadDependencies(services) @dependency_threads["#{name}_groom"]=["#{name}_create", "mu_groom_container"] MU.log "Setting dependencies for #{name}", MU::DEBUG, details: resource["dependencies"] - if resource["dependencies"] != nil then + if !resource["dependencies"].nil? then resource["dependencies"].each { |dependency| parent_class = MU::Cloud.loadBaseType(dependency['type']) From be80d251c743965e2e306ea41ade60e83a1e80f9 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 8 Apr 2020 16:05:11 -0400 Subject: [PATCH 061/124] Deploy: further correctives to thread dependency management --- modules/mu/deploy.rb | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/mu/deploy.rb b/modules/mu/deploy.rb index f3680fae0..694dc4474 100644 --- a/modules/mu/deploy.rb +++ b/modules/mu/deploy.rb @@ -578,12 +578,18 @@ def setThreadDependencies(services) if (dependency['phase'] == "groom" or resource["#MU_CLOUDCLASS"].waits_on_parent_completion) and parent_class.instance_methods(false).include?(:groom) parent = parent_type+"_"+dependency["name"]+"_groom" addDependentThread(parent, "#{name}_groom") - if (parent_class.deps_wait_on_my_creation and parent_type != res_type) or resource["#MU_CLOUDCLASS"].waits_on_parent_completion or dependency['phase'] == "groom" + if !dependency["no_create_wait"] and ( + parent_class.deps_wait_on_my_creation or + resource["#MU_CLOUDCLASS"].waits_on_parent_completion or + 
dependency['phase'] == "groom" + ) addDependentThread(parent, "#{name}_create") end end } end + MU.log "Thread dependencies #{res_type}[#{name}]", MU::DEBUG, details: { "create" => @dependency_threads["#{name}_create"], "groom" => @dependency_threads["#{name}_groom"] } + @dependency_threads["#{name}_groom"]=["#{name}_create", "mu_groom_container"] } end From 7e5163542ca8d85dfca597e44c8a933277e86aff Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 8 Apr 2020 17:23:31 -0400 Subject: [PATCH 062/124] Google::ContainerCluster: make attempts to delegate pod subnets automatically based on what's available --- modules/mu/clouds/google/container_cluster.rb | 19 ++++++++++ modules/mu/clouds/google/vpc.rb | 35 +++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/modules/mu/clouds/google/container_cluster.rb b/modules/mu/clouds/google/container_cluster.rb index aeb8e9a5a..78467aa36 100644 --- a/modules/mu/clouds/google/container_cluster.rb +++ b/modules/mu/clouds/google/container_cluster.rb @@ -1021,6 +1021,25 @@ def self.validateConfig(cluster, configurator) cluster['ip_aliases'] = true end + # try to stake out some nice /21s for our networking config + if cluster['ip_aliases'] and cluster["vpc"] and cluster["vpc"]["id"] + habarg = if cluster["vpc"]["habitat"] and cluster["vpc"]["habitat"]["id"] + cluster["vpc"]["habitat"]["id"] + else + cluster["project"] + end + found = MU::MommaCat.findStray("Google", "vpcs", cloud_id: cluster["vpc"]["id"], credentials: cluster["credentials"], habitats: [habarg], dummy_ok: true) + if found and found.size == 1 + myvpc = found.first +# XXX this might not make sense with custom_subnet + cluster['pod_ip_block'] ||= myvpc.getUnusedAddressBlock(max_bits: 21) + cluster['services_ip_block'] ||= myvpc.getUnusedAddressBlock(exclude: [cluster['pod_ip_block']], max_bits: 21) + if cluster['tpu'] + cluster['tpu_ip_block'] ||= myvpc.getUnusedAddressBlock(exclude: [cluster['pod_ip_block'], cluster['services_ip_block']], max_bits: 21) + 
end + end + end + if cluster['service_account'] cluster['service_account']['cloud'] = "Google" cluster['service_account']['habitat'] ||= MU::Config::Ref.get( diff --git a/modules/mu/clouds/google/vpc.rb b/modules/mu/clouds/google/vpc.rb index 38549a371..7c382cfcf 100644 --- a/modules/mu/clouds/google/vpc.rb +++ b/modules/mu/clouds/google/vpc.rb @@ -946,6 +946,41 @@ def createRouteForInstance(route, server) createRoute(route, network: @url, tags: [MU::Cloud::Google.nameStr(server.mu_name)]) end + # Looks at existing subnets, and attempts to find the next available + # IP block that's roughly similar to the ones we already have. This + # checks against secondary IP ranges, as well as each subnet's primary + # CIDR block. + # @param exclude [Array]: One or more CIDRs to treat as unavailable, in addition to those allocated to existing subnets + # @return [String] + def getUnusedAddressBlock(exclude: [], max_bits: 28) + used_ranges = exclude.map { |cidr| NetAddr::IPv4Net.parse(cidr) } + subnets.each { |s| + used_ranges << NetAddr::IPv4Net.parse(s.cloud_desc.ip_cidr_range) + if s.cloud_desc.secondary_ip_ranges + used_ranges.concat(s.cloud_desc.secondary_ip_ranges.map { |r| NetAddr::IPv4Net.parse(r.ip_cidr_range) }) + end + } +# XXX sort used_ranges + candidate = used_ranges.first.next_sib + + begin + if candidate.netmask.prefix_len > max_bits + candidate = candidate.resize(max_bits) + end + try_again = false + used_ranges.each { |cidr| + if !cidr.rel(candidate).nil? + candidate = candidate.next_sib + try_again = true + break + end + } + try_again = false if candidate.nil? 
+ end while try_again + + candidate.to_s + end + private def self.genStandardSubnetACLs(vpc_cidr, vpc_name, configurator, project, _publicroute = true, credentials: nil) From 0dec283e28fe55e0e7c4da172db94552c11e96dc Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 8 Apr 2020 19:03:19 -0400 Subject: [PATCH 063/124] further rearranging of deck chairs in MU::Cloud so we can get a better CodeClimate grade later --- modules/mu/cloud/database.rb | 49 ++++++++ modules/mu/cloud/dnszone.rb | 40 +++++++ modules/mu/cloud/resource_base.rb | 192 +----------------------------- modules/mu/cloud/server.rb | 38 ++++++ modules/mu/cloud/server_pool.rb | 1 + modules/mu/cloud/wrappers.rb | 165 +++++++++++++++++++++++++ 6 files changed, 298 insertions(+), 187 deletions(-) create mode 100644 modules/mu/cloud/database.rb create mode 100644 modules/mu/cloud/dnszone.rb create mode 100644 modules/mu/cloud/server.rb create mode 120000 modules/mu/cloud/server_pool.rb create mode 100644 modules/mu/cloud/wrappers.rb diff --git a/modules/mu/cloud/database.rb b/modules/mu/cloud/database.rb new file mode 100644 index 000000000..dacdae6cd --- /dev/null +++ b/modules/mu/cloud/database.rb @@ -0,0 +1,49 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. 
+ class Cloud + + # Generic methods for all Database implementations + class Database + + # Getting the password for a database's master user, and saving it in a database / cluster specific vault + def getPassword + if @config['password'].nil? + if @config['auth_vault'] && !@config['auth_vault'].empty? + @config['password'] = @groomclass.getSecret( + vault: @config['auth_vault']['vault'], + item: @config['auth_vault']['item'], + field: @config['auth_vault']['password_field'] + ) + else + # Should we use random instead? + @config['password'] = Password.pronounceable(10..12) + end + end + + creds = { + "username" => @config["master_user"], + "password" => @config["password"] + } + @groomclass.saveSecret(vault: @mu_name, item: "database_credentials", data: creds) + end + + end + + end + +end diff --git a/modules/mu/cloud/dnszone.rb b/modules/mu/cloud/dnszone.rb new file mode 100644 index 000000000..e722ad4c9 --- /dev/null +++ b/modules/mu/cloud/dnszone.rb @@ -0,0 +1,40 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. 
+ class Cloud + + # Generic methods for all DNSZone implementations + class DNSZone + + def self.genericMuDNSEntry(*flags) +# XXX have this switch on a global config for where Mu puts its DNS + MU::Cloud.resourceClass(MU::Config.defaultCloud, "DNSZone").genericMuDNSEntry(flags.first) + end + + def self.createRecordsFromConfig(*flags) + cloudclass = MU::Cloud.resourceClass(MU::Config.defaultCloud, "DNSZone") + if !flags.nil? and flags.size == 1 + cloudclass.createRecordsFromConfig(flags.first) + else + cloudclass.createRecordsFromConfig(*flags) + end + end + end + + end + +end diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 7fbae5698..27a083c99 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -17,6 +17,9 @@ module MU # other provisioning layers. class Cloud + # Generic class methods (.find, .cleanup, etc) are defined in wrappers.rb + require 'mu/cloud/wrappers' + @@resource_types.keys.each { |name| Object.const_get("MU").const_get("Cloud").const_get(name).class_eval { attr_reader :cloudclass @@ -24,34 +27,6 @@ class Cloud attr_reader :destroyed attr_reader :delayed_save - def self.shortname - name.sub(/.*?::([^:]+)$/, '\1') - end - - def self.cfg_plural - MU::Cloud.resource_types[shortname.to_sym][:cfg_plural] - end - - def self.has_multiples - MU::Cloud.resource_types[shortname.to_sym][:has_multiples] - end - - def self.cfg_name - MU::Cloud.resource_types[shortname.to_sym][:cfg_name] - end - - def self.can_live_in_vpc - MU::Cloud.resource_types[shortname.to_sym][:can_live_in_vpc] - end - - def self.waits_on_parent_completion - MU::Cloud.resource_types[shortname.to_sym][:waits_on_parent_completion] - end - - def self.deps_wait_on_my_creation - MU::Cloud.resource_types[shortname.to_sym][:deps_wait_on_my_creation] - end - # Print something palatable when we're called in a string context. 
def to_s fullname = "#{self.class.shortname}" @@ -841,113 +816,6 @@ def allowBastionAccess } end - # Defaults any resources that don't declare their release-readiness to - # ALPHA. That'll learn 'em. - def self.quality - MU::Cloud::ALPHA - end - - # Return a list of "container" artifacts, by class, that apply to this - # resource type in a cloud provider. This is so methods that call find - # know whether to call +find+ with identifiers for parent resources. - # This is similar in purpose to the +isGlobal?+ resource class method, - # which tells our search functions whether or not a resource scopes to - # a region. In almost all cases this is one-entry list consisting of - # +:Habitat+. Notable exceptions include most implementations of - # +Habitat+, which either reside inside a +:Folder+ or nothing at all; - # whereas a +:Folder+ tends to not have any containing parent. Very few - # resource implementations will need to override this. - # A +nil+ entry in this list is interpreted as "this resource can be - # global." - # @return [Array] - def self.canLiveIn - if self.shortname == "Folder" - [nil, :Folder] - elsif self.shortname == "Habitat" - [:Folder] - else - [:Habitat] - end - end - - def self.find(*flags) - allfound = {} - - MU::Cloud.availableClouds.each { |cloud| - begin - args = flags.first - next if args[:cloud] and args[:cloud] != cloud - # skip this cloud if we have a region argument that makes no - # sense there - cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) - next if cloudbase.listCredentials.nil? or cloudbase.listCredentials.empty? or cloudbase.credConfig(args[:credentials]).nil? 
- if args[:region] and cloudbase.respond_to?(:listRegions) - if !cloudbase.listRegions(credentials: args[:credentials]) - MU.log "Failed to get region list for credentials #{args[:credentials]} in cloud #{cloud}", MU::ERR, details: caller - else - next if !cloudbase.listRegions(credentials: args[:credentials]).include?(args[:region]) - end - end - begin - cloudclass = MU::Cloud.resourceClass(cloud, shortname) - rescue MU::MuError - next - end - - found = cloudclass.find(args) - if !found.nil? - if found.is_a?(Hash) - allfound.merge!(found) - else - raise MuError, "#{cloudclass}.find returned a non-Hash result" - end - end - rescue MuCloudResourceNotImplemented - end - } - allfound - end - - # Wrapper for the cleanup class method of underlying cloud object implementations. - def self.cleanup(*flags) - ok = true - params = flags.first - clouds = MU::Cloud.supportedClouds - if params[:cloud] - clouds = [params[:cloud]] - params.delete(:cloud) - end - - clouds.each { |cloud| - begin - cloudclass = MU::Cloud.resourceClass(cloud, shortname) - - if cloudclass.isGlobal? - params.delete(:region) - end - - raise MuCloudResourceNotImplemented if !cloudclass.respond_to?(:cleanup) or cloudclass.method(:cleanup).owner.to_s != "#" - MU.log "Invoking #{cloudclass}.cleanup from #{shortname}", MU::DEBUG, details: flags - cloudclass.cleanup(params) - rescue MuCloudResourceNotImplemented - MU.log "No #{cloud} implementation of #{shortname}.cleanup, skipping", MU::DEBUG, details: flags - rescue StandardError => e - in_msg = cloud - if params and params[:region] - in_msg += " "+params[:region] - end - if params and params[:flags] and params[:flags]["project"] and !params[:flags]["project"].empty? 
- in_msg += " project "+params[:flags]["project"] - end - MU.log "Skipping #{shortname} cleanup method in #{in_msg} due to #{e.class.name}: #{e.message}", MU::WARN, details: e.backtrace - ok = false - end - } - MU::MommaCat.unlockAll - - ok - end - # A hook that is always called just before each instance method is # invoked, so that we can ensure that repetitive setup tasks (like # resolving +:resource_group+ for Azure resources) have always been @@ -959,58 +827,8 @@ def resourceInitHook end end - if shortname == "Database" - - # Getting the password for a database's master user, and saving it in a database / cluster specific vault - def getPassword - if @config['password'].nil? - if @config['auth_vault'] && !@config['auth_vault'].empty? - @config['password'] = @groomclass.getSecret( - vault: @config['auth_vault']['vault'], - item: @config['auth_vault']['item'], - field: @config['auth_vault']['password_field'] - ) - else - # Should we use random instead? - @config['password'] = Password.pronounceable(10..12) - end - end - - creds = { - "username" => @config["master_user"], - "password" => @config["password"] - } - @groomclass.saveSecret(vault: @mu_name, item: "database_credentials", data: creds) - end - end - - if shortname == "DNSZone" - def self.genericMuDNSEntry(*flags) -# XXX have this switch on a global config for where Mu puts its DNS - cloudclass = MU::Cloud.resourceClass(MU::Config.defaultCloud, "DNSZone") - cloudclass.genericMuDNSEntry(flags.first) - end - def self.createRecordsFromConfig(*flags) - cloudclass = MU::Cloud.resourceClass(MU::Config.defaultCloud, "DNSZone") - if !flags.nil? and flags.size == 1 - cloudclass.createRecordsFromConfig(flags.first) - else - cloudclass.createRecordsFromConfig(*flags) - end - end - end - - if shortname == "Server" or shortname == "ServerPool" - def windows? 
- return true if %w{win2k16 win2k12r2 win2k12 win2k8 win2k8r2 win2k19 windows}.include?(@config['platform']) - begin - return true if cloud_desc.respond_to?(:platform) and cloud_desc.platform == "Windows" -# XXX ^ that's AWS-speak, doesn't cover GCP or anything else; maybe we should require cloud layers to implement this so we can just call @cloudobj.windows? - rescue MU::MuError - return false - end - false - end + if File.exist?(MU.myRoot+"/lib/modules/cloud/#{cfg_name}.rb") + require "modules/cloud/#{cfg_name}" end # Wrap the instance methods that this cloud resource type has to diff --git a/modules/mu/cloud/server.rb b/modules/mu/cloud/server.rb new file mode 100644 index 000000000..1bf0f08b3 --- /dev/null +++ b/modules/mu/cloud/server.rb @@ -0,0 +1,38 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. + class Cloud + + # Generic methods for all Server/ServerPool implementations + class Server + + def windows? + return true if %w{win2k16 win2k12r2 win2k12 win2k8 win2k8r2 win2k19 windows}.include?(@config['platform']) + begin + return true if cloud_desc.respond_to?(:platform) and cloud_desc.platform == "Windows" +# XXX ^ that's AWS-speak, doesn't cover GCP or anything else; maybe we should require cloud layers to implement this so we can just call @cloudobj.windows? 
+ rescue MU::MuError + return false + end + false + end + + end + + end + +end diff --git a/modules/mu/cloud/server_pool.rb b/modules/mu/cloud/server_pool.rb new file mode 120000 index 000000000..5329f7491 --- /dev/null +++ b/modules/mu/cloud/server_pool.rb @@ -0,0 +1 @@ +server.rb \ No newline at end of file diff --git a/modules/mu/cloud/wrappers.rb b/modules/mu/cloud/wrappers.rb new file mode 100644 index 000000000..d07e94267 --- /dev/null +++ b/modules/mu/cloud/wrappers.rb @@ -0,0 +1,165 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + # Plugins under this namespace serve as interfaces to cloud providers and + # other provisioning layers. + class Cloud + + # In this file: generic class method wrappers for all resource types. 
+ + @@resource_types.keys.each { |name| + Object.const_get("MU").const_get("Cloud").const_get(name).class_eval { + + def self.shortname + name.sub(/.*?::([^:]+)$/, '\1') + end + + def self.cfg_plural + MU::Cloud.resource_types[shortname.to_sym][:cfg_plural] + end + + def self.has_multiples + MU::Cloud.resource_types[shortname.to_sym][:has_multiples] + end + + def self.cfg_name + MU::Cloud.resource_types[shortname.to_sym][:cfg_name] + end + + def self.can_live_in_vpc + MU::Cloud.resource_types[shortname.to_sym][:can_live_in_vpc] + end + + def self.waits_on_parent_completion + MU::Cloud.resource_types[shortname.to_sym][:waits_on_parent_completion] + end + + def self.deps_wait_on_my_creation + MU::Cloud.resource_types[shortname.to_sym][:deps_wait_on_my_creation] + end + + # Defaults any resources that don't declare their release-readiness to + # ALPHA. That'll learn 'em. + def self.quality + MU::Cloud::ALPHA + end + + # Return a list of "container" artifacts, by class, that apply to this + # resource type in a cloud provider. This is so methods that call find + # know whether to call +find+ with identifiers for parent resources. + # This is similar in purpose to the +isGlobal?+ resource class method, + # which tells our search functions whether or not a resource scopes to + # a region. In almost all cases this is one-entry list consisting of + # +:Habitat+. Notable exceptions include most implementations of + # +Habitat+, which either reside inside a +:Folder+ or nothing at all; + # whereas a +:Folder+ tends to not have any containing parent. Very few + # resource implementations will need to override this. + # A +nil+ entry in this list is interpreted as "this resource can be + # global." 
+ # @return [Array] + def self.canLiveIn + if self.shortname == "Folder" + [nil, :Folder] + elsif self.shortname == "Habitat" + [:Folder] + else + [:Habitat] + end + end + + def self.find(*flags) + allfound = {} + + MU::Cloud.availableClouds.each { |cloud| + begin + args = flags.first + next if args[:cloud] and args[:cloud] != cloud + # skip this cloud if we have a region argument that makes no + # sense there + cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) + next if cloudbase.listCredentials.nil? or cloudbase.listCredentials.empty? or cloudbase.credConfig(args[:credentials]).nil? + if args[:region] and cloudbase.respond_to?(:listRegions) + if !cloudbase.listRegions(credentials: args[:credentials]) + MU.log "Failed to get region list for credentials #{args[:credentials]} in cloud #{cloud}", MU::ERR, details: caller + else + next if !cloudbase.listRegions(credentials: args[:credentials]).include?(args[:region]) + end + end + begin + cloudclass = MU::Cloud.resourceClass(cloud, shortname) + rescue MU::MuError + next + end + + found = cloudclass.find(args) + if !found.nil? + if found.is_a?(Hash) + allfound.merge!(found) + else + raise MuError, "#{cloudclass}.find returned a non-Hash result" + end + end + rescue MuCloudResourceNotImplemented + end + } + allfound + end + + # Wrapper for the cleanup class method of underlying cloud object implementations. + def self.cleanup(*flags) + ok = true + params = flags.first + clouds = MU::Cloud.supportedClouds + if params[:cloud] + clouds = [params[:cloud]] + params.delete(:cloud) + end + + clouds.each { |cloud| + begin + cloudclass = MU::Cloud.resourceClass(cloud, shortname) + + if cloudclass.isGlobal? 
+ params.delete(:region) + end + + raise MuCloudResourceNotImplemented if !cloudclass.respond_to?(:cleanup) or cloudclass.method(:cleanup).owner.to_s != "#" + MU.log "Invoking #{cloudclass}.cleanup from #{shortname}", MU::DEBUG, details: flags + cloudclass.cleanup(params) + rescue MuCloudResourceNotImplemented + MU.log "No #{cloud} implementation of #{shortname}.cleanup, skipping", MU::DEBUG, details: flags + rescue StandardError => e + in_msg = cloud + if params and params[:region] + in_msg += " "+params[:region] + end + if params and params[:flags] and params[:flags]["project"] and !params[:flags]["project"].empty? + in_msg += " project "+params[:flags]["project"] + end + MU.log "Skipping #{shortname} cleanup method in #{in_msg} due to #{e.class.name}: #{e.message}", MU::WARN, details: e.backtrace + ok = false + end + } + MU::MommaCat.unlockAll + + ok + end + + } # end dynamic class generation block + } # end resource type iteration + + end + +end From 02b922c0e259f6e0c7d779d95e617cee3c1ed532 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 8 Apr 2020 19:39:30 -0400 Subject: [PATCH 064/124] Cloud: knock out some of the more trivial lint warnings --- modules/mu/cloud/resource_base.rb | 20 ++++++++++---------- modules/mu/providers/aws/firewall_rule.rb | 3 +-- modules/mu/providers/aws/server.rb | 10 +++------- 3 files changed, 14 insertions(+), 19 deletions(-) diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 27a083c99..8522a0f9b 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -20,7 +20,7 @@ class Cloud # Generic class methods (.find, .cleanup, etc) are defined in wrappers.rb require 'mu/cloud/wrappers' - @@resource_types.keys.each { |name| + @@resource_types.each_key { |name| Object.const_get("MU").const_get("Cloud").const_get(name).class_eval { attr_reader :cloudclass attr_reader :cloudobj @@ -80,7 +80,7 @@ def initialize(**args) # We are a parent wrapper object. 
Initialize our child object and # housekeeping bits accordingly. - if self.class.name.match(/^MU::Cloud::([^:]+)$/) + if self.class.name =~ /^MU::Cloud::([^:]+)$/ @live = true @delayed_save = args[:delayed_save] @method_semaphore = Mutex.new @@ -148,7 +148,7 @@ class << self @cloud = @config['cloud'] if !@cloud - if self.class.name.match(/^MU::Cloud::([^:]+)(?:::.+|$)/) + if self.class.name =~ /^MU::Cloud::([^:]+)(?:::.+|$)/ cloudclass_name = Regexp.last_match[1] if MU::Cloud.supportedClouds.include?(cloudclass_name) @cloud = cloudclass_name @@ -265,7 +265,7 @@ def cloud @cloud elsif @config and @config['cloud'] @config['cloud'] - elsif self.class.name.match(/^MU::Cloud::([^:]+)::.+/) + elsif self.class.name =~ /^MU::Cloud::([^:]+)::.+/ cloudclass_name = Regexp.last_match[1] if MU::Cloud.supportedClouds.include?(cloudclass_name) cloudclass_name @@ -353,10 +353,10 @@ def habitat_id(nolookup: false) # that are meant for our wrapped object. def method_missing(method_sym, *arguments) if @cloudobj - MU.log "INVOKING #{method_sym.to_s} FROM PARENT CLOUD OBJECT #{self}", MU::DEBUG, details: arguments + MU.log "INVOKING #{method_sym} FROM PARENT CLOUD OBJECT #{self}", MU::DEBUG, details: arguments @cloudobj.method(method_sym).call(*arguments) else - raise NoMethodError, "No such instance method #{method_sym.to_s} available on #{self.class.name}" + raise NoMethodError, "No such instance method #{method_sym} available on #{self.class.name}" end end @@ -422,7 +422,7 @@ def cloud_desc(use_cache: true) end if !@cloud_desc_cache - MU.log "cloud_desc via #{self.class.name}.find() failed to locate a live object.\nWas called by #{caller[0]}", MU::WARN, details: args + MU.log "cloud_desc via #{self.class.name}.find() failed to locate a live object.\nWas called by #{caller(1..1)}", MU::WARN, details: args end rescue StandardError => e MU.log "Got #{e.inspect} trying to find cloud handle for #{self.class.shortname} #{@mu_name} (#{@cloud_id})", MU::WARN @@ -556,7 +556,7 @@ def 
dependencies(use_cache: false, debug: false) break elsif @config['vpc']['subnet_name'] and names.include?(@config['vpc']['subnet_name']) -puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}" +#puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}" @vpc = sibling break elsif @config['vpc']['subnet_id'] and @@ -665,7 +665,7 @@ def dependencies(use_cache: false, debug: false) # Google accounts usually have a useful default VPC we can use if @vpc.nil? and @project_id and @cloud == "Google" and self.class.can_live_in_vpc - MU.log "Seeing about default VPC for #{self.to_s}", MU::NOTICE + MU.log "Seeing about default VPC for #{self}", MU::NOTICE vpcs = MU::MommaCat.findStray( "Google", "vpc", @@ -782,7 +782,7 @@ def mySubnets subnets = [] @config["vpc"]["subnets"].each { |subnet| subnet_obj = @vpc.getSubnet(cloud_id: subnet["subnet_id"].to_s, name: subnet["subnet_name"].to_s) - raise MuError, "Couldn't find a live subnet for #{self.to_s} matching #{subnet} in #{@vpc.to_s} (#{@vpc.subnets.map { |s| s.name }.join(",")})" if subnet_obj.nil? + raise MuError.new "Couldn't find a live subnet for #{self} matching #{subnet} in #{@vpc}", details: @vpc.subnets.map { |s| s.name }.join(",") if subnet_obj.nil? subnets << subnet_obj } diff --git a/modules/mu/providers/aws/firewall_rule.rb b/modules/mu/providers/aws/firewall_rule.rb index 89a6f2d4a..76ebdc77b 100644 --- a/modules/mu/providers/aws/firewall_rule.rb +++ b/modules/mu/providers/aws/firewall_rule.rb @@ -764,7 +764,6 @@ def purge_extraneous_rules(ec2_rules, ext_permissions) # "ingress_rules" structure parsed and validated by MU::Config. ######################################################################### def setRules(rules, add_to_self: false, ingress: true, egress: false) - describe # XXX warn about attempt to set rules before we exist return if rules.nil? 
or rules.size == 0 or !@cloud_id @@ -785,7 +784,7 @@ def setRules(rules, add_to_self: false, ingress: true, egress: false) ec2_rules = convertToEc2(rules) return if ec2_rules.nil? - ext_permissions = MU.structToHash(cloud_desc.ip_permissions) + ext_permissions = MU.structToHash(cloud_desc(use_cache: false).ip_permissions) purge_extraneous_rules(ec2_rules, ext_permissions) diff --git a/modules/mu/providers/aws/server.rb b/modules/mu/providers/aws/server.rb index 58b8cf91d..b8ad7e3c3 100644 --- a/modules/mu/providers/aws/server.rb +++ b/modules/mu/providers/aws/server.rb @@ -399,7 +399,7 @@ def reboot(hard = false) # Figure out what's needed to SSH into this server. # @return [Array]: nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, ssh_key_name, alternate_names def getSSHConfig - describe(cloud_id: @cloud_id) + cloud_desc(use_cache: false) # make sure we're current # XXX add some awesome alternate names from metadata and make sure they end # up in MU::MommaCat's ssh config wangling return nil if @config.nil? or @deploy.nil? @@ -444,8 +444,7 @@ def getSSHConfig # administravia for a new instance. def postBoot(instance_id = nil) @cloud_id ||= instance_id - node, _config, deploydata = describe(cloud_id: @cloud_id) - @mu_name ||= node + _node, _config, deploydata = describe(cloud_id: @cloud_id) raise MuError, "Couldn't find instance #{@mu_name} (#{@cloud_id})" if !cloud_desc return false if !MU::MommaCat.lock(@cloud_id+"-orchestrate", true) @@ -1163,10 +1162,7 @@ def getWindowsAdminPassword(use_cache: true) end end - if @cloud_id.nil? 
- describe - @cloud_id = cloud_desc.instance_id - end + @cloud_id ||= cloud_desc(use_cache: false).instance_id ssh_keydir = "#{Etc.getpwuid(Process.uid).dir}/.ssh" ssh_key_name = @deploy.ssh_key_name From 19ad04af234beda1eed2948b0f871461c2b7c15e Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 9 Apr 2020 10:39:58 -0400 Subject: [PATCH 065/124] mu-run-test: contrive a way to target groomers in test BoKs so that we'll get some basic Chef coverage --- bin/mu-run-tests | 33 +++++++++++++++++++++++---------- modules/mu/groomer.rb | 15 +++++++++++++++ modules/tests/centos6.yaml | 11 +++++++++++ modules/tests/centos7.yaml | 11 +++++++++++ modules/tests/centos8.yaml | 12 ++++++++++++ 5 files changed, 72 insertions(+), 10 deletions(-) create mode 100644 modules/tests/centos6.yaml create mode 100644 modules/tests/centos7.yaml create mode 100644 modules/tests/centos8.yaml diff --git a/bin/mu-run-tests b/bin/mu-run-tests index 6b5956fdc..b71e1bcb6 100755 --- a/bin/mu-run-tests +++ b/bin/mu-run-tests @@ -42,7 +42,7 @@ only = ARGV files = Dir.glob("*.yaml", base: dir) files.concat(Dir.glob("*.yml", base: dir)) -baseclouds = MU::Cloud.supportedClouds.reject { |c| c == "CloudFormation" } +baseclouds = MU::Cloud.availableClouds.reject { |c| c == "CloudFormation" } commands = {} failures = [] @@ -56,20 +56,33 @@ end files.each { |f| clouds = baseclouds.dup + groomer_match = true File.open(dir+"/"+f).readlines.each { |l| l.chomp! 
- next if !l.match(/^\s*#\s*clouds: (.*)/) - clouds = [] - cloudstr = Regexp.last_match[1] - cloudstr.split(/\s*,\s*/).each { |c| - baseclouds.each { |cloud| - if cloud.match(/^#{Regexp.quote(c)}$/i) - clouds << cloud + if l.match(/^\s*#\s*clouds: (.*)/) + clouds = [] + cloudstr = Regexp.last_match[1] + cloudstr.split(/\s*,\s*/).each { |c| + baseclouds.each { |cloud| + if cloud.match(/^#{Regexp.quote(c)}$/i) + clouds << cloud + end + } + } + elsif l.match(/^\s*#\s*groomers: (.*)/) + groomerstr = Regexp.last_match[1] + groomerstr.split(/\s*,\s*/).each { |g| + if !MU::Groomer.availableGroomers.include?(g) + MU.log "#{f} requires groomer #{g}, which is not available. This test will be skipped.", MU::NOTICE + groomer_match = false end } - } - break + end } + if !groomer_match + next + end + clouds.each { |cloud| cmd = "mu-deploy #{f} --cloud #{cloud} #{$opts[:full] ? "" : "--dryrun"}" commands[cmd] = { diff --git a/modules/mu/groomer.rb b/modules/mu/groomer.rb index 2cfb17af5..8bbae0a11 100644 --- a/modules/mu/groomer.rb +++ b/modules/mu/groomer.rb @@ -30,6 +30,21 @@ def self.supportedGroomers ["Chef", "Ansible"] end + # List of known/supported groomers which are installed and appear to be working + # @return [Array] + def self.availableGroomers + available = [] + MU::Groomer.supportedGroomers.each { |groomer| + begin + groomerbase = loadGroomer(groomer) + available << groomer if groomerbase.available? 
+ rescue NameError + end + } + + available + end + # Instance methods that any Groomer plugin must implement def self.requiredMethods [:preClean, :bootstrap, :haveBootstrapped?, :run, :saveDeployData, :getSecret, :saveSecret, :deleteSecret, :reinstall] diff --git a/modules/tests/centos6.yaml b/modules/tests/centos6.yaml new file mode 100644 index 000000000..3f6f544e2 --- /dev/null +++ b/modules/tests/centos6.yaml @@ -0,0 +1,11 @@ +# groomers: Chef +--- +appname: smoketest +servers: +- name: centos6 + platform: centos6 + size: m3.medium + run_list: + - recipe[mu-tools::apply_security] + - recipe[mu-tools::updates] + - recipe[mu-tools::split_var_partitions] diff --git a/modules/tests/centos7.yaml b/modules/tests/centos7.yaml new file mode 100644 index 000000000..4f16b6046 --- /dev/null +++ b/modules/tests/centos7.yaml @@ -0,0 +1,11 @@ +# groomers: Chef +--- +appname: smoketest +servers: +- name: centos7 + platform: centos7 + size: m3.medium + run_list: + - recipe[mu-tools::apply_security] + - recipe[mu-tools::updates] + - recipe[mu-tools::split_var_partitions] diff --git a/modules/tests/centos8.yaml b/modules/tests/centos8.yaml new file mode 100644 index 000000000..25ecf03ff --- /dev/null +++ b/modules/tests/centos8.yaml @@ -0,0 +1,12 @@ +# groomers: Chef +# clouds: Azure, Google +--- +appname: smoketest +servers: +- name: centos8 + platform: centos8 + size: m3.medium + run_list: + - recipe[mu-tools::apply_security] + - recipe[mu-tools::updates] + - recipe[mu-tools::split_var_partitions] From d4bded0b358462a4baf35eb5fee4b970cebe2df8 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 9 Apr 2020 11:04:45 -0400 Subject: [PATCH 066/124] Config: migrate to addDependency everywhere that was manually manipulating the config hash --- modules/mu/config.rb | 7 +------ modules/mu/config/alarm.rb | 6 +----- modules/mu/config/database.rb | 20 ++++---------------- modules/mu/config/server_pool.rb | 8 ++------ 4 files changed, 8 insertions(+), 33 deletions(-) diff --git 
a/modules/mu/config.rb b/modules/mu/config.rb index fce702114..a7ca830e4 100644 --- a/modules/mu/config.rb +++ b/modules/mu/config.rb @@ -1236,12 +1236,7 @@ def validate(config = @config) "port" => db["port"], "sgs" => [cfg_name+server['name']] } - - ruleset["dependencies"] << { - "name" => cfg_name+server['name'], - "type" => "firewall_rule", - "no_create_wait" => true - } + MU::Config.addDependency(ruleset, cfg_name+server['name'], "firewall_rule", no_create_wait: true) end } } diff --git a/modules/mu/config/alarm.rb b/modules/mu/config/alarm.rb index 3b398f015..aa888f352 100644 --- a/modules/mu/config/alarm.rb +++ b/modules/mu/config/alarm.rb @@ -281,11 +281,7 @@ def self.validate(alarm, configurator) } ok = false if !configurator.insertKitten(notifier, "notifiers") end - alarm["dependencies"] ||= [] - alarm["dependencies"] << { - "name" => alarm["notification_group"], - "type" => "notifier" - } + MU::Config.addDependency(alarm, alarm["notification_group"], "notifier") end end diff --git a/modules/mu/config/database.rb b/modules/mu/config/database.rb index e4c1e1303..661c31447 100644 --- a/modules/mu/config/database.rb +++ b/modules/mu/config/database.rb @@ -341,11 +341,7 @@ def self.validate(db, configurator) "region" => db['region'], "credentials" => db['credentials'], } - replica['dependencies'] << { - "type" => "database", - "name" => db["name"], - "phase" => "groom" - } + MU::Config.addDependency(replica, db["name"], "database", phase: "groom") read_replicas << replica end end @@ -371,11 +367,7 @@ def self.validate(db, configurator) "type" => "databases" } # AWS will figure out for us which database instance is the writer/master so we can create all of them concurrently. 
- node['dependencies'] << {
- "type" => "database",
- "name" => db["name"],
- "phase" => "groom"
- }
+ MU::Config.addDependency(node, db["name"], "database", phase: "groom")
cluster_nodes << node

# Alarms are set on each DB cluster node, not on the cluster itself,
@@ -393,6 +385,6 @@ def self.validate(db, configurator)
rr = MU::Config::Ref.get(db['read_replica_of'])
if rr.name and !rr.deploy_id
- db['dependencies'] << { "name" => rr.name, "type" => "database" }
+ MU::Config.addDependency(db, rr.name, "database")
elsif !rr.kitten
MU.log "Couldn't resolve Database reference to a unique live Database in #{db['name']}", MU::ERR, details: rr
ok = false
@@ -417,12 +409,7 @@ def self.validate(db, configurator)
if db["source"]["name"] and !db["source"]["deploy_id"] and configurator.haveLitterMate?(db["source"]["name"], "databases")
- db["dependencies"] ||= []
- db["dependencies"] << {
- "type" => "database",
- "name" => db["source"]["name"]#,
-# "phase" => "groom"
- }
+ MU::Config.addDependency(db, db["source"]["name"], "database")
end
db["source"]["cloud"] ||= db["cloud"]
end
diff --git a/modules/mu/config/server_pool.rb b/modules/mu/config/server_pool.rb
index d8e6d39ea..648145a81 100644
--- a/modules/mu/config/server_pool.rb
+++ b/modules/mu/config/server_pool.rb
@@ -186,11 +186,7 @@ def self.validate(pool, configurator)
if !pool["vpc"].nil?
if !pool["vpc"]["subnet_name"].nil? and
configurator.nat_routes.has_key?(pool["vpc"]["subnet_name"])
- pool["dependencies"] << {
- "type" => "pool",
- "name" => configurator.nat_routes[pool["vpc"]["subnet_name"]],
- "phase" => "groom"
- }
+ MU::Config.addDependency(pool, configurator.nat_routes[pool["vpc"]["subnet_name"]], "server", phase: "groom", no_create_wait: true)
end
end
# TODO make sure this is handled...
somewhere @@ -203,7 +199,7 @@ def self.validate(pool, configurator) # } # end if pool["basis"] and pool["basis"]["server"] - pool["dependencies"] << {"type" => "server", "name" => pool["basis"]["server"]} + MU::Config.addDependency(pool, pool["basis"]["server"], "server", phase: "groom") end if !pool['static_ip'].nil? and !pool['ip'].nil? ok = false From 768f5c895d2f5e21bd5cd42a6c991c779a65772a Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 9 Apr 2020 11:34:43 -0400 Subject: [PATCH 067/124] AWS, Google, Azure: migrate to addDependency everywhere that was manually manipulating the config hash --- modules/mu/groomer.rb | 2 +- modules/mu/providers/aws/alarm.rb | 2 +- modules/mu/providers/aws/container_cluster.rb | 31 ++++--------------- modules/mu/providers/aws/dnszone.rb | 5 +-- modules/mu/providers/aws/endpoint.rb | 13 ++------ modules/mu/providers/aws/firewall_rule.rb | 28 +++-------------- modules/mu/providers/aws/function.rb | 12 ++----- modules/mu/providers/aws/group.rb | 6 +--- modules/mu/providers/aws/msg_queue.rb | 10 ++---- modules/mu/providers/aws/role.rb | 6 +--- modules/mu/providers/aws/search_domain.rb | 11 ++----- modules/mu/providers/aws/server.rb | 12 ++----- modules/mu/providers/aws/server_pool.rb | 8 ++--- modules/mu/providers/aws/user.rb | 6 +--- modules/mu/providers/aws/vpc.rb | 17 ++-------- .../mu/providers/azure/container_cluster.rb | 6 +--- modules/mu/providers/azure/server.rb | 20 ++---------- modules/mu/providers/azure/vpc.rb | 6 +--- .../mu/providers/google/container_cluster.rb | 6 +--- modules/mu/providers/google/folder.rb | 6 +--- modules/mu/providers/google/group.rb | 12 ++----- 21 files changed, 41 insertions(+), 184 deletions(-) diff --git a/modules/mu/groomer.rb b/modules/mu/groomer.rb index 8bbae0a11..e73c59fef 100644 --- a/modules/mu/groomer.rb +++ b/modules/mu/groomer.rb @@ -38,7 +38,7 @@ def self.availableGroomers begin groomerbase = loadGroomer(groomer) available << groomer if groomerbase.available? 
- rescue NameError + rescue LoadError end } diff --git a/modules/mu/providers/aws/alarm.rb b/modules/mu/providers/aws/alarm.rb index 75f985b36..d7a0d4ded 100644 --- a/modules/mu/providers/aws/alarm.rb +++ b/modules/mu/providers/aws/alarm.rb @@ -321,7 +321,7 @@ def self.validateConfig(alarm, configurator) if !depclass.nil? dimension["depclass"] = depclass if !dimension["name"].nil? and !dimension["name"].empty? - alarm["dependencies"] << { "name" => dimension["name"], "type" => depclass } + MU::Config.addDependency(alarm, dimension["name"], depclass) end end } diff --git a/modules/mu/providers/aws/container_cluster.rb b/modules/mu/providers/aws/container_cluster.rb index 0052503a0..4bbceaca2 100644 --- a/modules/mu/providers/aws/container_cluster.rb +++ b/modules/mu/providers/aws/container_cluster.rb @@ -1296,7 +1296,7 @@ def self.validateConfig(cluster, configurator) end if !created_generic_loggroup - cluster["dependencies"] << { "type" => "log", "name" => logname } + MU::Config.addDependency(cluster, logname, "log") logdesc = { "name" => logname, "region" => cluster["region"], @@ -1335,10 +1335,7 @@ def self.validateConfig(cluster, configurator) } configurator.insertKitten(roledesc, "roles") - cluster["dependencies"] << { - "type" => "role", - "name" => rolename - } + MU::Config.addDependency(cluster, rolename, "role") end created_generic_loggroup = true @@ -1367,11 +1364,7 @@ def self.validateConfig(cluster, configurator) role["tags"] = cluster["tags"] if !cluster["tags"].nil? role["optional_tags"] = cluster["optional_tags"] if !cluster["optional_tags"].nil? 
configurator.insertKitten(role, "roles") - cluster['dependencies'] << { - "type" => "role", - "name" => cluster["name"]+"pods", - "phase" => "groom" - } + MU::Config.addDependency(cluster, cluster["name"]+"pods", "role", phase: "groom") if !MU::Master.kubectl MU.log "Since I can't find a kubectl executable, you will have to handle all service account, user, and role bindings manually!", MU::WARN end @@ -1483,12 +1476,7 @@ def self.validateConfig(cluster, configurator) "AmazonEKS_CNI_Policy", "AmazonEC2ContainerRegistryReadOnly" ] - worker_pool["dependencies"] = [ - { - "type" => "container_cluster", - "name" => cluster['name'] - } - ] + MU::Config.addDependency(worker_pool, cluster["name"], "container_cluster") worker_pool["run_list"] = ["recipe[mu-tools::eks]"] worker_pool["run_list"].concat(cluster["run_list"]) if cluster["run_list"] MU::Config::Server.common_properties.keys.each { |k| @@ -1502,10 +1490,7 @@ def self.validateConfig(cluster, configurator) configurator.insertKitten(worker_pool, "server_pools") if cluster["flavor"] == "ECS" - cluster["dependencies"] << { - "name" => cluster["name"]+"workers", - "type" => "server_pool", - } + MU::Config.addDependency(cluster, cluster["name"]+"workers", "server_pool") end end @@ -1527,11 +1512,7 @@ def self.validateConfig(cluster, configurator) role["tags"] = cluster["tags"] if !cluster["tags"].nil? role["optional_tags"] = cluster["optional_tags"] if !cluster["optional_tags"].nil? 
configurator.insertKitten(role, "roles") - cluster['dependencies'] << { - "type" => "role", - "name" => cluster["name"]+"controlplane", - "phase" => "groom" - } + MU::Config.addDependency(cluster, cluster["name"]+"controlplane", "role", phase: "groom") end ok diff --git a/modules/mu/providers/aws/dnszone.rb b/modules/mu/providers/aws/dnszone.rb index 8095df95d..ac3ce7042 100644 --- a/modules/mu/providers/aws/dnszone.rb +++ b/modules/mu/providers/aws/dnszone.rb @@ -825,10 +825,7 @@ def self.validateConfig(zone, _configurator) end if !record['mu_type'].nil? - zone["dependencies"] << { - "type" => record['mu_type'], - "name" => record['target'] - } + MU::Config.addDependency(zone, record['target'], record['mu_type']) end if record.has_key?('healthchecks') && !record['healthchecks'].empty? diff --git a/modules/mu/providers/aws/endpoint.rb b/modules/mu/providers/aws/endpoint.rb index 1ad277cf3..45f569ef3 100644 --- a/modules/mu/providers/aws/endpoint.rb +++ b/modules/mu/providers/aws/endpoint.rb @@ -472,11 +472,7 @@ def self.validateConfig(endpoint, configurator) endpoint['methods'].each { |m| if m['integrate_with'] and m['integrate_with']['name'] if m['integrate_with']['type'] != "aws_generic" - endpoint['dependencies'] ||= [] - endpoint['dependencies'] << { - "type" => m['integrate_with']['type'], - "name" => m['integrate_with']['name'] - } + MU::Config.addDependency(endpoint, m['integrate_with']['name'], m['integrate_with']['type']) end m['integrate_with']['backend_http_method'] ||= m['type'] @@ -525,13 +521,8 @@ def self.validateConfig(endpoint, configurator) end configurator.insertKitten(roledesc, "roles") - endpoint['dependencies'] ||= [] m['iam_role'] = endpoint['name']+"-"+m['integrate_with']['name'] - - endpoint['dependencies'] << { - "type" => "role", - "name" => endpoint['name']+"-"+m['integrate_with']['name'] - } + MU::Config.addDependency(endpoint, m['iam_role'], "role") end end } diff --git a/modules/mu/providers/aws/firewall_rule.rb 
b/modules/mu/providers/aws/firewall_rule.rb index 76ebdc77b..000719778 100644 --- a/modules/mu/providers/aws/firewall_rule.rb +++ b/modules/mu/providers/aws/firewall_rule.rb @@ -648,36 +648,16 @@ def self.validateConfig(acl, configurator) if rule['firewall_rules'] rule['firewall_rules'].each { |sg| - if sg.is_a?(MU::Config::Ref) and sg.name - acl["dependencies"] << { - "type" => "firewall_rule", - "name" => sg.name, - "no_create_wait" => true - } - elsif sg['name'] and !sg['deploy_id'] - acl["dependencies"] << { - "type" => "firewall_rule", - "name" => sg['name'], - "no_create_wait" => true - } + if sg['name'] and !sg['deploy_id'] + MU::Config.addDependency(acl, sg['name'], "firewall_rule", no_create_wait: true) end } end if rule['loadbalancers'] rule['loadbalancers'].each { |lb| - if lb.is_a?(MU::Config::Ref) and lb.name - acl["dependencies"] << { - "type" => "loadbalancer", - "name" => lb.name, - "phase" => "groom" - } - elsif lb['name'] and !lb['deploy_id'] - acl["dependencies"] << { - "type" => "loadbalancer", - "name" => lb['name'], - "phase" => "groom" - } + if lb['name'] and !lb['deploy_id'] + MU::Config.addDependency(acl, lb['name'], "loadbalancer", phase: "groom") end } end diff --git a/modules/mu/providers/aws/function.rb b/modules/mu/providers/aws/function.rb index 5589bb695..2e43f3446 100644 --- a/modules/mu/providers/aws/function.rb +++ b/modules/mu/providers/aws/function.rb @@ -505,11 +505,7 @@ def self.validateConfig(function, configurator) function["add_firewall_rules"] << {"name" => fwname} function["permissions"] ||= [] function["permissions"] << "network" - function['dependencies'] ||= [] - function['dependencies'] << { - "name" => fwname, - "type" => "firewall_rule" - } + MU::Config.addDependency(function, fwname, "firewall_rule") end if !function['iam_role'] @@ -541,13 +537,9 @@ def self.validateConfig(function, configurator) } configurator.insertKitten(roledesc, "roles") - function['dependencies'] ||= [] function['iam_role'] = 
function['name']+"execrole"

- function['dependencies'] << {
- "type" => "role",
- "name" => function['name']+"execrole"
- }
+ MU::Config.addDependency(function, function['name']+"execrole", "role")
end

ok
diff --git a/modules/mu/providers/aws/group.rb b/modules/mu/providers/aws/group.rb
index 1e9be64b2..8c073bc7d 100644
--- a/modules/mu/providers/aws/group.rb
+++ b/modules/mu/providers/aws/group.rb
@@ -378,11 +378,7 @@ def self.validateConfig(group, configurator)
if group['members']
group['members'].each { |user|
if configurator.haveLitterMate?(user, "users")
- group["dependencies"] ||= []
- group["dependencies"] << {
- "type" => "user",
- "name" => user
- }
+ MU::Config.addDependency(group, user, "user")
else
found = MU::Cloud.resourceClass("AWS", "User").find(cloud_id: user)
if found.nil? or found.empty?
diff --git a/modules/mu/providers/aws/msg_queue.rb b/modules/mu/providers/aws/msg_queue.rb
index 2c14eefd9..2f8ae9f2b 100644
--- a/modules/mu/providers/aws/msg_queue.rb
+++ b/modules/mu/providers/aws/msg_queue.rb
@@ -327,16 +327,10 @@ def self.validateConfig(queue, configurator)
failq.delete("failqueue")
ok = false if !configurator.insertKitten(failq, "msg_queues")
queue['failqueue']['name'] = failq['name']
- queue['dependencies'] << {
- "name" => failq['name'],
- "type" => "msg_queue"
- }
+ MU::Config.addDependency(queue, failq["name"], "msg_queue")
else
if configurator.haveLitterMate?(queue['failqueue']['name'], "msg_queue")
- queue['dependencies'] << {
- "name" => queue['failqueue']['name'],
- "type" => "msg_queue"
- }
+ MU::Config.addDependency(queue, queue['failqueue']['name'], "msg_queue")
else
failq = MU::Cloud::AWS::MsgQueue.find(cloud_id: queue['failqueue']['name'])
if !failq
diff --git a/modules/mu/providers/aws/role.rb b/modules/mu/providers/aws/role.rb
index 7bc70e23a..e375b05f0 100644
--- a/modules/mu/providers/aws/role.rb
+++ b/modules/mu/providers/aws/role.rb
@@ -1087,11 +1087,7 @@ def self.validateConfig(role, _configurator)
role['policies'].each
{ |policy| policy['targets'].each { |target| if target['type'] - role['dependencies'] ||= [] - role['dependencies'] << { - "name" => target['identifier'], - "type" => target['type'] - } + MU::Config.addDependency(role, target['identifier'], target['type']) end } } diff --git a/modules/mu/providers/aws/search_domain.rb b/modules/mu/providers/aws/search_domain.rb index e817c9725..1b77f0c2b 100644 --- a/modules/mu/providers/aws/search_domain.rb +++ b/modules/mu/providers/aws/search_domain.rb @@ -378,7 +378,7 @@ def self.validateConfig(dom, configurator) if dom['slow_logs'] if configurator.haveLitterMate?(dom['slow_logs'], "log") - dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" } + MU::Config.addDependency(dom, dom['slow_logs'], "log") else log_group = MU::Cloud.resourceClass("AWS", "Log").find(cloud_id: dom['slow_logs'], region: dom['region']).values.first if !log_group @@ -395,7 +395,7 @@ def self.validateConfig(dom, configurator) "credentials" => dom['credentials'] } ok = false if !configurator.insertKitten(log_group, "logs") - dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" } + MU::Config.addDependency(dom, dom['slow_logs'], "log") end if dom['advanced_options'] @@ -456,12 +456,7 @@ def self.validateConfig(dom, configurator) ] } configurator.insertKitten(roledesc, "roles") - - dom['dependencies'] ||= [] - dom['dependencies'] << { - "type" => "role", - "name" => dom['name']+"cognitorole" - } + MU::Config.addDependency(dom, dom['name']+"cognitorole", "role") end end diff --git a/modules/mu/providers/aws/server.rb b/modules/mu/providers/aws/server.rb index b8ad7e3c3..7d2d78379 100644 --- a/modules/mu/providers/aws/server.rb +++ b/modules/mu/providers/aws/server.rb @@ -1804,12 +1804,7 @@ def self.generateStandardRole(server, configurator) end configurator.insertKitten(role, "roles") - - server["dependencies"] ||= [] - server["dependencies"] << { - "type" => "role", - "name" => server["name"] - } + 
MU::Config.addDependency(server, server["name"], "role") end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -1860,10 +1855,7 @@ def self.validateConfig(server, configurator) server["loadbalancers"].each { |lb| lb["name"] ||= lb["concurrent_load_balancer"] if lb["name"] - server["dependencies"] << { - "type" => "loadbalancer", - "name" => lb["name"] - } + MU::Config.addDependency(server, lb["name"], "loadbalancer") end } end diff --git a/modules/mu/providers/aws/server_pool.rb b/modules/mu/providers/aws/server_pool.rb index cd60754ce..05135b5d5 100644 --- a/modules/mu/providers/aws/server_pool.rb +++ b/modules/mu/providers/aws/server_pool.rb @@ -930,11 +930,7 @@ def self.validateConfig(pool, configurator) role['credentials'] = pool['credentials'] if pool['credentials'] configurator.insertKitten(role, "roles") - pool["dependencies"] ||= [] - pool["dependencies"] << { - "type" => "role", - "name" => pool["name"] - } + MU::Config.addDependency(pool, pool['name'], "role") end launch["ami_id"] ||= launch["image_id"] if launch["server"].nil? and launch["instance_id"].nil? and launch["ami_id"].nil? 
@@ -948,7 +944,7 @@ def self.validateConfig(pool, configurator) end end if launch["server"] != nil - pool["dependencies"] << {"type" => "server", "name" => launch["server"]} + MU::Config.addDependency(pool, launch["server"], "server", phase: "groom") # XXX I dunno, maybe toss an error if this isn't done already # servers.each { |server| # if server["name"] == launch["server"] diff --git a/modules/mu/providers/aws/user.rb b/modules/mu/providers/aws/user.rb index d6bdc4cad..b8725e3a5 100644 --- a/modules/mu/providers/aws/user.rb +++ b/modules/mu/providers/aws/user.rb @@ -542,11 +542,7 @@ def self.validateConfig(user, configurator) end if need_dependency - user["dependencies"] ||= [] - user["dependencies"] << { - "type" => "group", - "name" => group - } + MU::Config.addDependency(user, group, "group") end } end diff --git a/modules/mu/providers/aws/vpc.rb b/modules/mu/providers/aws/vpc.rb index 017a1069d..5c9702049 100644 --- a/modules/mu/providers/aws/vpc.rb +++ b/modules/mu/providers/aws/vpc.rb @@ -916,11 +916,7 @@ def self.validateConfig(vpc, configurator) logdesc["tags"] = vpc["tags"] if !vpc["tags"].nil? # logdesc["optional_tags"] = vpc["optional_tags"] if !vpc["optional_tags"].nil? configurator.insertKitten(logdesc, "logs") - vpc['dependencies'] ||= [] - vpc['dependencies'] << { - "type" => "log", - "name" => vpc['name']+"loggroup" - } + MU::Config.addDependency(vpc, vpc['name']+"loggroup", "log") roledesc = { "name" => vpc['name']+"logrole", @@ -958,11 +954,7 @@ def self.validateConfig(vpc, configurator) roledesc["tags"] = vpc["tags"] if !vpc["tags"].nil? roledesc["optional_tags"] = vpc["optional_tags"] if !vpc["optional_tags"].nil? 
configurator.insertKitten(roledesc, "roles") - vpc['dependencies'] ||= [] - vpc['dependencies'] << { - "type" => "role", - "name" => vpc['name']+"logrole" - } + MU::Config.addDependency(vpc, vpc['name']+"logrole", "role") end subnet_routes = Hash.new @@ -1013,10 +1005,7 @@ def self.validateConfig(vpc, configurator) subnet_routes[table['name']].each { |subnet| nat_routes[subnet] = route['nat_host_name'] } - vpc['dependencies'] << { - "type" => "server", - "name" => route['nat_host_name'] - } + MU::Config.addDependency(vpc, route['nat_host_name'], "server", no_create_wait: true) elsif route['gateway'] == '#NAT' vpc['create_nat_gateway'] = true private_rtbs << table['name'] diff --git a/modules/mu/providers/azure/container_cluster.rb b/modules/mu/providers/azure/container_cluster.rb index 1ab510155..22ecb6951 100644 --- a/modules/mu/providers/azure/container_cluster.rb +++ b/modules/mu/providers/azure/container_cluster.rb @@ -218,11 +218,7 @@ def self.validateConfig(cluster, configurator) "Azure Kubernetes Service Cluster Admin Role" ] } - cluster['dependencies'] ||= [] - cluster['dependencies'] << { - "type" => "user", - "name" => cluster["name"]+"user" - } + MU::Config.addDependency(cluster, cluster['name']+"user", "user") ok = false if !configurator.insertKitten(svcacct_desc, "users") diff --git a/modules/mu/providers/azure/server.rb b/modules/mu/providers/azure/server.rb index db5a05777..33c2bd22e 100644 --- a/modules/mu/providers/azure/server.rb +++ b/modules/mu/providers/azure/server.rb @@ -612,18 +612,8 @@ def self.validateConfig(server, configurator) if !configurator.insertKitten(vpc, "vpcs", true) ok = false end - server['dependencies'] ||= [] - - server['dependencies'] << { - "type" => "vpc", - "name" => server['name']+"vpc" - } -# XXX what happens if there's no natstion here? 
- server['dependencies'] << { - "type" => "server", - "name" => server['name']+"vpc-natstion", - "phase" => "groom" - } + MU::Config.addDependency(server, server['name']+"vpc", "vpc") + MU::Config.addDependency(server, server['name']+"vpc-natstion", "server", phase: "groom") server['vpc'] = { "name" => server['name']+"vpc", "subnet_pref" => "private" @@ -640,11 +630,7 @@ def self.validateConfig(server, configurator) "credentials" => server["credentials"], "roles" => server["roles"] } - server['dependencies'] ||= [] - server['dependencies'] << { - "type" => "user", - "name" => server["name"]+"user" - } + MU::Config.addDependency(server, server['name']+"user", "user") ok = false if !configurator.insertKitten(svcacct_desc, "users") diff --git a/modules/mu/providers/azure/vpc.rb b/modules/mu/providers/azure/vpc.rb index b5cf6e630..81df50bb7 100644 --- a/modules/mu/providers/azure/vpc.rb +++ b/modules/mu/providers/azure/vpc.rb @@ -424,11 +424,7 @@ def self.validateConfig(vpc, configurator) } ] } - vpc["dependencies"] ||= [] - vpc["dependencies"] << { - "type" => "firewall_rule", - "name" => vpc['name']+"-defaultfw" - } + MU::Config.addDependency(vpc, vpc['name']+"-defaultfw", "firewall_rule") if !configurator.insertKitten(default_acl, "firewall_rules", true) ok = false diff --git a/modules/mu/providers/google/container_cluster.rb b/modules/mu/providers/google/container_cluster.rb index 921f5f39f..7a3946961 100644 --- a/modules/mu/providers/google/container_cluster.rb +++ b/modules/mu/providers/google/container_cluster.rb @@ -1051,11 +1051,7 @@ def self.validateConfig(cluster, configurator) if cluster['service_account']['name'] and !cluster['service_account']['id'] and !cluster['service_account']['deploy_id'] - cluster['dependencies'] ||= [] - cluster['dependencies'] << { - "type" => "user", - "name" => cluster['service_account']['name'] - } + MU::Config.addDependency(cluster, cluster['service_account']['name'], "user") end found = 
MU::Config::Ref.get(cluster['service_account']) # XXX verify that found.kitten fails when it's supposed to diff --git a/modules/mu/providers/google/folder.rb b/modules/mu/providers/google/folder.rb index 4fc7c4096..bef792522 100644 --- a/modules/mu/providers/google/folder.rb +++ b/modules/mu/providers/google/folder.rb @@ -355,11 +355,7 @@ def self.validateConfig(folder, configurator) end if folder['parent'] and folder['parent']['name'] and !folder['parent']['deploy_id'] and configurator.haveLitterMate?(folder['parent']['name'], "folders") - folder["dependencies"] ||= [] - folder["dependencies"] << { - "type" => "folder", - "name" => folder['parent']['name'] - } + MU::Config.addDependency(folder, folder['parent']['name'], "folder") end ok diff --git a/modules/mu/providers/google/group.rb b/modules/mu/providers/google/group.rb index 810531f20..b7bd180d6 100644 --- a/modules/mu/providers/google/group.rb +++ b/modules/mu/providers/google/group.rb @@ -340,11 +340,7 @@ def self.validateConfig(group, configurator) if group['members'] group['members'].each { |m| if configurator.haveLitterMate?(m, "users") - group['dependencies'] ||= [] - group['dependencies'] << { - "name" => m, - "type" => "user" - } + MU::Config.addDependency(group, m, "user") end } end @@ -353,11 +349,7 @@ def self.validateConfig(group, configurator) group['roles'].each { |r| if r['role'] and r['role']['name'] and (!r['role']['deploy_id'] and !r['role']['id']) - group['dependencies'] ||= [] - group['dependencies'] << { - "type" => "role", - "name" => r['role']['name'] - } + MU::Config.addDependency(group, r['role']['name'], "role") end } end From 577263c9e9e3552d01095b8e535e638ba137a5d1 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 9 Apr 2020 12:48:06 -0400 Subject: [PATCH 068/124] AWS::Role: fix some adoption-related edge cases --- modules/mu/providers/aws/group.rb | 2 +- modules/mu/providers/aws/role.rb | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git 
a/modules/mu/providers/aws/group.rb b/modules/mu/providers/aws/group.rb index 8c073bc7d..a823a6b21 100644 --- a/modules/mu/providers/aws/group.rb +++ b/modules/mu/providers/aws/group.rb @@ -378,7 +378,7 @@ def self.validateConfig(group, configurator) if group['members'] group['members'].each { |user| if configurator.haveLitterMate?(user, "users") - MU::Config.addDependency(group, user, "group") + MU::Config.addDependency(group, user, "user") else found = MU::Cloud.resourceClass("AWS", "User").find(cloud_id: user) if found.nil? or found.empty? diff --git a/modules/mu/providers/aws/role.rb b/modules/mu/providers/aws/role.rb index e375b05f0..2d4ed959d 100644 --- a/modules/mu/providers/aws/role.rb +++ b/modules/mu/providers/aws/role.rb @@ -615,7 +615,6 @@ def toKitten(**_args) ) JSON.parse(URI.decode(version.policy_version.document)) end - bok["policies"] = MU::Cloud::AWS::Role.doc2MuPolicies(pol.policy_name, doc, bok["policies"]) end } @@ -695,6 +694,7 @@ def toKitten(**_args) end bok["attachable_policies"].uniq! 
if bok["attachable_policies"] + bok["name"].gsub!(/[^a-zA-Z0-9_\-]/, "_") bok end @@ -707,6 +707,10 @@ def toKitten(**_args) def self.doc2MuPolicies(basename, doc, policies = []) policies ||= [] + if !doc["Statement"].is_a?(Array) + doc["Statement"] = [doc["Statement"]] + end + doc["Statement"].each { |s| if !s["Action"] MU.log "Statement in policy document for #{basename} didn't have an Action field", MU::WARN, details: doc From ec7236b1fdcda0460127f8a44e036fcbfd2095c1 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 9 Apr 2020 13:54:56 -0400 Subject: [PATCH 069/124] Azure, Google: add machine image mapping entries for CentOS 8 --- modules/mu/defaults/Azure.yaml | 1 + modules/mu/defaults/Google.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/modules/mu/defaults/Azure.yaml b/modules/mu/defaults/Azure.yaml index 057aa2e3e..9185deb11 100644 --- a/modules/mu/defaults/Azure.yaml +++ b/modules/mu/defaults/Azure.yaml @@ -2,6 +2,7 @@ centos6: ¢os6 OpenLogic/CentOS/6 #centos7: ¢os7 westernoceansoftwaresprivatelimited/centos-7-6/centos-7-6-server centos7: ¢os7 OpenLogic/CentOS/7 +centos8: ¢os7 OpenLogic/CentOS/8 rhel8: &rhel8 RedHat/RHEL/8 rhel7: &rhel7 RedHat/RHEL/7 rhel6: &rhel6 RedHat/RHEL/6 diff --git a/modules/mu/defaults/Google.yaml b/modules/mu/defaults/Google.yaml index 78bd99c00..ed1ab18ae 100644 --- a/modules/mu/defaults/Google.yaml +++ b/modules/mu/defaults/Google.yaml @@ -1,6 +1,7 @@ --- centos6: ¢os6 egt-labs-admin/mu-centos-6 centos7: ¢os7 egt-labs-admin/mu-centos-7 +centos8: ¢os8 centos-cloud/centos-8 rhel71: &rhel71 rhel-cloud/rhel-7 rhel6: &rhel6 rhel-cloud/rhel-6 debian10: &debian10 debian-cloud/debian-10 From 8f5d6fce50dac9e2fbdb947e4a9933d4d50332b7 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 10 Apr 2020 14:13:26 -0400 Subject: [PATCH 070/124] Cloud: load submodules correctly; MommaCat: fixed miss object loads on read-in of live deploys; AWS::Database: honor deploy metadata during cleanup, frameout of #toKitten --- bin/mu-adopt | 3 
+- modules/mu/adoption.rb | 4 +- modules/mu/cleanup.rb | 19 +++++---- modules/mu/cloud/resource_base.rb | 4 +- modules/mu/mommacat/search.rb | 29 +++++++++---- modules/mu/mommacat/storage.rb | 8 +--- modules/mu/providers/aws.rb | 2 +- modules/mu/providers/aws/database.rb | 64 +++++++++++++++++++++++----- 8 files changed, 94 insertions(+), 39 deletions(-) diff --git a/bin/mu-adopt b/bin/mu-adopt index d5180edc0..e15644840 100755 --- a/bin/mu-adopt +++ b/bin/mu-adopt @@ -48,6 +48,7 @@ $opt = Optimist::options do opt :diff, "List the differences between what we find and an existing, saved deploy from a previous run, if one exists.", :required => false, :type => :boolean opt :grouping, "Methods for grouping found resources into separate Baskets.\n\n"+MU::Adoption::GROUPMODES.keys.map { |g| "* "+g.to_s+": "+MU::Adoption::GROUPMODES[g] }.join("\n")+"\n\n", :required => false, :type => :string, :default => "logical" opt :habitats, "Limit scope of searches to the named accounts/projects/subscriptions, instead of search all habitats visible to our credentials.", :required => false, :type => :strings + opt :regions, "Restrict to operating on a subset of available regions, instead of all that we know about.", :require => false, :type => :strings opt :scrub, "Whether to set scrub_mu_isms in the BoKs we generate", :default => $MU_CFG.has_key?('adopt_scrub_mu_isms') ? 
$MU_CFG['adopt_scrub_mu_isms'] : false end @@ -103,7 +104,7 @@ if !ok end -adoption = MU::Adoption.new(clouds: clouds, types: types, parent: $opt[:parent], billing: $opt[:billing], sources: $opt[:sources], credentials: $opt[:credentials], group_by: $opt[:grouping].to_sym, savedeploys: $opt[:savedeploys], diff: $opt[:diff], habitats: $opt[:habitats], scrub_mu_isms: $opt[:scrub]) +adoption = MU::Adoption.new(clouds: clouds, types: types, parent: $opt[:parent], billing: $opt[:billing], sources: $opt[:sources], credentials: $opt[:credentials], group_by: $opt[:grouping].to_sym, savedeploys: $opt[:savedeploys], diff: $opt[:diff], habitats: $opt[:habitats], scrub_mu_isms: $opt[:scrub], regions: $opt[:regions]) found = adoption.scrapeClouds if found.nil? or found.empty? MU.log "No resources found to adopt", MU::WARN, details: {"clouds" => clouds, "types" => types } diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 786cf4f1e..5bc47e5be 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -30,7 +30,7 @@ class Incomplete < MU::MuNonFatal; end :omnibus => "Jam everything into one monolothic configuration" } - def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [], scrub_mu_isms: false) + def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [], scrub_mu_isms: false, regions: []) @scraped = {} @clouds = clouds @types = types @@ -44,6 +44,7 @@ def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_type @savedeploys = savedeploys @diff = diff @habitats = habitats + @regions = regions @habitats ||= [] @scrub_mu_isms = scrub_mu_isms end @@ -106,6 +107,7 @@ def scrapeClouds() credentials: credset, allow_multi: true, 
habitats: @habitats.dup, + region: @regions, dummy_ok: true, skip_provider_owned: true, # debug: false#, diff --git a/modules/mu/cleanup.rb b/modules/mu/cleanup.rb index 653dabce5..6a97b57a1 100644 --- a/modules/mu/cleanup.rb +++ b/modules/mu/cleanup.rb @@ -324,16 +324,17 @@ def self.cleanHabitat(cloud, credset, region, habitat, global_vs_region_semaphor def self.call_cleanup(type, credset, provider, flags, region) if @mommacat.nil? or @mommacat.numKittens(types: [type]) > 0 if @mommacat + found = @mommacat.findLitterMate(type: type, return_all: true, credentials: credset) - flags['known'] ||= [] - if found.is_a?(Array) - found.each { |k| - flags['known'] << k.cloud_id - } - elsif found and found.is_a?(Hash) - flags['known'] << found['cloud_id'] - elsif found - flags['known'] << found.cloud_id + + if found + flags['known'] = if found.is_a?(Array) + found.map { |k| k.cloud_id } + elsif found.is_a?(Hash) + found.each_value.map { |k| k.cloud_id } + else + [found.cloud_id] + end end end diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 8522a0f9b..18c792819 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -827,8 +827,8 @@ def resourceInitHook end end - if File.exist?(MU.myRoot+"/lib/modules/cloud/#{cfg_name}.rb") - require "modules/cloud/#{cfg_name}" + if File.exist?(MU.myRoot+"/modules/mu/cloud/#{cfg_name}.rb") + require "mu/cloud/#{cfg_name}" end # Wrap the instance methods that this cloud resource type has to diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb index 792f1e719..cd1da6f40 100644 --- a/modules/mu/mommacat/search.rb +++ b/modules/mu/mommacat/search.rb @@ -107,6 +107,7 @@ def self.findStray(cloud, type, matches = [] credlist.each { |creds| +# next if region and region.is_a?(Array) and !region.empty? 
and !region.include?(r) cloud_descs = search_cloud_provider(type, cloud, habitats, region, cloud_id: cloud_id, tag_key: tag_key, tag_value: tag_value, credentials: creds, flags: flags) cloud_descs.each_pair.each { |p, regions| @@ -161,8 +162,7 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on @kitten_semaphore.synchronize { return nil if !@kittens.has_key?(type) - matches = [] - + matches = {} @kittens[type].each { |habitat_group, sib_classes| next if habitat and habitat_group and habitat_group != habitat sib_classes.each_pair { |sib_class, cloud_objs| @@ -171,7 +171,8 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on next if !name.nil? and name != sib_class or cloud_objs.empty? if !name.nil? if return_all - return cloud_objs.dup + matches.merge!(cloud_objs.clone) + next elsif cloud_objs.size == 1 and does_match.call(cloud_objs.values.first) return cloud_objs.values.first end @@ -179,17 +180,21 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on cloud_objs.each_value { |obj| if does_match.call(obj) - return (return_all ? cloud_objs.clone : obj.clone) + if return_all + matches.merge!(cloud_objs.clone) + else + return obj.clone + end end } - # has_multiples is false + # has_multiples is false, "cloud_objs" is actually a singular object elsif (name.nil? and does_match.call(cloud_objs)) or [sib_class, cloud_objs.virtual_name(name)].include?(name.to_s) - matches << cloud_objs.clone + matches[cloud_objs.config['name']] = cloud_objs.clone end } } - return matches.first if matches.size == 1 + return matches.values.first if matches.size == 1 return matches if return_all and matches.size > 1 } @@ -279,7 +284,15 @@ def self.search_cloud_provider(type, cloud, habitats, region, cloud_id: nil, tag regions = if resourceclass.isGlobal? [nil] else - region ? [region] : cloudclass.listRegions(credentials: credentials) + if region + if region.is_a?(Array) and !region.empty? 
+ region + else + [region] + end + else + cloudclass.listRegions(credentials: credentials) + end end # Decide what habitats (accounts/projects/subscriptions) we'll diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index d340acdc9..1aace299c 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -570,14 +570,10 @@ def loadObjects(delay_descriptor_load) orig_cfg['environment'] = @environment # not always set in old deploys if attrs[:has_multiples] data.keys.each { |mu_name| - attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: mu_name, delay_descriptor_load: delay_descriptor_load) + addKitten(type, res_name, attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: mu_name, delay_descriptor_load: delay_descriptor_load)) } else - # XXX hack for old deployments, this can go away some day - if data['mu_name'].nil? - raise MuError, "Unable to find or guess a Mu name for #{res_type}: #{res_name} in #{@deploy_id}" - end - attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: data['mu_name'], cloud_id: data['cloud_id']) + addKitten(type, res_name, attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: data['mu_name'], cloud_id: data['cloud_id'])) end rescue StandardError => e if e.class != MU::Cloud::MuCloudResourceNotImplemented diff --git a/modules/mu/providers/aws.rb b/modules/mu/providers/aws.rb index 119e64819..4bba05dd8 100644 --- a/modules/mu/providers/aws.rb +++ b/modules/mu/providers/aws.rb @@ -1468,7 +1468,7 @@ def method_missing(method_sym, *arguments) retval = @api.method(method_sym).call end return retval - rescue Aws::EC2::Errors::InternalError, Aws::EC2::Errors::RequestLimitExceeded, Aws::EC2::Errors::Unavailable, Aws::Route53::Errors::Throttling, Aws::ElasticLoadBalancing::Errors::HttpFailureException, Aws::EC2::Errors::Http503Error, Aws::AutoScaling::Errors::Http503Error, Aws::AutoScaling::Errors::InternalFailure, 
Aws::AutoScaling::Errors::ServiceUnavailable, Aws::Route53::Errors::ServiceUnavailable, Aws::ElasticLoadBalancing::Errors::Throttling, Aws::RDS::Errors::ClientUnavailable, Aws::Waiters::Errors::UnexpectedError, Aws::ElasticLoadBalancing::Errors::ServiceUnavailable, Aws::ElasticLoadBalancingV2::Errors::Throttling, Seahorse::Client::NetworkingError, Aws::IAM::Errors::Throttling, Aws::EFS::Errors::ThrottlingException, Aws::Pricing::Errors::ThrottlingException, Aws::APIGateway::Errors::TooManyRequestsException, Aws::ECS::Errors::ThrottlingException, Net::ReadTimeout, Faraday::TimeoutError, Aws::CloudWatchLogs::Errors::ThrottlingException => e + rescue Aws::RDS::Errors::Throttling, Aws::EC2::Errors::InternalError, Aws::EC2::Errors::RequestLimitExceeded, Aws::EC2::Errors::Unavailable, Aws::Route53::Errors::Throttling, Aws::ElasticLoadBalancing::Errors::HttpFailureException, Aws::EC2::Errors::Http503Error, Aws::AutoScaling::Errors::Http503Error, Aws::AutoScaling::Errors::InternalFailure, Aws::AutoScaling::Errors::ServiceUnavailable, Aws::Route53::Errors::ServiceUnavailable, Aws::ElasticLoadBalancing::Errors::Throttling, Aws::RDS::Errors::ClientUnavailable, Aws::Waiters::Errors::UnexpectedError, Aws::ElasticLoadBalancing::Errors::ServiceUnavailable, Aws::ElasticLoadBalancingV2::Errors::Throttling, Seahorse::Client::NetworkingError, Aws::IAM::Errors::Throttling, Aws::EFS::Errors::ThrottlingException, Aws::Pricing::Errors::ThrottlingException, Aws::APIGateway::Errors::TooManyRequestsException, Aws::ECS::Errors::ThrottlingException, Net::ReadTimeout, Faraday::TimeoutError, Aws::CloudWatchLogs::Errors::ThrottlingException => e if e.class.name == "Seahorse::Client::NetworkingError" and e.message.match(/Name or service not known/) MU.log e.inspect, MU::ERR raise e diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index caa22be20..2f07dd21a 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -80,6 
+80,9 @@ def initialize(**args) @mu_name.gsub(/(--|-$)/i, "").gsub(/(_)/, "-").gsub!(/^[^a-z]/i, "") + if args[:from_cloud_desc] and args[:from_cloud_desc].is_a?(Aws::RDS::Types::DBCluster) + @config['create_cluster'] = true + end if @config['source'] @config["source"] = MU::Config::Ref.get(@config["source"]) elsif @config["read_replica_of"] @@ -198,12 +201,14 @@ def self.find(**args) resp.send("db_#{noun}s").each { |db| found[db.send("db_#{noun}_identifier".to_sym)] = db } - end while marker.nil? + end while !marker.nil? } + if args[:cluster] or !args.has_key?(:cluster) + fetch.call("cluster") + pp found + end if !args[:cluster] fetch.call("instance") - elsif args[:cluster] or !args.has_key?(:cluster) - fetch.call("cluster") end if args[:tag_key] and args[:tag_value] keep = [] @@ -228,6 +233,35 @@ def self.find(**args) return found end + # Reverse-map our cloud description into a runnable config hash. + # We assume that any values we have in +@config+ are placeholders, and + # calculate our own accordingly based on what's live in the cloud. + def toKitten(**_args) + bok = { + "cloud" => "AWS", + "region" => @config['region'], + "credentials" => @credentials, + "cloud_id" => @cloud_id, + "create_cluster" => @config['create_cluster'] + } + + noun = bok["create_cluster"] ? 
"cluster" : "db" + tags = MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).list_tags_for_resource( + resource_name: MU::Cloud::AWS::Database.getARN(@cloud_id, noun, "rds", region: @config['region'], credentials: @credentials) + ).tag_list +MU.log "tags for #{noun} #{@cloud_id}", MU::WARN, details: tags + bok["name"] = @cloud_id +# cloud_desc.db_cluster_members +# arn = MU::Cloud::AWS::Database.getARN(resource.send(id_method), arn_type, "rds", region: region, credentials: credentials) +# tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list + +# pp cloud_desc + exit if bok["create_cluster"] +# realname = MU::Adoption.tagsToName(bok['tags']) + + bok + end + # Construct an Amazon Resource Name for an RDS resource. The RDS API is # peculiar, and we often need this identifier in order to do things that # the other APIs can do with shorthand. @@ -505,7 +539,7 @@ def self.quality end # @return [Array] - def self.threaded_resource_purge(describe_method, list_method, id_method, arn_type, region, credentials, ignoremaster) + def self.threaded_resource_purge(describe_method, list_method, id_method, arn_type, region, credentials, ignoremaster, known: []) deletia = [] resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).send(describe_method) @@ -518,7 +552,7 @@ def self.threaded_resource_purge(describe_method, list_method, id_method, arn_ty next end - if should_delete?(tags, ignoremaster) + if should_delete?(tags, resource.send(id_method), ignoremaster, MU.deploy_id, MU.mu_public_ip, known) deletia << resource.send(id_method) end } @@ -542,15 +576,15 @@ def self.threaded_resource_purge(describe_method, list_method, id_method, arn_ty def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU.curRegion, flags: {}) ["instance", "cluster"].each { |type| - threaded_resource_purge("describe_db_#{type}s".to_sym, "db_#{type}s".to_sym, 
"db_#{type}_identifier".to_sym, (type == "instance" ? "db" : "cluster"), region, credentials, ignoremaster) { |id| - terminate_rds_instance(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials, cluster: (type == "cluster")) + threaded_resource_purge("describe_db_#{type}s".to_sym, "db_#{type}s".to_sym, "db_#{type}_identifier".to_sym, (type == "instance" ? "db" : "cluster"), region, credentials, ignoremaster, known: flags['known']) { |id| + terminate_rds_instance(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: MU.deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials, cluster: (type == "cluster"), known: flags['known']) }.each { |t| t.join } } - threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster) { |id| + threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster, known: flags['known']) { |id| MU.log "Deleting RDS subnet group #{id}" MU.retrier([Aws::RDS::Errors::InvalidDBSubnetGroupStateFault], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBSubnetGroupNotFoundFault]) { MU::Cloud::AWS.rds(region: region).delete_db_subnet_group(db_subnet_group_name: id) if !noop @@ -558,7 +592,7 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, region: MU. } ["db", "db_cluster"].each { |type| - threads.concat threaded_resource_purge("describe_#{type}_parameter_groups".to_sym, "#{type}_parameter_groups".to_sym, "#{type}_parameter_group_name".to_sym, (type == "db" ? "pg" : "cluster-pg"), region, credentials, ignoremaster) { |id| + threads.concat threaded_resource_purge("describe_#{type}_parameter_groups".to_sym, "#{type}_parameter_groups".to_sym, "#{type}_parameter_group_name".to_sym, (type == "db" ? 
"pg" : "cluster-pg"), region, credentials, ignoremaster, known: flags['known']) { |id| MU.log "Deleting RDS #{type} parameter group #{id}" MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) { MU::Cloud::AWS.rds(region: region).send("delete_#{type}_parameter_group", { "#{type}_parameter_group_name".to_sym => id }) if !noop @@ -1334,7 +1368,8 @@ def self.run_sql_mysql(address, port, user, password, db, cmds = [], identifier end private_class_method :run_sql_mysql - def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, master_ip = MU.mu_public_ip) + def self.should_delete?(tags, cloud_id, ignoremaster = false, deploy_id = MU.deploy_id, master_ip = MU.mu_public_ip, known = []) + found_muid = false found_master = false tags.each { |tag| @@ -1346,6 +1381,8 @@ def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, ma true elsif !ignoremaster && found_muid && found_master true + elsif known and cloud_id and known.include?(cloud_id) + true else false end @@ -1356,7 +1393,7 @@ def self.should_delete?(tags, ignoremaster = false, deploy_id = MU.deploy_id, ma # Remove an RDS database and associated artifacts # @param db [OpenStruct]: The cloud provider's description of the database artifact # @return [void] - def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil, cluster: false) + def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil, cluster: false, known: []) db ||= MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials, cluster: cluster).values.first if cloud_id db_obj ||= MU::MommaCat.findStray( "AWS", @@ -1370,6 +1407,11 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, 
region: M if db_obj cloud_id ||= db_obj.cloud_id db ||= db_obj.cloud_desc + ["parameter_group_name", "subnet_group_name"].each { |attr| + if db_obj.config[attr] + known << db_obj.config[attr] + end + } end raise MuError, "terminate_rds_instance requires a non-nil database descriptor (#{cloud_id})" if db.nil? or cloud_id.nil? From 3c8965888bf19652ed8200ddc7098e90aec8eeaa Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 10 Apr 2020 14:16:08 -0400 Subject: [PATCH 071/124] Cloud: make sure dependency lookups don't go expecting Array from findLitterMate when it will get a Hash --- modules/mu/cloud/resource_base.rb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 18c792819..76a6f144f 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -534,7 +534,7 @@ def dependencies(use_cache: false, debug: false) MU.log "Attempting findLitterMate on VPC for #{self}", loglevel, details: @config['vpc'] sib_by_name = @deploy.findLitterMate(name: @config['vpc']['name'], type: "vpcs", return_all: true, habitat: @config['vpc']['project'], debug: debug) - if sib_by_name.is_a?(Array) + if sib_by_name.is_a?(Hash) if sib_by_name.size == 1 @vpc = matches.first MU.log "Single VPC match for #{self}", loglevel, details: @vpc.to_s @@ -543,7 +543,7 @@ def dependencies(use_cache: false, debug: false) # we got multiple matches, try to pick one by preferred subnet # behavior MU.log "Sorting a bunch of VPC matches for #{self}", loglevel, details: sib_by_name.map { |s| s.to_s }.join(", ") - sib_by_name.each { |sibling| + sib_by_name.values.each { |sibling| all_private = sibling.subnets.map { |s| s.private? }.all?(true) all_public = sibling.subnets.map { |s| s.private? 
}.all?(false) names = sibling.subnets.map { |s| s.name } @@ -566,7 +566,7 @@ def dependencies(use_cache: false, debug: false) end } if !@vpc - sibling = sib_by_name.sample + sibling = sib_by_name.values.sample MU.log "Got multiple matching VPCs for #{self.class.cfg_name} #{@mu_name}, so I'm arbitrarily choosing #{sibling.mu_name}", MU::WARN, details: @config['vpc'] @vpc = sibling end From 479537795332cde28e5a3b446e7343d7d732caff Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 10 Apr 2020 16:04:49 -0400 Subject: [PATCH 072/124] AWS::VPC: rework peering connection cleanup to happen earlier --- modules/mu/providers/aws/dnszone.rb | 2 +- modules/mu/providers/aws/vpc.rb | 121 ++++++++++++++++------------ 2 files changed, 71 insertions(+), 52 deletions(-) diff --git a/modules/mu/providers/aws/dnszone.rb b/modules/mu/providers/aws/dnszone.rb index ac3ce7042..f813352e2 100644 --- a/modules/mu/providers/aws/dnszone.rb +++ b/modules/mu/providers/aws/dnszone.rb @@ -345,7 +345,7 @@ def self.toggleVPCAccess(id: nil, vpc_id: nil, region: MU.curRegion, remove: fal rescue Aws::Route53::Errors::LastVPCAssociation => e MU.log e.inspect, MU::WARN rescue Aws::Route53::Errors::VPCAssociationNotFound - MU.log "VPC #{vpc_id} access to zone #{id} already revoked", MU::WARN + MU.log "VPC #{vpc_id} access to zone #{id} already revoked", MU::NOTICE end end end diff --git a/modules/mu/providers/aws/vpc.rb b/modules/mu/providers/aws/vpc.rb index 5c9702049..9d26ea84c 100644 --- a/modules/mu/providers/aws/vpc.rb +++ b/modules/mu/providers/aws/vpc.rb @@ -838,9 +838,23 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent vpcs = resp if !resp.empty? } +# resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_vpc_peering_connections( +# filters: [ +# { +# name: "requester-vpc-info.vpc-id", +# values: [@cloud_id] +# }, +# { +# name: "accepter-vpc-info.vpc-id", +# values: [peer_id.to_s] +# } +# ] +# ) + if !vpcs.empty? 
gwthreads = [] vpcs.each { |vpc| + purge_peering_connections(noop, vpc.vpc_id, region: region, credentials: credentials) # NAT gateways don't have any tags, and we can't assign them a name. Lets find them based on a VPC ID gwthreads << Thread.new { purge_nat_gateways(noop, vpc_id: vpc.vpc_id, region: region, credentials: credentials) @@ -1699,6 +1713,61 @@ def self.purge_dhcpopts(noop = false, tagfilters = [{name: "tag:MU-ID", values: end private_class_method :purge_dhcpopts + def self.purge_peering_connections(noop, vpc_id, region: MU.curRegion, credentials: nil) + my_peer_conns = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_vpc_peering_connections( + filters: [ + { + name: "requester-vpc-info.vpc-id", + values: [vpc_id] + } + ] + ).vpc_peering_connections + my_peer_conns.concat(MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_vpc_peering_connections( + filters: [ + { + name: "accepter-vpc-info.vpc-id", + values: [vpc_id] + } + ] + ).vpc_peering_connections) + + my_peer_conns.each { |cnxn| + [cnxn.accepter_vpc_info.vpc_id, cnxn.requester_vpc_info.vpc_id].each { |peer_vpc| + MU::Cloud::AWS::VPC.listAllSubnetRouteTables(peer_vpc, region: region, credentials: credentials).each { |rtb_id| + begin + resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_route_tables( + route_table_ids: [rtb_id] + ) + rescue Aws::EC2::Errors::InvalidRouteTableIDNotFound + next + end + resp.route_tables.each { |rtb| + rtb.routes.each { |route| + if route.vpc_peering_connection_id == cnxn.vpc_peering_connection_id + MU.log "Removing route #{route.destination_cidr_block} from route table #{rtb_id} in VPC #{peer_vpc}" + MU::Cloud::AWS.ec2(credentials: credentials, region: region).delete_route( + route_table_id: rtb_id, + destination_cidr_block: route.destination_cidr_block + ) if !noop + end + } + } + } + } + MU.log "Deleting VPC peering connection #{cnxn.vpc_peering_connection_id}" + begin + MU::Cloud::AWS.ec2(credentials: 
credentials, region: region).delete_vpc_peering_connection( + vpc_peering_connection_id: cnxn.vpc_peering_connection_id + ) if !noop + rescue Aws::EC2::Errors::InvalidStateTransition + MU.log "VPC peering connection #{cnxn.vpc_peering_connection_id} not in removable (state #{cnxn.status.code})", MU::WARN + rescue Aws::EC2::Errors::OperationNotPermitted => e + MU.log "VPC peering connection #{cnxn.vpc_peering_connection_id} refuses to delete: #{e.message}", MU::WARN + end + } + end + private_class_method :purge_peering_connections + # Remove all VPCs associated with the currently loaded deployment. # @param noop [Boolean]: If true, will only print what would be done # @param tagfilters [Array]: EC2 tags to filter against when search for resources to purge @@ -1713,57 +1782,7 @@ def self.purge_vpcs(noop = false, tagfilters = [{name: "tag:MU-ID", values: [MU. return if vpcs.nil? or vpcs.size == 0 vpcs.each { |vpc| - my_peer_conns = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_vpc_peering_connections( - filters: [ - { - name: "requester-vpc-info.vpc-id", - values: [vpc.vpc_id] - } - ] - ).vpc_peering_connections - my_peer_conns.concat(MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_vpc_peering_connections( - filters: [ - { - name: "accepter-vpc-info.vpc-id", - values: [vpc.vpc_id] - } - ] - ).vpc_peering_connections) - my_peer_conns.each { |cnxn| - - [cnxn.accepter_vpc_info.vpc_id, cnxn.requester_vpc_info.vpc_id].each { |peer_vpc| - MU::Cloud::AWS::VPC.listAllSubnetRouteTables(peer_vpc, region: region, credentials: credentials).each { |rtb_id| - begin - resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_route_tables( - route_table_ids: [rtb_id] - ) - rescue Aws::EC2::Errors::InvalidRouteTableIDNotFound - next - end - resp.route_tables.each { |rtb| - rtb.routes.each { |route| - if route.vpc_peering_connection_id == cnxn.vpc_peering_connection_id - MU.log "Removing route 
#{route.destination_cidr_block} from route table #{rtb_id} in VPC #{peer_vpc}" - MU::Cloud::AWS.ec2(credentials: credentials, region: region).delete_route( - route_table_id: rtb_id, - destination_cidr_block: route.destination_cidr_block - ) if !noop - end - } - } - } - } - MU.log "Deleting VPC peering connection #{cnxn.vpc_peering_connection_id}" - begin - MU::Cloud::AWS.ec2(credentials: credentials, region: region).delete_vpc_peering_connection( - vpc_peering_connection_id: cnxn.vpc_peering_connection_id - ) if !noop - rescue Aws::EC2::Errors::InvalidStateTransition - MU.log "VPC peering connection #{cnxn.vpc_peering_connection_id} not in removable (state #{cnxn.status.code})", MU::WARN - rescue Aws::EC2::Errors::OperationNotPermitted => e - MU.log "VPC peering connection #{cnxn.vpc_peering_connection_id} refuses to delete: #{e.message}", MU::WARN - end - } + purge_peering_connections(noop, vpc.vpc_id, region: region, credentials: credentials) on_retry = Proc.new { MU::Cloud.resourceClass("AWS", "FirewallRule").cleanup( From e570d3970677af56d6660c5b478f3412c6a93a72 Mon Sep 17 00:00:00 2001 From: John Stange Date: Sat, 11 Apr 2020 01:39:37 -0400 Subject: [PATCH 073/124] exorcise remaining const_get chains for looking up our own stuff --- bin/mu-adopt | 8 +--- bin/mu-azure-tests | 57 +++++++++++++++++++++++++++ bin/mu-cleanup | 6 +-- bin/mu-findstray-tests | 25 ++++++++++++ modules/mu.rb | 19 +++------ modules/mu/cloud.rb | 6 +-- modules/mu/cloud/resource_base.rb | 6 +-- modules/mu/cloud/ssh_sessions.rb | 4 +- modules/mu/cloud/wrappers.rb | 2 +- modules/mu/mommacat.rb | 14 +++---- modules/mu/mommacat/storage.rb | 3 +- modules/mu/providers/aws/server.rb | 3 +- modules/mu/providers/azure/server.rb | 3 +- modules/mu/providers/google/server.rb | 3 +- 14 files changed, 108 insertions(+), 51 deletions(-) create mode 100755 bin/mu-azure-tests create mode 100755 bin/mu-findstray-tests diff --git a/bin/mu-adopt b/bin/mu-adopt index e15644840..4cd6330c3 100755 --- 
a/bin/mu-adopt +++ b/bin/mu-adopt @@ -21,12 +21,6 @@ require 'bundler/setup' require 'optimist' require 'mu' -available_clouds = MU::Cloud.supportedClouds -available_clouds.reject! { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) - cloudclass.listCredentials.nil? or cloudclass.listCredentials.size == 0 -} - available_types = MU::Cloud.resource_types.keys.map { |t| t.to_s } grouping_options = { "logical" => "Group resources in logical layers (folders and habitats together, users/roles/groups together, network resources together, etc)", @@ -39,7 +33,7 @@ $opt = Optimist::options do EOS opt :appname, "The overarching name of the application stack we will generate", :required => false, :default => "mu", :type => :string opt :types, "The resource types to scan and import. Valid types: #{available_types.join(", ")}", :required => false, :type => :strings, :default => available_types - opt :clouds, "The cloud providers to scan and import.", :required => false, :type => :strings, :default => available_clouds + opt :clouds, "The cloud providers to scan and import.", :required => false, :type => :strings, :default => MU::Cloud.availableClouds opt :parent, "Where applicable, resources which reside in the root folder or organization are configured with the specified parent in our target BoK", :required => false, :type => :string opt :billing, "Force-set this billing entity on created resources, instead of copying from the live resources", :required => false, :type => :string opt :sources, "One or more sets of credentials to use when importing resources. 
By default we will search and import from all sets of available credentials for each cloud provider specified with --clouds", :required => false, :type => :strings diff --git a/bin/mu-azure-tests b/bin/mu-azure-tests new file mode 100755 index 000000000..2978c3022 --- /dev/null +++ b/bin/mu-azure-tests @@ -0,0 +1,57 @@ +#!/usr/local/ruby-current/bin/ruby +# Copyright:: Copyright (c) 2014 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +require 'rubygems' +require 'bundler/setup' +require 'json' +require 'erb' +require 'optimist' +require 'json-schema' +require File.realpath(File.expand_path(File.dirname(__FILE__)+"/mu-load-config.rb")) +require 'mu' + +(0..100000).to_a.each { |n| +retries = 0 +seed = nil +# begin +# raise MuError, "Failed to allocate an unused MU-ID after #{retries} tries!" 
if retries > 70 +# seedsize = 1 + (retries/10).abs +# seed = (0...seedsize+1).map { ('a'..'z').to_a[rand(26)] }.join +# end while seed == "mu" or seed[0] == seed[1] +seed = "nn" +handle = MU::MommaCat.generateHandle(seed) +puts handle +} +exit + +#pp MU::Cloud::Azure.listRegions +#pp MU::Cloud::Azure::Habitat.testcalls +#pp MU::Cloud::Azure::VPC.find(cloud_id: MU::Cloud::Azure::Id.new(resource_group: "mu", name: "mu-vnet")) +#pp MU::Cloud::Azure.authorization.role_assignments.list_for_resource_group("AKS-DEV-2019062015-KA-EASTUS") +#pp MU::Cloud::Azure::Role.find(role_name: "Azure Kubernetes Service Cluster Admin Role") +#puts MU::Cloud::Azure.default_subscription +#pp MU::Cloud::Azure.fetchPublicIP("MYVPC-DEV-2019061911-XI-EASTUS", "ip-addr-thingy") +#pp MU::Cloud::Azure.ensureProvider("egtazure", "Microsoft.ContainerService", force: true) +pp MU::Cloud::Azure::Server.find(cloud_id: "mu") +exit +pp MU::Cloud::Azure::Server.fetchImage("OpenLogic/CentOS/6") +pp MU::Cloud::Azure::Server.fetchImage("OpenLogic/CentOS/7") +pp MU::Cloud::Azure::Server.fetchImage("RedHat/RHEL/8") +pp MU::Cloud::Azure::Server.fetchImage("RedHat/RHEL/7") +pp MU::Cloud::Azure::Server.fetchImage("RedHat/RHEL/6") +pp MU::Cloud::Azure::Server.fetchImage("Debian/debian-10/10") +pp MU::Cloud::Azure::Server.fetchImage("MicrosoftWindowsServer/WindowsServer/2012-R2-Datacenter") +pp MU::Cloud::Azure::Server.fetchImage("MicrosoftWindowsServer/WindowsServer/2016-Datacenter") +pp MU::Cloud::Azure::Server.fetchImage("MicrosoftWindowsServer/WindowsServer/2019-Datacenter") diff --git a/bin/mu-cleanup b/bin/mu-cleanup index e3f6675a7..453c6ff53 100755 --- a/bin/mu-cleanup +++ b/bin/mu-cleanup @@ -24,10 +24,8 @@ require 'mu' Dir.chdir(MU.installDir) credentials = [] -MU::Cloud.supportedClouds.each { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) - next if cloudclass.listCredentials.nil? 
or cloudclass.listCredentials.size == 0 - credentials.concat(cloudclass.listCredentials) +MU::Cloud.availableClouds.each { |cloud| + credentials.concat(MU::Cloud.cloudClass(cloud).listCredentials) } credentials.uniq! diff --git a/bin/mu-findstray-tests b/bin/mu-findstray-tests new file mode 100755 index 000000000..25c249a40 --- /dev/null +++ b/bin/mu-findstray-tests @@ -0,0 +1,25 @@ +#!/usr/local/ruby-current/bin/ruby +# Copyright:: Copyright (c) 2014 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +require 'rubygems' +require 'bundler/setup' +require 'json' +require 'erb' +require 'optimist' +require 'json-schema' +require File.realpath(File.expand_path(File.dirname(__FILE__)+"/mu-load-config.rb")) +require 'mu' + +MU::MommaCat.findStray("AWS", "firewall_rule", region: MU.myRegion, dummy_ok: true, debug: true) diff --git a/modules/mu.rb b/modules/mu.rb index 8b36f2bb9..6ab3dd98e 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -655,7 +655,7 @@ def self.detectCloudProviders new_cfg = $MU_CFG.dup examples = {} MU::Cloud.supportedClouds.each { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + cloudclass = MU::Cloud.cloudClass(cloud) begin if cloudclass.hosted? and !$MU_CFG[cloud.downcase] cfg_blob = cloudclass.hosted_config @@ -811,11 +811,7 @@ def self.chefVersion # @param groomer [String]: The grooming agent to load. 
# @return [Class]: The class object implementing this groomer agent def self.loadGroomer(groomer) - if !File.size?(MU.myRoot+"/modules/mu/groomers/#{groomer.downcase}.rb") - raise MuError, "Requested to use unsupported grooming agent #{groomer}" - end - require "mu/groomers/#{groomer.downcase}" - return Object.const_get("MU").const_get("Groomer").const_get(groomer) + MU::Groomer.loadGroomer(groomer) end @@myRegion_var = nil @@ -969,8 +965,7 @@ def self.structToHash(struct, stringify_keys: false) @@myCloudDescriptor = nil if MU.myCloud - svrclass = const_get("MU").const_get("Cloud").const_get(MU.myCloud).const_get("Server") - found = svrclass.find(cloud_id: @@myInstanceId, region: MU.myRegion) # XXX need habitat arg for google et al + found = MU::Cloud.resourceClass(MU.myCloud, "Server").find(cloud_id: @@myInstanceId, region: MU.myRegion) # XXX need habitat arg for google et al # found = MU::MommaCat.findStray(MU.myCloud, "server", cloud_id: @@myInstanceId, dummy_ok: true, region: MU.myRegion) if !found.nil? and found.size == 1 @@myCloudDescriptor = found.values.first @@ -983,8 +978,7 @@ def self.structToHash(struct, stringify_keys: false) def self.myVPCObj return nil if MU.myCloud.nil? return @@myVPCObj_var if @@myVPCObj_var - cloudclass = const_get("MU").const_get("Cloud").const_get(MU.myCloud) - @@myVPCObj_var ||= cloudclass.myVPCObj + @@myVPCObj_var ||= MU::Cloud.cloudClass(MU.myCloud).myVPCObj @@myVPCObj_var end @@ -1109,10 +1103,9 @@ def self.adminBucketName(platform = nil, credentials: nil) clouds = platform.nil? ? MU::Cloud.supportedClouds : [platform] clouds.each { |cloud| - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) - bucketname = cloudclass.adminBucketName(credentials) + bucketname = MU::Cloud.cloudClass(cloud).adminBucketName(credentials) begin - if platform or (cloudclass.hosted? and platform.nil?) or cloud == MU::Config.defaultCloud + if platform or (MU::Cloud.cloudClass(cloud).hosted? and platform.nil?) 
or cloud == MU::Config.defaultCloud return bucketname end end diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb index f2abe13fc..ab5095de0 100644 --- a/modules/mu/cloud.rb +++ b/modules/mu/cloud.rb @@ -463,9 +463,9 @@ def self.getResourceNames(type, assert = true) if name == type.to_sym or cloudclass[:cfg_name] == type or cloudclass[:cfg_plural] == type or - Object.const_get("MU").const_get("Cloud").const_get(name) == type + MU::Cloud.loadBaseType(name) == type type = name - return [type.to_sym, cloudclass[:cfg_name], cloudclass[:cfg_plural], Object.const_get("MU").const_get("Cloud").const_get(name), cloudclass] + return [type.to_sym, cloudclass[:cfg_name], cloudclass[:cfg_plural], MU::Cloud.loadBaseTypMU::Cloud.loadBaseType(name), cloudclass] end } if assert @@ -593,7 +593,7 @@ def self.resourceClass(cloud, type) @cloud_class_cache[cloud] = {} if !@cloud_class_cache.has_key?(cloud) begin - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + cloudclass = const_get("MU").const_get("Cloud").const_get(cloud) myclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(shortclass) @@resource_types[shortclass.to_sym][:class].each { |class_method| diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 76a6f144f..f24dea52c 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -98,7 +98,7 @@ def initialize(**args) raise MuError, "Can't instantiate a MU::Cloud object without a valid cloud (saw '#{my_cloud}')" end @cloudclass = MU::Cloud.resourceClass(my_cloud, self.class.shortname) - @cloudparentclass = Object.const_get("MU").const_get("Cloud").const_get(my_cloud) + @cloudparentclass = MU::Cloud.cloudClass(my_cloud) @cloudobj = @cloudclass.new( mommacat: args[:mommacat], kitten_cfg: args[:kitten_cfg], @@ -166,7 +166,7 @@ class << self end @cloudclass = MU::Cloud.resourceClass(@cloud, self.class.shortname) - @cloudparentclass = 
Object.const_get("MU").const_get("Cloud").const_get(@cloud) + @cloudparentclass = MU::Cloud.cloudClass(@cloud) # A pre-existing object, you say? if args[:cloud_id] @@ -181,7 +181,7 @@ class << self # If we can build us an ::Id object for @cloud_id instead of a # string, do so. begin - idclass = Object.const_get("MU").const_get("Cloud").const_get(@cloud).const_get("Id") + idclass = @cloudparentclass.const_get(:Id) long_id = if @deploydata and @deploydata[idclass.idattr.to_s] @deploydata[idclass.idattr.to_s] elsif self.respond_to?(idclass.idattr) diff --git a/modules/mu/cloud/ssh_sessions.rb b/modules/mu/cloud/ssh_sessions.rb index 884755597..06893ed97 100644 --- a/modules/mu/cloud/ssh_sessions.rb +++ b/modules/mu/cloud/ssh_sessions.rb @@ -140,8 +140,6 @@ def getSSHSession(max_retries = 12, retry_interval = 30) session = nil retries = 0 - vpc_class = Object.const_get("MU").const_get("Cloud").const_get(@cloud).const_get("VPC") - # XXX WHY is this a thing Thread.handle_interrupt(Errno::ECONNREFUSED => :never) { } @@ -205,7 +203,7 @@ def getSSHSession(max_retries = 12, retry_interval = 30) msg = "ssh #{ssh_user}@#{@mu_name}: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})" if retries == 1 or (retries/max_retries <= 0.5 and (retries % 3) == 0) MU.log msg, MU::NOTICE - if !vpc_class.haveRouteToInstance?(cloud_desc, credentials: @credentials) and + if !MU::Cloud.resourceClass(@cloud, "VPC").haveRouteToInstance?(cloud_desc, credentials: @credentials) and canonical_ip.match(/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1$)|(^[fF][cCdD])/) and !nat_ssh_host MU.log "Node #{@mu_name} at #{canonical_ip} looks like it's in a private address space, and I don't appear to have a direct route to it. 
It may not be possible to connect with this routing!", MU::WARN diff --git a/modules/mu/cloud/wrappers.rb b/modules/mu/cloud/wrappers.rb index d07e94267..28922c563 100644 --- a/modules/mu/cloud/wrappers.rb +++ b/modules/mu/cloud/wrappers.rb @@ -88,7 +88,7 @@ def self.find(*flags) next if args[:cloud] and args[:cloud] != cloud # skip this cloud if we have a region argument that makes no # sense there - cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) + cloudbase = MU::Cloud.cloudClass(cloud) next if cloudbase.listCredentials.nil? or cloudbase.listCredentials.empty? or cloudbase.credConfig(args[:credentials]).nil? if args[:region] and cloudbase.respond_to?(:listRegions) if !cloudbase.listRegions(credentials: args[:credentials]) diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 5d828d640..0a266a72b 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -253,8 +253,7 @@ def credsUsed seen << resource['credentials'] else cloudconst = @original_config['cloud'] ? 
@original_config['cloud'] : MU::Config.defaultCloud - Object.const_get("MU").const_get("Cloud").const_get(cloudconst) - seen << cloudclass.credConfig(name_only: true) + seen << MU::Cloud.cloudClass(cloudconst).credConfig(name_only: true) end } end @@ -289,11 +288,10 @@ def habitatsUsed habitats << hab_ref.id end elsif resource['cloud'] - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(resource['cloud']) # XXX this should be a general method implemented by each cloud # provider if resource['cloud'] == "Google" - habitats << cloudclass.defaultProject(resource['credentials']) + habitats << MU::Cloud.cloudClass(resource['cloud']).defaultProject(resource['credentials']) end end } @@ -317,13 +315,11 @@ def regionsUsed if @original_config[type] @original_config[type].each { |resource| if resource['cloud'] - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(resource['cloud']) - resclass = MU::Cloud.resourceClass(resource['cloud'], res_type) - if resclass.isGlobal? + if MU::Cloud.resourceClass(resource['cloud'], res_type).isGlobal? 
# XXX why was I doing this, urgh next elsif !resource['region'] - regions << cloudclass.myRegion + regions << MU::Cloud.cloudClass(resource['cloud']).myRegion(resource['credentials']) end end if resource['region'] @@ -838,7 +834,7 @@ def nodeSSLCerts(resource, poolname = false, keysize = 4096) end if resource and resource.config and resource.config['cloud'] - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(resource.config['cloud']) + cloudclass = MU::Cloud.cloudClass(resource.config['cloud']) cloudclass.writeDeploySecret(@deploy_id, cert.to_pem, cert_cn+".crt", credentials: resource.config['credentials']) cloudclass.writeDeploySecret(@deploy_id, key.to_pem, cert_cn+".key", credentials: resource.config['credentials']) diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index 1aace299c..6ec2499b2 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -512,9 +512,8 @@ def setDeploySecret if !@original_config['scrub_mu_isms'] and !@no_artifacts credsets.each_pair { |cloud, creds| creds.uniq! 
- cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) creds.each { |credentials| - cloudclass.writeDeploySecret(@deploy_id, @deploy_secret, credentials: credentials) + MU::Cloud.cloudClass(cloud).writeDeploySecret(@deploy_id, @deploy_secret, credentials: credentials) } } end diff --git a/modules/mu/providers/aws/server.rb b/modules/mu/providers/aws/server.rb index 7d2d78379..7bc3db36d 100644 --- a/modules/mu/providers/aws/server.rb +++ b/modules/mu/providers/aws/server.rb @@ -1736,8 +1736,7 @@ def self.validateInstanceType(size, region) MU::Cloud.availableClouds.each { |cloud| next if cloud == "AWS" - cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) - foreign_types = (cloudbase.listInstanceTypes).values.first + foreign_types = (MU::Cloud.cloudClass(cloud).listInstanceTypes).values.first if foreign_types.size == 1 foreign_types = foreign_types.values.first end diff --git a/modules/mu/providers/azure/server.rb b/modules/mu/providers/azure/server.rb index 33c2bd22e..638b5e7e8 100644 --- a/modules/mu/providers/azure/server.rb +++ b/modules/mu/providers/azure/server.rb @@ -519,8 +519,7 @@ def self.validateInstanceType(size, region) foundmatch = false MU::Cloud.availableClouds.each { |cloud| next if cloud == "Azure" - cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud) - foreign_types = (cloudbase.listInstanceTypes).values.first + foreign_types = (MU::Cloud.cloudClass(cloud).listInstanceTypes).values.first if foreign_types.size == 1 foreign_types = foreign_types.values.first end diff --git a/modules/mu/providers/google/server.rb b/modules/mu/providers/google/server.rb index eca8f401d..30fff8fbf 100644 --- a/modules/mu/providers/google/server.rb +++ b/modules/mu/providers/google/server.rb @@ -1467,8 +1467,7 @@ def self.validateInstanceType(size, region, project: nil, credentials: nil) foundmatch = false MU::Cloud.availableClouds.each { |cloud| next if cloud == "Google" - cloudbase = 
Object.const_get("MU").const_get("Cloud").const_get(cloud) - foreign_types = (cloudbase.listInstanceTypes).values.first + foreign_types = (MU::Cloud.cloudClass(cloud).listInstanceTypes).values.first if foreign_types.size == 1 foreign_types = foreign_types.values.first end From cef4e021538d8e3b791dde1c95dc7179d2fed335 Mon Sep 17 00:00:00 2001 From: John Stange Date: Sat, 11 Apr 2020 10:38:31 -0400 Subject: [PATCH 074/124] typo --- modules/mu/cloud.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb index ab5095de0..1982fd00d 100644 --- a/modules/mu/cloud.rb +++ b/modules/mu/cloud.rb @@ -465,7 +465,7 @@ def self.getResourceNames(type, assert = true) cloudclass[:cfg_plural] == type or MU::Cloud.loadBaseType(name) == type type = name - return [type.to_sym, cloudclass[:cfg_name], cloudclass[:cfg_plural], MU::Cloud.loadBaseTypMU::Cloud.loadBaseType(name), cloudclass] + return [type.to_sym, cloudclass[:cfg_name], cloudclass[:cfg_plural], MU::Cloud.loadBaseType(name), cloudclass] end } if assert From 8f143c05d91f8fb297b17863d7bb7f60a4ff8eb3 Mon Sep 17 00:00:00 2001 From: John Stange Date: Sat, 11 Apr 2020 13:42:01 -0400 Subject: [PATCH 075/124] try running parse tests before you bother committing eh wot --- modules/mu/cloud.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb index 1982fd00d..cfa30870f 100644 --- a/modules/mu/cloud.rb +++ b/modules/mu/cloud.rb @@ -463,9 +463,9 @@ def self.getResourceNames(type, assert = true) if name == type.to_sym or cloudclass[:cfg_name] == type or cloudclass[:cfg_plural] == type or - MU::Cloud.loadBaseType(name) == type + MU::Cloud.const_get(name) == type type = name - return [type.to_sym, cloudclass[:cfg_name], cloudclass[:cfg_plural], MU::Cloud.loadBaseType(name), cloudclass] + return [type.to_sym, cloudclass[:cfg_name], cloudclass[:cfg_plural], MU::Cloud.const_get(name), cloudclass] end } if assert From 
89c090c9ff38c3ad9a5907753815f143fee5c9b5 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 14 Apr 2020 03:31:12 -0400 Subject: [PATCH 076/124] Adoption: ensure adopted deploys have actual cloud object metadata; partial headway on a real interface for diff output (so it can be Slacked, etc) --- modules/mu.rb | 26 ++++++++++++++++++++++---- modules/mu/adoption.rb | 19 ++++++++++++++----- modules/mu/cloud/resource_base.rb | 2 +- modules/mu/mommacat/search.rb | 12 +++++++++++- 4 files changed, 48 insertions(+), 11 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index 6ab3dd98e..064b3d0c1 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -83,7 +83,7 @@ def <=>(other) end # Recursively compare two hashes - def diff(with, on = self, level: 0, parents: []) + def diff(with, on = self, level: 0, print: false, parents: [], report: {}) return if with.nil? and on.nil? if with.nil? or on.nil? or with.class != on.class return # XXX ...however we're flagging differences @@ -93,6 +93,10 @@ def diff(with, on = self, level: 0, parents: []) tree = "" indentsize = 0 parents.each { |p| + if p.nil? 
+ pp parents + next + end tree += (" " * indentsize) + p + " => \n" indentsize += 2 } @@ -104,12 +108,19 @@ def diff(with, on = self, level: 0, parents: []) with_unique = (with.keys - on.keys) shared = (with.keys & on.keys) shared.each { |k| - diff(with[k], on[k], level: level+1, parents: parents + [k]) + report[k] = {} + diff(with[k], on[k], level: level+1, parents: parents + [k], report: report[k]) } on_unique.each { |k| + report["removed"] ||= {} + report["removed"][k] = on[k] +#puts "HASH CHANGE -#{PP.pp({k => on[k] }, '')}" changes << "- ".red+PP.pp({k => on[k] }, '') } with_unique.each { |k| + report["added"] ||= {} + report["added"][k] = with[k] +#puts "HASH CHANGE +#{PP.pp({k => on[k] }, '')}" changes << "+ ".green+PP.pp({k => with[k]}, '') } elsif on.is_a?(Array) @@ -144,7 +155,7 @@ def diff(with, on = self, level: 0, parents: []) elt['entity']['id'] end - diff(other_elt, elt, level: level+1, parents: parents + [namestr]) + diff(other_elt, elt, level: level+1, parents: parents + [namestr], report: report) break end } @@ -163,32 +174,39 @@ def diff(with, on = self, level: 0, parents: []) # end # end on_unique.each { |e| +#puts "ARRAY CHANGE -#{PP.pp(Hash.bok_minimize(e), '')}" changes << if e.is_a?(Hash) "- ".red+PP.pp(Hash.bok_minimize(e), '').gsub(/\n/, "\n "+(indent)) else +#puts "ARRAY ELEMENT CHANGE -#{e.to_s}" "- ".red+e.to_s end } with_unique.each { |e| changes << if e.is_a?(Hash) +#puts "ARRAY CHANGE +#{PP.pp(Hash.bok_minimize(e), '')}" "+ ".green+PP.pp(Hash.bok_minimize(e), '').gsub(/\n/, "\n "+(indent)) else +#puts "ARRAY ELEMENT CHANGE +#{e.to_s}" "+ ".green+e.to_s end } else if on != with +#puts "LEAF CHANGE -#{on.to_s} +#{with.to_s}" changes << "-".red+" #{on.to_s}" changes << "+".green+" #{with.to_s}" end end - if changes.size > 0 + if changes.size > 0 and print puts tree changes.each { |c| puts indent+c } end + + report end # Implement a merge! 
that just updates each hash leaf as needed, not diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 5bc47e5be..773254081 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -222,9 +222,13 @@ def generateBaskets(prefix: "") } deploy = MU::MommaCat.findMatchingDeploy(origin) - if @diff and !deploy - MU.log "--diff was set but I failed to find a deploy like me to compare to", MU::ERR, details: origin - exit 1 + if @diff + if !deploy + MU.log "--diff was set but I failed to find a deploy like me to compare to", MU::ERR, details: origin + exit 1 + else + MU.log "Will diff against #{deploy.deploy_id}", MU::NOTICE, details: origin + end end threads = [] @@ -333,8 +337,8 @@ def generateBaskets(prefix: "") exit 1 end newcfg = MU::Config.manxify(@boks[bok['appname']]) - - prevcfg.diff(newcfg) + report = prevcfg.diff(newcfg, print: true) +# pp report exit end } @@ -421,6 +425,10 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) obj = deploy.findLitterMate(type: attrs[:cfg_plural], name: resource['name']) begin raise Incomplete if obj.nil? 
+ if save + deploydata = obj.notify + deploy.notify(attrs[:cfg_plural], resource['name'], deploydata, triggering_node: obj) + end new_cfg = resolveReferences(resource, deploy, obj) new_cfg.delete("cloud_id") cred_cfg = MU::Cloud.cloudClass(obj.cloud).credConfig(obj.credentials) @@ -433,6 +441,7 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) end processed << new_cfg rescue Incomplete +#MU.log "#{attrs[:cfg_name]} #{resource['name']} didn't show up from findLitterMate", MU::WARN, details: deploy.original_config[attrs[:cfg_plural]].reject { |r| r['name'] != "" } end } diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index f24dea52c..10b9a89c5 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -866,7 +866,7 @@ def resourceInitHook else retval = @cloudobj.method(method).call end - if (method == :create or method == :groom or method == :postBoot) and + if [:create, :groom, :postBoot, :toKitten].include?(method) and (!@destroyed and !@cloudobj.destroyed) deploydata = @cloudobj.method(:notify).call @deploydata ||= deploydata # XXX I don't remember why we're not just doing this from the get-go; maybe because we prefer some mangling occurring in @deploy.notify? diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb index cd1da6f40..0229e05c1 100644 --- a/modules/mu/mommacat/search.rb +++ b/modules/mu/mommacat/search.rb @@ -128,6 +128,8 @@ def self.findStray(cloud, type, matches end + @object_load_fails = false + # Return the resource object of another member of this deployment # @param type [String,Symbol]: The type of resource # @param name [String]: The name of the resource as defined in its 'name' Basket of Kittens field @@ -161,7 +163,15 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on @kitten_semaphore.synchronize { - return nil if !@kittens.has_key?(type) + if !@kittens.has_key?(type) + return nil if @original_config[type].nil? 
+ loadObjects(false) + if @object_load_fails or !@kittens[type] + MU.log "#{@deploy_id}'s original config has #{@original_config[type].size.to_s} #{type}, but loadObjects did not populate any into @kittens", MU::ERR, @deployment.keys + @object_load_fails = true + return nil + end + end matches = {} @kittens[type].each { |habitat_group, sib_classes| next if habitat and habitat_group and habitat_group != habitat From e64c5191806ad5db5f87f9c7f14b002e202d48a0 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 14 Apr 2020 14:05:20 -0400 Subject: [PATCH 077/124] Adoption: ensure that the BoK we save in a deploy matches the one we generate for ourselves so that diff doesn't have spurious noise in it --- modules/mu.rb | 5 +---- modules/mu/adoption.rb | 13 +++++++------ modules/mu/cloud/resource_base.rb | 3 +++ modules/mu/mommacat/daemon.rb | 10 +++++----- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index 064b3d0c1..c90067b31 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -93,10 +93,7 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) tree = "" indentsize = 0 parents.each { |p| - if p.nil? - pp parents - next - end + p ||= "" tree += (" " * indentsize) + p + " => \n" indentsize += 2 } diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 773254081..449e9884c 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -338,6 +338,7 @@ def generateBaskets(prefix: "") end newcfg = MU::Config.manxify(@boks[bok['appname']]) report = prevcfg.diff(newcfg, print: true) +# sendAdminSlack("Grooming FAILED for `#{kitten.mu_name}` with `#{e.message}` :crying_cat_face:", msg: e.backtrace.join("\n")) # pp report exit end @@ -452,24 +453,23 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) # Pare out global values like +cloud+ or +region+ that appear to be # universal in the deploy we're creating. 
- def scrub_globals(h, field) + scrub_globals = Proc.new { |h, field| if h.is_a?(Hash) newhash = {} h.each_pair { |k, v| next if k == field - newhash[k] = scrub_globals(v, field) + newhash[k] = scrub_globals.call(v, field) } h = newhash elsif h.is_a?(Array) newarr = [] h.each { |v| - newarr << scrub_globals(v, field) + newarr << scrub_globals.call(v, field) } h = newarr end - h - end + } globals.each_pair { |field, counts| next if counts.size != 1 @@ -479,7 +479,7 @@ def scrub_globals(h, field) if bok[attrs[:cfg_plural]] new_resources = [] bok[attrs[:cfg_plural]].each { |resource| - new_resources << scrub_globals(resource, field) + new_resources << scrub_globals.call(resource, field) } bok[attrs[:cfg_plural]] = new_resources end @@ -489,6 +489,7 @@ def scrub_globals(h, field) scrubSchemaDefaults(bok, MU::Config.schema) if save + deploy.updateBasketofKittens(bok) MU.log "Committing adopted deployment to #{MU.dataDir}/deployments/#{deploy.deploy_id}", MU::NOTICE, details: origin deploy.save!(force: true, origin: origin) end diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 10b9a89c5..b1d32baf8 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -94,6 +94,9 @@ def initialize(**args) end my_cloud = args[:kitten_cfg]['cloud'].to_s || MU::Config.defaultCloud + if (my_cloud.nil? or my_cloud.empty?) and args[:mommacat] + my_cloud = args[:mommacat].original_config['cloud'] + end if my_cloud.nil? 
or !MU::Cloud.supportedClouds.include?(my_cloud) raise MuError, "Can't instantiate a MU::Cloud object without a valid cloud (saw '#{my_cloud}')" end diff --git a/modules/mu/mommacat/daemon.rb b/modules/mu/mommacat/daemon.rb index 0d3cb9046..df0576c15 100644 --- a/modules/mu/mommacat/daemon.rb +++ b/modules/mu/mommacat/daemon.rb @@ -165,11 +165,11 @@ def groomNode(cloud_id, name, type, mu_name: nil, reraise_fail: false, sync_wait if e.class.name != "MU::Cloud::AWS::Server::BootstrapTempFail" and !File.exist?(deploy_dir+"/.cleanup."+cloud_id) and !File.exist?(deploy_dir+"/.cleanup") MU.log "Grooming FAILED for #{kitten.mu_name} (#{e.inspect})", MU::ERR, details: e.backtrace sendAdminSlack("Grooming FAILED for `#{kitten.mu_name}` with `#{e.message}` :crying_cat_face:", msg: e.backtrace.join("\n")) - sendAdminMail("Grooming FAILED for #{kitten.mu_name} on #{MU.appname} \"#{MU.handle}\" (#{MU.deploy_id})", - msg: e.inspect, - data: e.backtrace, - debug: true - ) + sendAdminMail("Grooming FAILED for #{kitten.mu_name} on #{MU.appname} \"#{MU.handle}\" (#{MU.deploy_id})", + msg: e.inspect, + data: e.backtrace, + debug: true + ) raise e if reraise_fail else MU.log "Grooming of #{kitten.mu_name} interrupted by cleanup or planned reboot" From aeaae860654a2c12a3ac98b579a8276354a1729f Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 14 Apr 2020 15:59:37 -0400 Subject: [PATCH 078/124] Adoption: Hash#diff now returns a useful hash report of results; MommaCat: don't willy-nilly pollute loaded objects with an extra environment field --- modules/mu.rb | 24 ++++++++++-------------- modules/mu/mommacat.rb | 1 + modules/mu/mommacat/storage.rb | 4 ++-- 3 files changed, 13 insertions(+), 16 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index c90067b31..54b76003b 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -89,6 +89,7 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) return # XXX ...however we're flagging differences end return if on == 
with + ["added", "removed", "changed"].each { |f| report[f] ||= [] } tree = "" indentsize = 0 @@ -105,19 +106,14 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) with_unique = (with.keys - on.keys) shared = (with.keys & on.keys) shared.each { |k| - report[k] = {} - diff(with[k], on[k], level: level+1, parents: parents + [k], report: report[k]) + diff(with[k], on[k], level: level+1, parents: parents + [k], report: report, print: print) } on_unique.each { |k| - report["removed"] ||= {} - report["removed"][k] = on[k] -#puts "HASH CHANGE -#{PP.pp({k => on[k] }, '')}" + report["removed"] << { "field" => k, "parents" => parents, "value" => on[k] } changes << "- ".red+PP.pp({k => on[k] }, '') } with_unique.each { |k| - report["added"] ||= {} - report["added"][k] = with[k] -#puts "HASH CHANGE +#{PP.pp({k => on[k] }, '')}" + report["added"] << { "field" => k, "parents" => parents, "value" => on[k] } changes << "+ ".green+PP.pp({k => with[k]}, '') } elsif on.is_a?(Array) @@ -152,7 +148,7 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) elt['entity']['id'] end - diff(other_elt, elt, level: level+1, parents: parents + [namestr], report: report) + diff(other_elt, elt, level: level+1, parents: parents + [namestr], report: report, print: print) break end } @@ -171,26 +167,26 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) # end # end on_unique.each { |e| -#puts "ARRAY CHANGE -#{PP.pp(Hash.bok_minimize(e), '')}" changes << if e.is_a?(Hash) + report["removed"] << { "parents" => parents, "value" => e } "- ".red+PP.pp(Hash.bok_minimize(e), '').gsub(/\n/, "\n "+(indent)) else -#puts "ARRAY ELEMENT CHANGE -#{e.to_s}" + report["removed"] << { "parents" => parents, "value" => e } "- ".red+e.to_s end } with_unique.each { |e| changes << if e.is_a?(Hash) -#puts "ARRAY CHANGE +#{PP.pp(Hash.bok_minimize(e), '')}" + report["added"] << { "parents" => parents, "value" => e } "+ 
".green+PP.pp(Hash.bok_minimize(e), '').gsub(/\n/, "\n "+(indent)) else -#puts "ARRAY ELEMENT CHANGE +#{e.to_s}" + report["added"] << { "parents" => parents, "value" => e } "+ ".green+e.to_s end } else if on != with -#puts "LEAF CHANGE -#{on.to_s} +#{with.to_s}" + report["changed"] << { "parents" => parents, "old" => on, "new" => with } changes << "-".red+" #{on.to_s}" changes << "+".green+" #{with.to_s}" end diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 0a266a72b..66ecbcdc6 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -182,6 +182,7 @@ def initialize(deploy_id, @appname ||= @original_config['name'] if @original_config @timestamp = timestamp @environment = environment + @original_config['environment'] ||= @environment if @original_config if set_context_to_me MU::MommaCat.setThreadContext(self) diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index 6ec2499b2..58270c5a4 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -520,6 +520,8 @@ def setDeploySecret end def loadObjects(delay_descriptor_load) + # Load up MU::Cloud objects for all our kittens in this deploy + MU::Cloud.resource_types.each_pair { |res_type, attrs| type = attrs[:cfg_plural] next if !@deployment.has_key?(type) @@ -565,8 +567,6 @@ def loadObjects(delay_descriptor_load) end begin - # Load up MU::Cloud objects for all our kittens in this deploy - orig_cfg['environment'] = @environment # not always set in old deploys if attrs[:has_multiples] data.keys.each { |mu_name| addKitten(type, res_name, attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: mu_name, delay_descriptor_load: delay_descriptor_load)) From fe18d6c69fe74677a1a264fc4ce64e2f9c91494a Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 14 Apr 2020 23:22:41 -0400 Subject: [PATCH 079/124] Adoption: Slack notifications now an option for --diff results --- modules/mu/adoption.rb | 51 +++++++++++++++++++++++++++++++++++++++--- 
modules/mu/mommacat.rb | 21 ++++++++++------- 2 files changed, 61 insertions(+), 11 deletions(-) diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 449e9884c..25149e828 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -338,9 +338,9 @@ def generateBaskets(prefix: "") end newcfg = MU::Config.manxify(@boks[bok['appname']]) report = prevcfg.diff(newcfg, print: true) -# sendAdminSlack("Grooming FAILED for `#{kitten.mu_name}` with `#{e.message}` :crying_cat_face:", msg: e.backtrace.join("\n")) -# pp report - exit + if MU.muCfg['adopt_change_notify'] + notifyChanges(deploy, report) + end end } @boks @@ -348,6 +348,51 @@ def generateBaskets(prefix: "") private + def notifyChanges(deploy, report) + if MU.muCfg['adopt_change_notify']['slack'] + ["added", "removed", "changed"].each { |c| + next if !report[c] or report[c].empty? + report[c].each { |item| + shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(item["parents"].first, false) + + noun = if shortclass + shortclass + else + "Resource" + end + + verb = if item['parents'].size > 1 + "modified" + else + c + end + + text = if item['value'].is_a?(Hash) and item['value']['name'] + "#{noun} \*#{item['value']['name']}\* was \*#{verb}\*." + else + "A #{noun} was #{verb}." + end + + item['parents'].shift if shortclass + if !item["parents"].empty? + text += " Path:\n\n`"+item["parents"].join(" => ")+"`" + end + + if item['value'] and (item['value'].is_a?(Array) or item['value'].is_a?(Hash)) + deploy.sendAdminSlack(text, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], msg: PP.pp(item['value'], '')) + MU.log text, MU::NOTICE, details: item['value'] + else + deploy.sendAdminSlack(text, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms']) + MU.log text + end + } + } + end + + if MU.muCfg['adopt_change_notify']['email'] + end + end + def scrubSchemaDefaults(conf_chunk, schema_chunk, depth = 0, type: nil) return if schema_chunk.nil? 
diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 66ecbcdc6..7c072f5a0 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -611,16 +611,21 @@ def notify(type, key, data, mu_name: nil, remove: false, triggering_node: nil, d # @param subject [String]: The subject line of the message. # @param msg [String]: The message body. # @return [void] - def sendAdminSlack(subject, msg: "") - if $MU_CFG['slack'] and $MU_CFG['slack']['webhook'] and - (!$MU_CFG['slack']['skip_environments'] or !$MU_CFG['slack']['skip_environments'].any?{ |s| s.casecmp(MU.environment)==0 }) + def sendAdminSlack(subject, msg: "", scrub_mu_isms: true) + if MU.muCfg['slack'] and MU.muCfg['slack']['webhook'] and + (!MU.muCfg['slack']['skip_environments'] or !MU.muCfg['slack']['skip_environments'].any?{ |s| s.casecmp(MU.environment)==0 }) require 'slack-notifier' - slack = Slack::Notifier.new $MU_CFG['slack']['webhook'] + begin + slack = Slack::Notifier.new MU.muCfg['slack']['webhook'] + prefix = scrub_mu_isms ? subject : "#{MU.appname} \*\"#{MU.handle}\"\* (`#{MU.deploy_id}`) - #{subject}" - if msg and !msg.empty? - slack.ping "#{MU.appname} \*\"#{MU.handle}\"\* (`#{MU.deploy_id}`) - #{subject}:\n\n```#{msg}\n```", channel: $MU_CFG['slack']['channel'] - else - slack.ping "#{MU.appname} \*\"#{MU.handle}\"\* (`#{MU.deploy_id}`) - #{subject}", channel: $MU_CFG['slack']['channel'] + if msg and !msg.empty? 
+ slack.ping "#{prefix}:\n\n```#{msg}```", channel: MU.muCfg['slack']['channel'] + else + slack.ping prefix, channel: MU.muCfg['slack']['channel'] + end + rescue Slack::Notifier::APIError => e + MU.log "Failed to send message to slack: #{e.message}", MU::ERR, details: MU.muCfg['slack'] end end end From 5f88bcb49f3fd3bfbc6128b03bba64a1477ce1e8 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 17 Apr 2020 03:50:33 -0400 Subject: [PATCH 080/124] Hash#diff: discard matched array entries from report as intended --- modules/mu.rb | 76 +++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index 54b76003b..98796f9ec 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -89,12 +89,12 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) return # XXX ...however we're flagging differences end return if on == with - ["added", "removed", "changed"].each { |f| report[f] ||= [] } tree = "" indentsize = 0 + report_tree = {} parents.each { |p| - p ||= "" + p ||= "" tree += (" " * indentsize) + p + " => \n" indentsize += 2 } @@ -106,14 +106,18 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) with_unique = (with.keys - on.keys) shared = (with.keys & on.keys) shared.each { |k| - diff(with[k], on[k], level: level+1, parents: parents + [k], report: report, print: print) + report_data = diff(with[k], on[k], level: level+1, parents: parents + [k], report: report[k], print: print) + if report_data and !report_data.empty? 
+ report ||= {} + report[k] = report_data + end } on_unique.each { |k| - report["removed"] << { "field" => k, "parents" => parents, "value" => on[k] } + report[k] = { :action => :removed, :parents => parents, :value => on[k] } changes << "- ".red+PP.pp({k => on[k] }, '') } with_unique.each { |k| - report["added"] << { "field" => k, "parents" => parents, "value" => on[k] } + report[k] = { :action => :added, :parents => parents, :value => with[k] } changes << "+ ".green+PP.pp({k => with[k]}, '') } elsif on.is_a?(Array) @@ -126,29 +130,24 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) # sorting arrays full of weird, non-primitive types. done = [] on.sort.each { |elt| - if elt.is_a?(Hash) and elt['name'] or elt['entity']# or elt['cloud_id'] - with.sort.each { |other_elt| - # Figure out what convention this thing is using for resource identification - compare_a, compare_b = if elt['name'].nil? and elt["id"].nil? and !elt["entity"].nil? and !other_elt["entity"].nil? - [elt["entity"], other_elt["entity"]] - else - [elt, other_elt] - end + if elt.is_a?(Hash) and MU::MommaCat.getChunkName(elt) + elt_namestr = MU::MommaCat.getChunkName(elt) +if elt_namestr.nil? + MU.log "Failed to come up with a name string for this guy #{parents.join(" => ")}", MU::WARN, details: elt +end - if (compare_a['name'] and compare_b['name'] == compare_a['name']) or - (compare_a['name'].nil? and !compare_a["id"].nil? 
and compare_a["id"] == compare_b["id"]) - break if elt == other_elt + with.sort.each { |other_elt| + other_elt_namestr = MU::MommaCat.getChunkName(other_elt) + # Case 1: The array element exists in both version of this array + if elt_namestr and other_elt_namestr and elt_namestr == other_elt_namestr done << elt done << other_elt - namestr = if elt['type'] - "#{elt['type']}[#{elt['name']}]" - elsif elt['name'] - elt['name'] - elsif elt['entity'] and elt["entity"]["id"] - elt['entity']['id'] + break if elt == other_elt # if they're identical, we're done + report_data = diff(other_elt, elt, level: level+1, parents: parents + [elt_namestr], print: print) + if report_data and !report_data.empty? + report ||= {} + report[elt_namestr] = report_data end - - diff(other_elt, elt, level: level+1, parents: parents + [namestr], report: report, print: print) break end } @@ -156,37 +155,36 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) } on_unique = (on - with) - done with_unique = (with - on) - done -# if on_unique.size > 0 or with_unique.size > 0 -# if before_a != after_a -# MU.log "A BEFORE", MU::NOTICE, details: before_a -# MU.log "A AFTER", MU::NOTICE, details: after_a -# end -# if before_b != after_b -# MU.log "B BEFORE", MU::NOTICE, details: before_b -# MU.log "B AFTER", MU::NOTICE, details: after_b -# end -# end + + # Case 2: This array entry exists in the old version, but not the new one on_unique.each { |e| + namestr = MU::MommaCat.getChunkName(e) + + report ||= {} changes << if e.is_a?(Hash) - report["removed"] << { "parents" => parents, "value" => e } + report[namestr] = { :action => :removed, :parents => parents, :value => e } "- ".red+PP.pp(Hash.bok_minimize(e), '').gsub(/\n/, "\n "+(indent)) else - report["removed"] << { "parents" => parents, "value" => e } + report[namestr] = { :action => :removed, :parents => parents, :value => e } "- ".red+e.to_s end } + # Case 3: This array entry exists in the new version, but not the old one 
with_unique.each { |e| + namestr = MU::MommaCat.getChunkName(e) + + report ||= {} changes << if e.is_a?(Hash) - report["added"] << { "parents" => parents, "value" => e } + report[namestr] = { :action => :added, :parents => parents, :value => e } "+ ".green+PP.pp(Hash.bok_minimize(e), '').gsub(/\n/, "\n "+(indent)) else - report["added"] << { "parents" => parents, "value" => e } + report[namestr] = { :action => :added, :parents => parents, :value => e } "+ ".green+e.to_s end } else if on != with - report["changed"] << { "parents" => parents, "old" => on, "new" => with } + report = { :action => :changed, :parents => parents, :oldvalue => on, :value => with } changes << "-".red+" #{on.to_s}" changes << "+".green+" #{with.to_s}" end From d838e539797cfa8a5d9ba3c6f89f2c866d058ae1 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 17 Apr 2020 12:55:13 -0400 Subject: [PATCH 081/124] Hash#diff: drop console printing behavior, we'll let caller take care of that; other misc support for this functionality --- modules/mu.rb | 41 ++-------- modules/mu/adoption.rb | 123 ++++++++++++++++++++++------ modules/mu/mommacat.rb | 24 ++++-- modules/mu/mommacat/naming.rb | 35 ++++++++ modules/mu/providers/google/user.rb | 10 ++- 5 files changed, 167 insertions(+), 66 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index 98796f9ec..ee4c2f442 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -83,30 +83,20 @@ def <=>(other) end # Recursively compare two hashes - def diff(with, on = self, level: 0, print: false, parents: [], report: {}) + def diff(with, on = self, level: 0, parents: [], report: {}) return if with.nil? and on.nil? if with.nil? or on.nil? 
or with.class != on.class return # XXX ...however we're flagging differences end return if on == with - tree = "" - indentsize = 0 - report_tree = {} - parents.each { |p| - p ||= "" - tree += (" " * indentsize) + p + " => \n" - indentsize += 2 - } - indent = (" " * indentsize) - changes = [] if on.is_a?(Hash) on_unique = (on.keys - with.keys) with_unique = (with.keys - on.keys) shared = (with.keys & on.keys) shared.each { |k| - report_data = diff(with[k], on[k], level: level+1, parents: parents + [k], report: report[k], print: print) + report_data = diff(with[k], on[k], level: level+1, parents: parents + [k], report: report[k]) if report_data and !report_data.empty? report ||= {} report[k] = report_data @@ -114,11 +104,9 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) } on_unique.each { |k| report[k] = { :action => :removed, :parents => parents, :value => on[k] } - changes << "- ".red+PP.pp({k => on[k] }, '') } with_unique.each { |k| report[k] = { :action => :added, :parents => parents, :value => with[k] } - changes << "+ ".green+PP.pp({k => with[k]}, '') } elsif on.is_a?(Array) return if with == on @@ -132,9 +120,6 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) on.sort.each { |elt| if elt.is_a?(Hash) and MU::MommaCat.getChunkName(elt) elt_namestr = MU::MommaCat.getChunkName(elt) -if elt_namestr.nil? - MU.log "Failed to come up with a name string for this guy #{parents.join(" => ")}", MU::WARN, details: elt -end with.sort.each { |other_elt| other_elt_namestr = MU::MommaCat.getChunkName(other_elt) @@ -143,7 +128,7 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) done << elt done << other_elt break if elt == other_elt # if they're identical, we're done - report_data = diff(other_elt, elt, level: level+1, parents: parents + [elt_namestr], print: print) + report_data = diff(other_elt, elt, level: level+1, parents: parents + [elt_namestr]) if report_data and !report_data.empty? 
report ||= {} report[elt_namestr] = report_data @@ -161,42 +146,32 @@ def diff(with, on = self, level: 0, print: false, parents: [], report: {}) namestr = MU::MommaCat.getChunkName(e) report ||= {} - changes << if e.is_a?(Hash) + if e.is_a?(Hash) report[namestr] = { :action => :removed, :parents => parents, :value => e } - "- ".red+PP.pp(Hash.bok_minimize(e), '').gsub(/\n/, "\n "+(indent)) else report[namestr] = { :action => :removed, :parents => parents, :value => e } - "- ".red+e.to_s end } + # Case 3: This array entry exists in the new version, but not the old one with_unique.each { |e| namestr = MU::MommaCat.getChunkName(e) report ||= {} - changes << if e.is_a?(Hash) + if e.is_a?(Hash) report[namestr] = { :action => :added, :parents => parents, :value => e } - "+ ".green+PP.pp(Hash.bok_minimize(e), '').gsub(/\n/, "\n "+(indent)) else report[namestr] = { :action => :added, :parents => parents, :value => e } - "+ ".green+e.to_s end } + + # A plain old leaf node of data else if on != with report = { :action => :changed, :parents => parents, :oldvalue => on, :value => with } - changes << "-".red+" #{on.to_s}" - changes << "+".green+" #{with.to_s}" end end - if changes.size > 0 and print - puts tree - changes.each { |c| - puts indent+c - } - end - report end diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 25149e828..ed39a6bb6 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -227,7 +227,7 @@ def generateBaskets(prefix: "") MU.log "--diff was set but I failed to find a deploy like me to compare to", MU::ERR, details: origin exit 1 else - MU.log "Will diff against #{deploy.deploy_id}", MU::NOTICE, details: origin + MU.log "Will diff current live resources against #{deploy.deploy_id}", MU::NOTICE, details: origin end end @@ -337,7 +337,7 @@ def generateBaskets(prefix: "") exit 1 end newcfg = MU::Config.manxify(@boks[bok['appname']]) - report = prevcfg.diff(newcfg, print: true) + report = prevcfg.diff(newcfg) if 
MU.muCfg['adopt_change_notify'] notifyChanges(deploy, report) end @@ -348,42 +348,111 @@ def generateBaskets(prefix: "") private - def notifyChanges(deploy, report) - if MU.muCfg['adopt_change_notify']['slack'] - ["added", "removed", "changed"].each { |c| - next if !report[c] or report[c].empty? - report[c].each { |item| - shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(item["parents"].first, false) + # @param tier [Hash] + # @param parent_key [String] + # @param formatting [String]: Should be one of +console+, +email+, or +slack+ + def crawlChangeReport(tier, parent_key = nil, formatting: "console") + report = [] + if tier.is_a?(Array) + tier.each { |a| + sub_report = crawlChangeReport(a, parent_key) + report.concat(sub_report) if sub_report and !sub_report.empty? + } + elsif tier.is_a?(Hash) + if tier[:action] + preposition = if tier[:action] == :added + "to" + elsif tier[:action] == :removed + "from" + else + "in" + end - noun = if shortclass - shortclass - else - "Resource" + loc = "" + type_of = parent_key.sub(/s$/, '') if parent_key + + name = if tier[:value] and tier[:value].is_a?(Hash) + MU::MommaCat.getChunkName(tier[:value], type_of) + elsif parent_key + parent_key + end + + + if tier[:value] and tier[:value].is_a?(Hash) + if tier[:value]['project'] + loc = " #{preposition} \*"+tier[:value]['project']+"\*" + elsif tier[:value]['habitat'] and tier[:value]['habitat']['id'] + loc = " #{preposition} \*"+tier[:value]['habitat']['id']+"\*" end + end - verb = if item['parents'].size > 1 - "modified" + text = "`"+(name ? name : type_of)+"`" + text ||= "" + text += " was #{tier[:action]}#{loc}" + + + if tier[:parents] and tier[:parents].size > 2 + path = tier[:parents].clone + path.shift + path.shift + text += " under `"+path.join("/")+"`" + end + text += "." 
+ + if tier[:value] and (tier[:value].is_a?(Array) or tier[:value].is_a?(Hash)) + if tier[:value].is_a?(Hash) and (tier[:value].keys - ["id", "name", "type"]).size > 0 + report << { "text" => text, "details" => tier[:value] } else - c + report << { "text" => text } end + else + tier[:value] ||= "" + report << { "text" => text+" New #{tier[:field] ? "`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" } + end + else + tier.each_pair { |k, v| + next if !(v.is_a?(Hash) or v.is_a?(Array)) + sub_report = crawlChangeReport(v, k) + report.concat(sub_report) if sub_report and !sub_report.empty? + } + end + end + + report + end + - text = if item['value'].is_a?(Hash) and item['value']['name'] - "#{noun} \*#{item['value']['name']}\* was \*#{verb}\*." + def notifyChanges(deploy, report) +exit + if MU.muCfg['adopt_change_notify']['slack'] + snippet_threshold = MU.muCfg['adopt_change_notify']['slack_snippet_threshold'] || 5 + report.each_pair { |res_type, resources| + shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(res_type, false) + noun = shortclass ? shortclass.to_s : res_type.capitalize + + resources.each_pair { |name, data| + verb = if data[:action] + data[:action] else - "A #{noun} was #{verb}." + "modified" end - item['parents'].shift if shortclass - if !item["parents"].empty? 
- text += " Path:\n\n`"+item["parents"].join(" => ")+"`" - end + slacktext = "#{noun} \*#{name}\* was #{verb}" + snippets = [] - if item['value'] and (item['value'].is_a?(Array) or item['value'].is_a?(Hash)) - deploy.sendAdminSlack(text, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], msg: PP.pp(item['value'], '')) - MU.log text, MU::NOTICE, details: item['value'] + if [:added, :removed].include?(data[:action]) and data[:value] + snippets << { text: "```"+JSON.pretty_generate(data[:value])+"```" } else - deploy.sendAdminSlack(text, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms']) - MU.log text + changes = crawlChangeReport(data) + changes.each { |c| + slacktext += "\n • "+c["text"] + if c["details"] + details = JSON.pretty_generate(c["details"]) + snippets << { text: "```"+JSON.pretty_generate(c["details"])+"```" } + end + } + + deploy.sendAdminSlack(slacktext, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], snippets: snippets) end } } diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 7c072f5a0..04edc3d65 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -611,23 +611,37 @@ def notify(type, key, data, mu_name: nil, remove: false, triggering_node: nil, d # @param subject [String]: The subject line of the message. # @param msg [String]: The message body. # @return [void] - def sendAdminSlack(subject, msg: "", scrub_mu_isms: true) + def sendAdminSlack(subject, msg: "", scrub_mu_isms: true, snippets: [], noop: false) if MU.muCfg['slack'] and MU.muCfg['slack']['webhook'] and (!MU.muCfg['slack']['skip_environments'] or !MU.muCfg['slack']['skip_environments'].any?{ |s| s.casecmp(MU.environment)==0 }) require 'slack-notifier' + slackargs = nil + keyword_args = { channel: MU.muCfg['slack']['channel'] } begin slack = Slack::Notifier.new MU.muCfg['slack']['webhook'] prefix = scrub_mu_isms ? subject : "#{MU.appname} \*\"#{MU.handle}\"\* (`#{MU.deploy_id}`) - #{subject}" - if msg and !msg.empty? 
- slack.ping "#{prefix}:\n\n```#{msg}```", channel: MU.muCfg['slack']['channel'] + text = if msg and !msg.empty? + "#{prefix}:\n\n```#{msg}```" else - slack.ping prefix, channel: MU.muCfg['slack']['channel'] + prefix + end + + if snippets and snippets.size > 0 + keyword_args[:attachments] = snippets + end + + if !noop + slack.ping(text, **keyword_args) + else + MU.log "Would send to #{MU.muCfg['slack']['channel']}", MU::NOTICE, details: [ text, keyword_args ] end rescue Slack::Notifier::APIError => e - MU.log "Failed to send message to slack: #{e.message}", MU::ERR, details: MU.muCfg['slack'] + MU.log "Failed to send message to slack: #{e.message}", MU::ERR, details: keyword_args + return false end end + true end # Send an email notification to a deployment's administrators. diff --git a/modules/mu/mommacat/naming.rb b/modules/mu/mommacat/naming.rb index edb3c4045..9b163d55e 100644 --- a/modules/mu/mommacat/naming.rb +++ b/modules/mu/mommacat/naming.rb @@ -47,6 +47,41 @@ def self.guessName(desc, resourceclass, cloud_id: nil, tag_value: nil) end + # Given a piece of a BoK resource descriptor Hash, come up with a shorthand + # string to give it a name for human readers. If nothing reasonable can be + # extracted, returns nil. + # @param obj [Hash] + # @param array_of [String] + # @return [String,nil] + def self.getChunkName(obj, array_of = nil) + return nil if obj.nil? 
+ if [String, Integer, Boolean].include?(obj.class) + return obj + end + obj_type = array_of || obj['type'] + obj_name = obj['name'] || obj['id'] || obj['mu_name'] || obj['cloud_id'] + if obj_name + if obj_type + "#{obj_type}[#{obj_name}]" + else + obj_name + end + else + found_it = nil + ["entity", "role"].each { |subtype| + if obj[subtype] and obj[subtype].is_a?(Hash) + found_it = if obj[subtype]["id"] + obj[subtype]['id'] + elsif obj[subtype]["type"] and obj[subtype]["name"] + "#{obj[subtype]['type']}[#{obj[subtype]['name']}]" + end + break + end + } + found_it + end + end + # Generate a three-character string which can be used to unique-ify the # names of resources which might potentially collide, e.g. Windows local # hostnames, Amazon Elastic Load Balancers, or server pool instances. diff --git a/modules/mu/providers/google/user.rb b/modules/mu/providers/google/user.rb index 91e1fdc88..8d22f4eda 100644 --- a/modules/mu/providers/google/user.rb +++ b/modules/mu/providers/google/user.rb @@ -58,7 +58,7 @@ def create account_id: acct_id, service_account: MU::Cloud::Google.iam(:ServiceAccount).new( display_name: @mu_name, - description: @config['scrub_mu_isms'] ? nil : @deploy.deploy_id + description: @config['scrub_mu_isms'] ? @config['description'] : @deploy.deploy_id ) ) if @config['use_if_exists'] @@ -429,6 +429,10 @@ def toKitten(**_args) if bok['type'] == "service" bok['name'].gsub!(/@.*/, '') + if cloud_desc.description and !cloud_desc.description.empty? 
and + !cloud_desc.description.match(/^[A-Z0-9_-]+-[A-Z0-9_-]+-\d{10}-[A-Z]{2}$/) + bok['description'] = cloud_desc.description + end bok['project'] = @project_id keys = MU::Cloud::Google.iam(credentials: @config['credentials']).list_project_service_account_keys(@cloud_id) @@ -501,6 +505,10 @@ def self.schema(_config) "type" => "string", "description" => "Alias for +family_name+" }, + "description" => { + "type" => "string", + "description" => "Comment field for service accounts, which we normally use to store the originating deploy's deploy id, since GCP service accounts do not have labels. This field is only honored if +scrub_mu_isms+ is set." + }, "email" => { "type" => "string", "description" => "Canonical email address for a +directory+ user. If not specified, will be set to +name@domain+." From 1bdf277743805307e8b9e58ee6b9557f58837069 Mon Sep 17 00:00:00 2001 From: John Stange Date: Sun, 19 Apr 2020 23:37:07 -0400 Subject: [PATCH 082/124] MommaCat: ok ok actually save! when updating BoKs in stored deploys --- modules/mu/mommacat/storage.rb | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index 58270c5a4..30dc4137f 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -94,7 +94,6 @@ def self.locks; def updateBasketofKittens(new_conf) loadDeploy if new_conf == @original_config - MU.log "#{@deploy_id}", MU::WARN return end @@ -106,8 +105,8 @@ def updateBasketofKittens(new_conf) config.flock(File::LOCK_UN) config.close - @original_config = new_conf -# save! # XXX this will happen later, more sensibly + @original_config = new_conf.clone + save! 
# XXX this will happen later, more sensibly MU.log "New config saved to #{deploy_dir}/basket_of_kittens.json" end From adc8d7b4a3401ea8bf6314bb2ad4f0d66ba19b1f Mon Sep 17 00:00:00 2001 From: John Stange Date: Mon, 20 Apr 2020 11:33:36 -0400 Subject: [PATCH 083/124] pipeline: use gitlab's docker runnnners instead of ours perhaps --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cc3488184..026e6aa07 100755 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -161,7 +161,7 @@ Docker Build: variables: - $IMAGE_BUILD tags: - - docker-build + - docker retry: 2 Parser Test With Gem: From 70051b37e85456361cfd29e4f335a5ab35fc6a30 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 21 Apr 2020 00:54:04 -0400 Subject: [PATCH 084/124] Adoption: finally a quasi-satisfactory state for diff notifications --- bin/mu-adopt | 4 +- bin/mu-deploy | 2 +- modules/mu.rb | 16 ++- modules/mu/adoption.rb | 184 +++++++++++++++++++--------- modules/mu/cloud/resource_base.rb | 4 + modules/mu/mommacat.rb | 27 ++-- modules/mu/mommacat/naming.rb | 40 ++++-- modules/mu/mommacat/storage.rb | 14 ++- modules/mu/providers/google/role.rb | 8 +- modules/mu/providers/google/user.rb | 9 +- 10 files changed, 218 insertions(+), 90 deletions(-) diff --git a/bin/mu-adopt b/bin/mu-adopt index 4cd6330c3..53bff4200 100755 --- a/bin/mu-adopt +++ b/bin/mu-adopt @@ -40,6 +40,7 @@ $opt = Optimist::options do opt :credentials, "Override the 'credentials' value in our generated Baskets of Kittens to target a single, specific account. Our default behavior is to set each resource to deploy into the account from which it was sourced.", :required => false, :type => :string opt :savedeploys, "Generate actual deployment metadata in #{MU.dataDir}/deployments, as though the resources we found were created with mu-deploy. If we are generating more than one configuration, and a resource needs to reference another resource (e.g. 
to declare a VPC in which to reside), this will allow us to reference them as virtual resource, rather than by raw cloud identifier.", :required => false, :type => :boolean, :default => false opt :diff, "List the differences between what we find and an existing, saved deploy from a previous run, if one exists.", :required => false, :type => :boolean + opt :merge_changes, "When using --diff, merge detected changes into the baseline deploy after reporting on them.", :required => false, :type => :boolean, :default => false opt :grouping, "Methods for grouping found resources into separate Baskets.\n\n"+MU::Adoption::GROUPMODES.keys.map { |g| "* "+g.to_s+": "+MU::Adoption::GROUPMODES[g] }.join("\n")+"\n\n", :required => false, :type => :string, :default => "logical" opt :habitats, "Limit scope of searches to the named accounts/projects/subscriptions, instead of search all habitats visible to our credentials.", :required => false, :type => :strings opt :regions, "Restrict to operating on a subset of available regions, instead of all that we know about.", :require => false, :type => :strings @@ -97,8 +98,7 @@ if !ok exit 1 end - -adoption = MU::Adoption.new(clouds: clouds, types: types, parent: $opt[:parent], billing: $opt[:billing], sources: $opt[:sources], credentials: $opt[:credentials], group_by: $opt[:grouping].to_sym, savedeploys: $opt[:savedeploys], diff: $opt[:diff], habitats: $opt[:habitats], scrub_mu_isms: $opt[:scrub], regions: $opt[:regions]) +adoption = MU::Adoption.new(clouds: clouds, types: types, parent: $opt[:parent], billing: $opt[:billing], sources: $opt[:sources], credentials: $opt[:credentials], group_by: $opt[:grouping].to_sym, savedeploys: $opt[:savedeploys], diff: $opt[:diff], habitats: $opt[:habitats], scrub_mu_isms: $opt[:scrub], regions: $opt[:regions], merge: $opt[:merge_changes]) found = adoption.scrapeClouds if found.nil? or found.empty? 
MU.log "No resources found to adopt", MU::WARN, details: {"clouds" => clouds, "types" => types } diff --git a/bin/mu-deploy b/bin/mu-deploy index 67889c459..0a45e1f1f 100755 --- a/bin/mu-deploy +++ b/bin/mu-deploy @@ -135,7 +135,7 @@ if $opts[:update] # TODO consider whether this is useful/valid # old_conf = JSON.parse(File.read(deploy.deploy_dir+"/basket_of_kittens.json")) # stack_conf = old_conf.merge(stack_conf) - deploy.updateBasketofKittens(stack_conf) + deploy.updateBasketofKittens(stack_conf, skip_validation: true) deployer = MU::Deploy.new( deploy.environment, verbosity: verbosity, diff --git a/modules/mu.rb b/modules/mu.rb index ee4c2f442..2f1e37e1f 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -89,6 +89,10 @@ def diff(with, on = self, level: 0, parents: [], report: {}) return # XXX ...however we're flagging differences end return if on == with +if parents.include?("group") or parents.include?("groups") and on.is_a?(Array) and on.first.is_a?(Hash) + MU.log "this fecker under #{parents.join(" => ")}", MU::WARN, details: on.map { |o| o['name'] }.reject! { |r| !["gcp-cto", "gcp-sys-ops"].include?(r) } + MU.log "vs", MU::WARN, details: with.map { |o| o['name'] }.reject! { |r| !["gcp-cto", "gcp-sys-ops"].include?(r) } +end changes = [] if on.is_a?(Hash) @@ -118,13 +122,13 @@ def diff(with, on = self, level: 0, parents: [], report: {}) # sorting arrays full of weird, non-primitive types. done = [] on.sort.each { |elt| - if elt.is_a?(Hash) and MU::MommaCat.getChunkName(elt) - elt_namestr = MU::MommaCat.getChunkName(elt) + if elt.is_a?(Hash) and !MU::MommaCat.getChunkName(elt).first.nil? 
+ elt_namestr, elt_location = MU::MommaCat.getChunkName(elt) with.sort.each { |other_elt| - other_elt_namestr = MU::MommaCat.getChunkName(other_elt) + other_elt_namestr, other_elt_location = MU::MommaCat.getChunkName(other_elt) # Case 1: The array element exists in both version of this array - if elt_namestr and other_elt_namestr and elt_namestr == other_elt_namestr + if elt_namestr and other_elt_namestr and elt_namestr == other_elt_namestr and (elt_location.nil? or other_elt_location.nil? or elt_location == other_elt_location) done << elt done << other_elt break if elt == other_elt # if they're identical, we're done @@ -143,7 +147,7 @@ def diff(with, on = self, level: 0, parents: [], report: {}) # Case 2: This array entry exists in the old version, but not the new one on_unique.each { |e| - namestr = MU::MommaCat.getChunkName(e) + namestr = MU::MommaCat.getChunkName(e).first report ||= {} if e.is_a?(Hash) @@ -155,7 +159,7 @@ def diff(with, on = self, level: 0, parents: [], report: {}) # Case 3: This array entry exists in the new version, but not the old one with_unique.each { |e| - namestr = MU::MommaCat.getChunkName(e) + namestr = MU::MommaCat.getChunkName(e).first report ||= {} if e.is_a?(Hash) diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index ed39a6bb6..5a5060c5c 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -30,7 +30,7 @@ class Incomplete < MU::MuNonFatal; end :omnibus => "Jam everything into one monolothic configuration" } - def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [], scrub_mu_isms: false, regions: []) + def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [], scrub_mu_isms: false, regions: [], 
merge: false) @scraped = {} @clouds = clouds @types = types @@ -47,6 +47,7 @@ def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_type @regions = regions @habitats ||= [] @scrub_mu_isms = scrub_mu_isms + @merge = merge end # Walk cloud providers with available credentials to discover resources @@ -125,6 +126,10 @@ def scrapeClouds() next end # XXX apply any filters (e.g. MU-ID tags) + if obj.cloud_id.nil? + MU.log "This damn thing gave me no cloud id, what do I even do with that", MU::ERR, details: obj + exit + end @scraped[type][obj.cloud_id] = obj } end @@ -266,6 +271,9 @@ def generateBaskets(prefix: "") kitten_cfg.delete("credentials") if @target_creds class_semaphore.synchronize { bok[res_class.cfg_plural] << kitten_cfg + if !kitten_cfg['cloud_id'] + MU.log "No cloud id in this #{res_class.cfg_name} kitten!", MU::ERR, details: kitten_cfg + end } count += 1 end @@ -331,7 +339,8 @@ def generateBaskets(prefix: "") end if deploy and @diff - prevcfg = MU::Config.manxify(vacuum(deploy.original_config, deploy: deploy)) + prev_vacuumed = vacuum(deploy.original_config, deploy: deploy) + prevcfg = MU::Config.manxify(prev_vacuumed) if !prevcfg MU.log "#{deploy.deploy_id} didn't have a working original config for me to compare", MU::ERR exit 1 @@ -341,6 +350,10 @@ def generateBaskets(prefix: "") if MU.muCfg['adopt_change_notify'] notifyChanges(deploy, report) end + if @merge + MU.log "Saving changes to #{deploy.deploy_id}" + deploy.updateBasketofKittens(newcfg) + end end } @boks @@ -350,8 +363,7 @@ def generateBaskets(prefix: "") # @param tier [Hash] # @param parent_key [String] - # @param formatting [String]: Should be one of +console+, +email+, or +slack+ - def crawlChangeReport(tier, parent_key = nil, formatting: "console") + def crawlChangeReport(tier, parent_key = nil, indent: "") report = [] if tier.is_a?(Array) tier.each { |a| @@ -368,54 +380,86 @@ def crawlChangeReport(tier, parent_key = nil, formatting: "console") "in" end - loc = "" - 
type_of = parent_key.sub(/s$/, '') if parent_key - - name = if tier[:value] and tier[:value].is_a?(Hash) - MU::MommaCat.getChunkName(tier[:value], type_of) - elsif parent_key - parent_key - end - + loc = name = "" + type_of = parent_key.sub(/s$|\[.*/, '') if parent_key if tier[:value] and tier[:value].is_a?(Hash) - if tier[:value]['project'] - loc = " #{preposition} \*"+tier[:value]['project']+"\*" - elsif tier[:value]['habitat'] and tier[:value]['habitat']['id'] - loc = " #{preposition} \*"+tier[:value]['habitat']['id']+"\*" - end + name, loc = MU::MommaCat.getChunkName(tier[:value], type_of) + elsif parent_key + name = parent_key end - text = "`"+(name ? name : type_of)+"`" - text ||= "" - text += " was #{tier[:action]}#{loc}" - - + path_str = [] + slack_path_str = "" if tier[:parents] and tier[:parents].size > 2 path = tier[:parents].clone path.shift path.shift - text += " under `"+path.join("/")+"`" + for c in (0..(path.size-1)) do + path_str << (" " * (c+2)) + (path[c] || "") + end + slack_path_str += " under `"+path.join("/")+"`" + end + path_str << "" if !path_str.empty? + + plain = (name ? name : type_of) if name or type_of + plain ||= "" # XXX but this is a problem + plain += " ("+loc+")" if loc and !loc.empty? + + slack = "`"+plain+"`" + slack += " was #{tier[:action]} #{preposition} \*#{loc}\*" if loc and !loc.empty? + + if tier[:action] == :added + color = "+ ".green + plain + plain = "+ " + plain + elsif tier[:action] == :removed + color = "- ".red + plain + plain = "- " + plain end - text += "." + plain = path_str.join(" => \n") + indent + plain + color = path_str.join(" => \n") + indent + color + + slack += slack_path_str+"." 
+ myreport = { + "slack" => slack, + "plain" => plain, + "color" => color + } + append = "" if tier[:value] and (tier[:value].is_a?(Array) or tier[:value].is_a?(Hash)) - if tier[:value].is_a?(Hash) and (tier[:value].keys - ["id", "name", "type"]).size > 0 - report << { "text" => text, "details" => tier[:value] } + if tier[:value].is_a?(Hash) + if name + tier[:value].delete("entity") + tier[:value].delete(name.sub(/\[.*/, '')) if name + end + if (tier[:value].keys - ["id", "name", "type"]).size > 0 + myreport["details"] = tier[:value].clone + append = PP.pp(tier[:value], '').gsub(/(^|\n)/, '\1'+indent) + end else - report << { "text" => text } + append = indent+"["+tier[:value].map { |v| MU::MommaCat.getChunkName(v, type_of).reverse.join("/") || v.to_s.light_blue }.join(", ")+"]" end else tier[:value] ||= "" - report << { "text" => text+" New #{tier[:field] ? "`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" } + myreport["slack"] = slack+" New #{tier[:field] ? "`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" + append = tier[:value].to_s.bold end - else - tier.each_pair { |k, v| - next if !(v.is_a?(Hash) or v.is_a?(Array)) - sub_report = crawlChangeReport(v, k) - report.concat(sub_report) if sub_report and !sub_report.empty? - } + if append and !append.empty? + myreport["plain"] += " =>\n "+indent+append + myreport["color"] += " =>\n "+indent+append + end + + report << myreport if tier[:action] end + + # Just because we've got changes at this level doesn't mean there aren't + # more further down. + tier.each_pair { |k, v| + next if !(v.is_a?(Hash) or v.is_a?(Array)) + sub_report = crawlChangeReport(v, k, indent: indent+" ") + report.concat(sub_report) if sub_report and !sub_report.empty? 
+ } end report @@ -423,40 +467,58 @@ def crawlChangeReport(tier, parent_key = nil, formatting: "console") def notifyChanges(deploy, report) -exit - if MU.muCfg['adopt_change_notify']['slack'] - snippet_threshold = MU.muCfg['adopt_change_notify']['slack_snippet_threshold'] || 5 - report.each_pair { |res_type, resources| - shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(res_type, false) - noun = shortclass ? shortclass.to_s : res_type.capitalize - - resources.each_pair { |name, data| - verb = if data[:action] - data[:action] + snippet_threshold = (MU.muCfg['adopt_change_notify'] && MU.muCfg['adopt_change_notify']['slack_snippet_threshold']) || 5 + + report.each_pair { |res_type, resources| + shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(res_type, false) + next if !shortclass # we don't really care about Mu metadata changes + resources.each_pair { |name, data| + if MU::MommaCat.getChunkName(data[:value], res_type).first.nil? + symbol = if data[:action] == :added + "+".green + elsif data[:action] == :removed + "-".red else - "modified" + "~".yellow end + puts (symbol+" "+res_type+"["+name+"]") + end - slacktext = "#{noun} \*#{name}\* was #{verb}" - snippets = [] + noun = shortclass ? 
shortclass.to_s : res_type.capitalize + verb = if data[:action] + data[:action].to_s + else + "modified" + end - if [:added, :removed].include?(data[:action]) and data[:value] - snippets << { text: "```"+JSON.pretty_generate(data[:value])+"```" } - else - changes = crawlChangeReport(data) + changes = crawlChangeReport(data, res_type) + + slacktext = "#{noun} \*#{name}\* was #{verb}" + snippets = [] + + if [:added, :removed].include?(data[:action]) and data[:value] + snippets << { text: "```"+JSON.pretty_generate(data[:value])+"```" } + else changes.each { |c| - slacktext += "\n • "+c["text"] + slacktext += "\n • "+c["slack"] if c["details"] details = JSON.pretty_generate(c["details"]) snippets << { text: "```"+JSON.pretty_generate(c["details"])+"```" } end } + end - deploy.sendAdminSlack(slacktext, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], snippets: snippets) - end + changes.each { |c| + puts c["color"] } + puts "" + + if MU.muCfg['adopt_change_notify']['slack'] + deploy.sendAdminSlack(slacktext, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], snippets: snippets, noop: true) + end + } - end + } if MU.muCfg['adopt_change_notify']['email'] end @@ -542,7 +604,7 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) raise Incomplete if obj.nil? 
if save deploydata = obj.notify - deploy.notify(attrs[:cfg_plural], resource['name'], deploydata, triggering_node: obj) + deploy.notify(attrs[:cfg_plural], resource['name'], deploydata, triggering_node: obj) # XXX make sure this doesn't force a save end new_cfg = resolveReferences(resource, deploy, obj) new_cfg.delete("cloud_id") @@ -603,7 +665,6 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) scrubSchemaDefaults(bok, MU::Config.schema) if save - deploy.updateBasketofKittens(bok) MU.log "Committing adopted deployment to #{MU.dataDir}/deployments/#{deploy.deploy_id}", MU::NOTICE, details: origin deploy.save!(force: true, origin: origin) end @@ -756,6 +817,10 @@ def generateStubDeploy(bok) if !@scraped[typename][kitten['cloud_id']] MU.log "No object in scraped tree for #{attrs[:cfg_name]} #{kitten['cloud_id']} (#{kitten['name']})", MU::ERR, details: kitten + if kitten['cloud_id'].nil? + pp caller + exit + end next end @@ -766,7 +831,8 @@ def generateStubDeploy(bok) deploy.addKitten( attrs[:cfg_plural], kitten['name'], - @scraped[typename][kitten['cloud_id']] + @scraped[typename][kitten['cloud_id']], + do_notify: true ) } end diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index b1d32baf8..492c58fb6 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -447,6 +447,7 @@ def describe(cloud_id: nil) res_name = @config['name'] if !@config.nil? @credentials ||= @config['credentials'] if !@config.nil? deploydata = nil + if !@deploy.nil? and @deploy.is_a?(MU::MommaCat) and !@deploy.deployment.nil? and !@deploy.deployment[res_type].nil? and @@ -884,6 +885,9 @@ def resourceInitHook deploydata.delete("#MUOBJECT") @deploy.notify(self.class.cfg_plural, @config['name'], deploydata, triggering_node: @cloudobj, delayed_save: @delayed_save) if !@deploy.nil? elsif method == :notify + if retval.nil? 
+ MU.log self.to_s+" didn't return any metadata from notify", MU::WARN, details: @cloudobj.cloud_desc + end retval['cloud_id'] = @cloudobj.cloud_id.to_s if !@cloudobj.cloud_id.nil? retval['mu_name'] = @cloudobj.mu_name if !@cloudobj.mu_name.nil? @deploy.notify(self.class.cfg_plural, @config['name'], retval, triggering_node: @cloudobj, delayed_save: @delayed_save) if !@deploy.nil? diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 04edc3d65..7344e77e8 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -167,6 +167,7 @@ def initialize(deploy_id, @need_deploy_flush = false @node_cert_semaphore = Mutex.new @deployment = deployment_data + @deployment['mu_public_ip'] = MU.mu_public_ip @private_key = nil @public_key = nil @@ -398,7 +399,7 @@ def removeKitten(object) # @param type [String]: # @param name [String]: # @param object [MU::Cloud]: - def addKitten(type, name, object) + def addKitten(type, name, object, do_notify: false) if !type or !name or !object or !object.mu_name raise MuError, "Nil arguments to addKitten are not allowed (got type: #{type}, name: #{name}, and '#{object}' to add)" end @@ -406,7 +407,7 @@ def addKitten(type, name, object) _shortclass, _cfg_name, type, _classname, attrs = MU::Cloud.getResourceNames(type) object.intoDeploy(self) - @kitten_semaphore.synchronize { + add_block = Proc.new { @kittens[type] ||= {} @kittens[type][object.habitat] ||= {} if attrs[:has_multiples] @@ -415,7 +416,20 @@ def addKitten(type, name, object) else @kittens[type][object.habitat][name] = object end + if do_notify + notify(type, name, object.notify, triggering_node: object, delayed_save: true) + end } + + begin + @kitten_semaphore.synchronize { + add_block.call() + } + rescue ThreadError => e + # already locked by a parent call to this method, so this should be safe + raise e if !e.message.match(/recursive locking/) + add_block.call() + end end # Encrypt a string with the deployment's public key. 
@@ -533,10 +547,9 @@ def SSHKey # @param remove [Boolean]: Remove this resource from the deploy structure, instead of adding it. # @return [void] def notify(type, key, data, mu_name: nil, remove: false, triggering_node: nil, delayed_save: false) - return if @no_artifacts begin - MU::MommaCat.lock("deployment-notification") + MU::MommaCat.lock("deployment-notification") if !@no_artifacts if !@need_deploy_flush or @deployment.nil? or @deployment.empty? loadDeploy(true) # make sure we're saving the latest and greatest @@ -575,7 +588,7 @@ def notify(type, key, data, mu_name: nil, remove: false, triggering_node: nil, d @deployment[type][key] = data MU.log "Adding to @deployment[#{type}][#{key}]", MU::DEBUG, details: data end - save!(key) if !delayed_save + save!(key) if !delayed_save and !@no_artifacts else have_deploy = true if @deployment[type].nil? or @deployment[type][key].nil? @@ -600,10 +613,10 @@ def notify(type, key, data, mu_name: nil, remove: false, triggering_node: nil, d end } end - save! if !delayed_save + save! if !delayed_save and !@no_artifacts end ensure - MU::MommaCat.unlock("deployment-notification") + MU::MommaCat.unlock("deployment-notification") if !@no_artifacts end end diff --git a/modules/mu/mommacat/naming.rb b/modules/mu/mommacat/naming.rb index 9b163d55e..bed81cce8 100644 --- a/modules/mu/mommacat/naming.rb +++ b/modules/mu/mommacat/naming.rb @@ -47,31 +47,33 @@ def self.guessName(desc, resourceclass, cloud_id: nil, tag_value: nil) end - # Given a piece of a BoK resource descriptor Hash, come up with a shorthand - # string to give it a name for human readers. If nothing reasonable can be + # Given a piece of a BoK resource descriptor Hash, come up with shorthand + # strings to give it a name for human readers. If nothing reasonable can be # extracted, returns nil. # @param obj [Hash] # @param array_of [String] - # @return [String,nil] + # @return [Array] def self.getChunkName(obj, array_of = nil) - return nil if obj.nil? 
+ return [nil, nil] if obj.nil? if [String, Integer, Boolean].include?(obj.class) - return obj + return [obj, nil] end obj_type = array_of || obj['type'] obj_name = obj['name'] || obj['id'] || obj['mu_name'] || obj['cloud_id'] - if obj_name + + name_string = if obj_name if obj_type "#{obj_type}[#{obj_name}]" else - obj_name + obj_name.dup end else found_it = nil + using = nil ["entity", "role"].each { |subtype| if obj[subtype] and obj[subtype].is_a?(Hash) found_it = if obj[subtype]["id"] - obj[subtype]['id'] + obj[subtype]['id'].dup elsif obj[subtype]["type"] and obj[subtype]["name"] "#{obj[subtype]['type']}[#{obj[subtype]['name']}]" end @@ -80,6 +82,28 @@ def self.getChunkName(obj, array_of = nil) } found_it end + name_string.gsub!(/\[.+?\](\[.+?\]$)/, '\1') if name_string # source is frozen so we can't just do gsub! + + location = if obj['project'] + obj['project'] + elsif obj['habitat'] and (obj['habitat']['id'] or obj['habitat']['name']) + obj['habitat']['name'] || obj['habitat']['id'] + else + hab_str = nil + ['projects', 'habitats'].each { |key| + + if obj[key] and obj[key].is_a?(Array) + hab_str = obj[key].map { |p| + (p["name"] || p["id"]).gsub(/^.*?[^\/]+\/([^\/]+)$/, '\1') + }.join(", ") + name_string.gsub!(/^.*?[^\/]+\/([^\/]+)$/, '\1') if name_string + break + end + } + hab_str + end + + [name_string, location] end # Generate a three-character string which can be used to unique-ify the diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index 30dc4137f..65f94ab2e 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -91,12 +91,22 @@ def self.locks; # Overwrite this deployment's configuration with a new version. Save the # previous version as well. 
# @param new_conf [Hash]: A new configuration, fully resolved by {MU::Config} - def updateBasketofKittens(new_conf) + def updateBasketofKittens(new_conf, skip_validation: false) loadDeploy if new_conf == @original_config return end + # Make sure the new config that we were just handed resolves and makes + # sense + if !skip_validation + f = Tempfile.new(@deploy_id) + f.write JSON.parse(JSON.generate(new_conf)).to_yaml + conf_engine = MU::Config.new(f.path) + f.close + new_conf = conf_engine.config + end + backup = "#{deploy_dir}/basket_of_kittens.json.#{Time.now.to_i.to_s}" MU.log "Saving previous config of #{@deploy_id} to #{backup}" config = File.new(backup, File::CREAT|File::TRUNC|File::RDWR, 0600) @@ -106,7 +116,7 @@ def updateBasketofKittens(new_conf) config.close @original_config = new_conf.clone - save! # XXX this will happen later, more sensibly +# save! # XXX this will happen later, more sensibly MU.log "New config saved to #{deploy_dir}/basket_of_kittens.json" end diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index 3d024f5c2..41f069ee7 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -594,7 +594,7 @@ def self.find(**args) role = MU::Cloud::Google.iam(credentials: args[:credentials]).get_role(r) found[role.name] = role elsif !found[r] - MU.log "NEED TO GET #{r}", MU::WARN +# MU.log "NEED TO GET #{r}", MU::WARN end } end @@ -688,7 +688,7 @@ def toKitten(**args) ids, _names, _privs = MU::Cloud::Google::Role.privilege_service_to_name(@config['credentials']) cloud_desc.role_privileges.each { |priv| if !ids[priv.service_id] - MU.log "Role privilege defined for a service id with no name I can find, writing with raw id", MU::WARN, details: priv + MU.log "Role privilege defined for a service id with no name I can find, writing with raw id", MU::DEBUG, details: priv bok["import"] << priv.service_id+"/"+priv.privilege_name else bok["import"] << 
ids[priv.service_id]+"/"+priv.privilege_name @@ -900,7 +900,7 @@ def self.insertBinding(scopetype, scope, binding = nil, member_type: nil, member end role = MU::Cloud::Google.admin_directory(credentials: credentials).get_role(MU::Cloud::Google.customerID(credentials), binding.role_id) - MU.log "Failed to find entity #{binding.assigned_to} referenced in GSuite/Cloud Identity binding to role #{role.role_name}", MU::WARN, details: role + MU.log "Failed to find entity #{binding.assigned_to} referenced in GSuite/Cloud Identity binding to role #{role.role_name}", MU::DEBUG, details: role } resp = MU::Cloud::Google.resource_manager(credentials: credentials).get_organization_iam_policy(my_org.name) @@ -914,7 +914,7 @@ def self.insertBinding(scopetype, scope, binding = nil, member_type: nil, member } } end - MU::Cloud.resourceClass("Google", "Habitat").find(credentials: credentials).keys.each { |project| + MU::Cloud::Google.listHabitats(credentials).each { |project| begin MU::Cloud.resourceClass("Google", "Habitat").bindings(project, credentials: credentials).each { |binding| insertBinding("projects", project, binding) diff --git a/modules/mu/providers/google/user.rb b/modules/mu/providers/google/user.rb index 8d22f4eda..69dfc733a 100644 --- a/modules/mu/providers/google/user.rb +++ b/modules/mu/providers/google/user.rb @@ -26,10 +26,12 @@ def initialize(**args) # If we're being reverse-engineered from a cloud descriptor, use that # to determine what sort of account we are. 
if args[:from_cloud_desc] + @cloud_desc_cache = args[:from_cloud_desc] MU::Cloud::Google.admin_directory MU::Cloud::Google.iam if args[:from_cloud_desc].class == ::Google::Apis::AdminDirectoryV1::User @config['type'] = "interactive" + @cloud_id = args[:from_cloud_desc].primary_email elsif args[:from_cloud_desc].class == ::Google::Apis::IamV1::ServiceAccount @config['type'] = "service" @config['name'] = args[:from_cloud_desc].display_name @@ -48,6 +50,10 @@ def initialize(**args) @config['name'] end + if @config['type'] == "interactive" and @config['email'] + @cloud_id ||= @config['email'] + end + end # Called automatically by {MU::Deploy#createResources} @@ -195,6 +201,7 @@ def cloud_desc(use_cache: true) if @config['type'] == "interactive" or !@config['type'] @config['type'] ||= "interactive" if !@config['external'] + @cloud_id ||= @config['email'] @cloud_desc_cache = MU::Cloud::Google.admin_directory(credentials: @config['credentials']).get_user(@cloud_id) else return nil @@ -226,7 +233,7 @@ def notify else {} end - description.delete(:etag) + description.delete(:etag) if description description end From 9dd502862b19a4c2e61742e10ea9ec6bb5240f78 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 21 Apr 2020 00:55:02 -0400 Subject: [PATCH 085/124] Google::Role: shush about Google-internal directory service ids with no mappings, they're routine --- modules/mu/providers/google/role.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index 41f069ee7..ce19b1c5d 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -1099,7 +1099,7 @@ def self.validateConfig(role, configurator) MU.log "None of the directory service privileges available to credentials #{role['credentials']} map to the ones declared for role #{role['name']}", MU::ERR, details: role['import'].sort ok = false elsif missing.size > 0 - MU.log "Some directory service privileges 
declared for role #{role['name']} aren't available to credentials #{role['credentials']}, will skip", MU::WARN, details: missing + MU.log "Some directory service privileges declared for role #{role['name']} aren't available to credentials #{role['credentials']}, will skip", MU::DEBUG, details: missing end end end From 8eb970e4411d7d3ad37702cc2f83d8b21347015d Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 22 Apr 2020 15:36:29 -0400 Subject: [PATCH 086/124] Google::User.find: Correctly return only matching results when given a cloud_id; Adoption: quash a lot of scenarios where we duplicate data --- bin/mu-adopt | 3 --- modules/mu.rb | 22 +++++++++------------- modules/mu/adoption.rb | 29 ++++++++++++++++++----------- modules/mu/mommacat/storage.rb | 11 ++++++----- modules/mu/providers/google/role.rb | 5 +++++ modules/mu/providers/google/user.rb | 2 ++ 6 files changed, 40 insertions(+), 32 deletions(-) diff --git a/bin/mu-adopt b/bin/mu-adopt index 53bff4200..ea25983e2 100755 --- a/bin/mu-adopt +++ b/bin/mu-adopt @@ -112,10 +112,7 @@ boks.each_pair { |appname, bok| File.open("#{appname}.yaml", "w") { |f| f.write JSON.parse(JSON.generate(bok)).to_yaml } - conf_engine = MU::Config.new("#{appname}.yaml") - stack_conf = conf_engine.config # puts stack_conf.to_yaml - MU.log "#{appname}.yaml validated successfully", MU::NOTICE MU::Cloud.resource_types.each_pair { |type, cfg| if bok[cfg[:cfg_plural]] MU.log "#{bok[cfg[:cfg_plural]].size.to_s} #{cfg[:cfg_plural]}", MU::NOTICE diff --git a/modules/mu.rb b/modules/mu.rb index 2f1e37e1f..c32f06494 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -82,17 +82,13 @@ def <=>(other) 0 end - # Recursively compare two hashes + # Recursively compare two Mu Basket of Kittens hashes and report the differences def diff(with, on = self, level: 0, parents: [], report: {}) return if with.nil? and on.nil? if with.nil? or on.nil? 
or with.class != on.class return # XXX ...however we're flagging differences end return if on == with -if parents.include?("group") or parents.include?("groups") and on.is_a?(Array) and on.first.is_a?(Hash) - MU.log "this fecker under #{parents.join(" => ")}", MU::WARN, details: on.map { |o| o['name'] }.reject! { |r| !["gcp-cto", "gcp-sys-ops"].include?(r) } - MU.log "vs", MU::WARN, details: with.map { |o| o['name'] }.reject! { |r| !["gcp-cto", "gcp-sys-ops"].include?(r) } -end changes = [] if on.is_a?(Hash) @@ -107,10 +103,10 @@ def diff(with, on = self, level: 0, parents: [], report: {}) end } on_unique.each { |k| - report[k] = { :action => :removed, :parents => parents, :value => on[k] } + report[k] = { :action => :removed, :parents => parents, :value => on[k].clone } } with_unique.each { |k| - report[k] = { :action => :added, :parents => parents, :value => with[k] } + report[k] = { :action => :added, :parents => parents, :value => with[k].clone } } elsif on.is_a?(Array) return if with == on @@ -151,9 +147,9 @@ def diff(with, on = self, level: 0, parents: [], report: {}) report ||= {} if e.is_a?(Hash) - report[namestr] = { :action => :removed, :parents => parents, :value => e } + report[namestr] = { :action => :removed, :parents => parents, :value => e.clone } else - report[namestr] = { :action => :removed, :parents => parents, :value => e } + report[namestr] = { :action => :removed, :parents => parents, :value => e.clone } end } @@ -163,20 +159,20 @@ def diff(with, on = self, level: 0, parents: [], report: {}) report ||= {} if e.is_a?(Hash) - report[namestr] = { :action => :added, :parents => parents, :value => e } + report[namestr] = { :action => :added, :parents => parents, :value => e.clone } else - report[namestr] = { :action => :added, :parents => parents, :value => e } + report[namestr] = { :action => :added, :parents => parents, :value => e.clone } end } # A plain old leaf node of data else if on != with - report = { :action => :changed, :parents => 
parents, :oldvalue => on, :value => with } + report = { :action => :changed, :parents => parents, :oldvalue => on, :value => with.clone } end end - report + report.freeze end # Implement a merge! that just updates each hash leaf as needed, not diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 5a5060c5c..314b0aae2 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -265,7 +265,7 @@ def generateBaskets(prefix: "") end threads << Thread.new(obj_thr) { |obj| - kitten_cfg = obj.toKitten(rootparent: @default_parent, billing: @billing, habitats: @habitats) + kitten_cfg = obj.toKitten(rootparent: @default_parent, billing: @billing, habitats: @habitats, types: @types) if kitten_cfg print "." kitten_cfg.delete("credentials") if @target_creds @@ -331,6 +331,7 @@ def generateBaskets(prefix: "") # Now walk through all of the Refs in these objects, resolve them, and minimize # their config footprint MU.log "Minimizing footprint of #{count.to_s} found resources", MU::DEBUG + @boks[bok['appname']] = vacuum(bok, origin: origin, save: @savedeploys) if @diff and !deploy @@ -346,14 +347,19 @@ def generateBaskets(prefix: "") exit 1 end newcfg = MU::Config.manxify(@boks[bok['appname']]) + report = prevcfg.diff(newcfg) - if MU.muCfg['adopt_change_notify'] - notifyChanges(deploy, report) - end - if @merge - MU.log "Saving changes to #{deploy.deploy_id}" - deploy.updateBasketofKittens(newcfg) + if report + + if MU.muCfg['adopt_change_notify'] + notifyChanges(deploy, report.freeze) + end + if @merge + MU.log "Saving changes to #{deploy.deploy_id}" + deploy.updateBasketofKittens(newcfg, save_now: true) + end end + end } @boks @@ -405,6 +411,7 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") plain = (name ? name : type_of) if name or type_of plain ||= "" # XXX but this is a problem plain += " ("+loc+")" if loc and !loc.empty? + color = plain slack = "`"+plain+"`" slack += " was #{tier[:action]} #{preposition} \*#{loc}\*" if loc and !loc.empty? 
@@ -491,7 +498,7 @@ def notifyChanges(deploy, report) "modified" end - changes = crawlChangeReport(data, res_type) + changes = crawlChangeReport(data.freeze, res_type) slacktext = "#{noun} \*#{name}\* was #{verb}" snippets = [] @@ -514,7 +521,7 @@ def notifyChanges(deploy, report) puts "" if MU.muCfg['adopt_change_notify']['slack'] - deploy.sendAdminSlack(slacktext, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], snippets: snippets, noop: true) + deploy.sendAdminSlack(slacktext, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], snippets: snippets) end } @@ -642,7 +649,7 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) h.each { |v| newarr << scrub_globals.call(v, field) } - h = newarr + h = newarr.uniq end h } @@ -767,7 +774,7 @@ def resolveReferences(cfg, deploy, parent) MU.log "Dropping unresolved value", MU::WARN, details: value end } - cfg = new_array + cfg = new_array.uniq end cfg diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index 65f94ab2e..003bb4d9d 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -91,7 +91,7 @@ def self.locks; # Overwrite this deployment's configuration with a new version. Save the # previous version as well. 
# @param new_conf [Hash]: A new configuration, fully resolved by {MU::Config} - def updateBasketofKittens(new_conf, skip_validation: false) + def updateBasketofKittens(new_conf, skip_validation: false, save_now: false) loadDeploy if new_conf == @original_config return @@ -102,9 +102,8 @@ def updateBasketofKittens(new_conf, skip_validation: false) if !skip_validation f = Tempfile.new(@deploy_id) f.write JSON.parse(JSON.generate(new_conf)).to_yaml - conf_engine = MU::Config.new(f.path) + conf_engine = MU::Config.new(f.path) # will throw an exception if it's bad, adoption should catch this and cope reasonably f.close - new_conf = conf_engine.config end backup = "#{deploy_dir}/basket_of_kittens.json.#{Time.now.to_i.to_s}" @@ -116,8 +115,10 @@ def updateBasketofKittens(new_conf, skip_validation: false) config.close @original_config = new_conf.clone -# save! # XXX this will happen later, more sensibly - MU.log "New config saved to #{deploy_dir}/basket_of_kittens.json" + if save_now + save! + MU.log "New config saved to #{deploy_dir}/basket_of_kittens.json" + end end @lock_semaphore = Mutex.new diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index ce19b1c5d..d024e4c56 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -753,6 +753,9 @@ def toKitten(**args) if foreign { "id" => entity } else + shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(mu_entitytype) + MU.log "Role #{@cloud_id}: Skipping #{shortclass} binding for #{entity}; we are adopting that type and will set bindings from that resource", MU::DEBUG + next if args[:types].include?(shortclass) MU::Config::Ref.get( id: entity, cloud: "Google", @@ -778,6 +781,7 @@ def toKitten(**args) } } } + bok["bindings"] ||= [] refmap.each_pair { |entity, scopes| newbinding = { "entity" => entity } @@ -1125,6 +1129,7 @@ def self.validateConfig(role, configurator) MU::Config.addDependency(role, binding['entity']['name'], 
binding['entity']['type']) end } + role['bindings'].uniq! end ok diff --git a/modules/mu/providers/google/user.rb b/modules/mu/providers/google/user.rb index 69dfc733a..1e64715b4 100644 --- a/modules/mu/providers/google/user.rb +++ b/modules/mu/providers/google/user.rb @@ -363,8 +363,10 @@ def self.find(**args) else if cred_cfg['masquerade_as'] resp = MU::Cloud::Google.admin_directory(credentials: args[:credentials]).list_users(customer: MU::Cloud::Google.customerID(args[:credentials]), show_deleted: false) +# XXX this ain't exactly performant, do some caching or something if resp and resp.users resp.users.each { |u| + next if args[:cloud_id] and !args[:cloud_id] != u.primary_email found[u.primary_email] = u } end From 9f368967ea4ec4e15ab09028835ae6b8f2a43a09 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 22 Apr 2020 22:17:29 -0400 Subject: [PATCH 087/124] Adoption: a little smarter with name de-duplication and listing habitats on notifications --- modules/mu.rb | 27 ++++++++++++--------------- modules/mu/adoption.rb | 23 +++++++++++++++-------- modules/mu/providers/google/role.rb | 2 +- 3 files changed, 28 insertions(+), 24 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index c32f06494..19727e352 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -83,7 +83,7 @@ def <=>(other) end # Recursively compare two Mu Basket of Kittens hashes and report the differences - def diff(with, on = self, level: 0, parents: [], report: {}) + def diff(with, on = self, level: 0, parents: [], report: {}, habitat: nil) return if with.nil? and on.nil? if with.nil? or on.nil? 
or with.class != on.class return # XXX ...however we're flagging differences @@ -96,7 +96,7 @@ def diff(with, on = self, level: 0, parents: [], report: {}) with_unique = (with.keys - on.keys) shared = (with.keys & on.keys) shared.each { |k| - report_data = diff(with[k], on[k], level: level+1, parents: parents + [k], report: report[k]) + report_data = diff(with[k], on[k], level: level+1, parents: parents + [k], report: report[k], habitat: habitat) if report_data and !report_data.empty? report ||= {} report[k] = report_data @@ -104,9 +104,11 @@ def diff(with, on = self, level: 0, parents: [], report: {}) } on_unique.each { |k| report[k] = { :action => :removed, :parents => parents, :value => on[k].clone } + report[k][:habitat] = habitat if habitat } with_unique.each { |k| report[k] = { :action => :added, :parents => parents, :value => with[k].clone } + report[k][:habitat] = habitat if habitat } elsif on.is_a?(Array) return if with == on @@ -128,7 +130,7 @@ def diff(with, on = self, level: 0, parents: [], report: {}) done << elt done << other_elt break if elt == other_elt # if they're identical, we're done - report_data = diff(other_elt, elt, level: level+1, parents: parents + [elt_namestr]) + report_data = diff(other_elt, elt, level: level+1, parents: parents + [elt_namestr], habitat: elt_location) if report_data and !report_data.empty? 
report ||= {} report[elt_namestr] = report_data @@ -143,32 +145,27 @@ def diff(with, on = self, level: 0, parents: [], report: {}) # Case 2: This array entry exists in the old version, but not the new one on_unique.each { |e| - namestr = MU::MommaCat.getChunkName(e).first + namestr, loc = MU::MommaCat.getChunkName(e) report ||= {} - if e.is_a?(Hash) - report[namestr] = { :action => :removed, :parents => parents, :value => e.clone } - else - report[namestr] = { :action => :removed, :parents => parents, :value => e.clone } - end + report[namestr] = { :action => :removed, :parents => parents, :value => e.clone } + report[namestr][:habitat] = loc if loc } # Case 3: This array entry exists in the new version, but not the old one with_unique.each { |e| - namestr = MU::MommaCat.getChunkName(e).first + namestr, loc = MU::MommaCat.getChunkName(e) report ||= {} - if e.is_a?(Hash) - report[namestr] = { :action => :added, :parents => parents, :value => e.clone } - else - report[namestr] = { :action => :added, :parents => parents, :value => e.clone } - end + report[namestr] = { :action => :added, :parents => parents, :value => e.clone } + report[namestr][:habitat] = loc if loc } # A plain old leaf node of data else if on != with report = { :action => :changed, :parents => parents, :oldvalue => on, :value => with.clone } + report[:habitat] = habitat if habitat end end diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 314b0aae2..65758d94f 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -307,13 +307,13 @@ def generateBaskets(prefix: "") if sibling['name'] == kitten_cfg['name'] MU.log "#{res_class.cfg_name} name #{sibling['name']} unavailable, will attempt to rename duplicate object", MU::DEBUG, details: kitten_cfg if kitten_cfg['parent'] and kitten_cfg['parent'].respond_to?(:id) and kitten_cfg['parent'].id - kitten_cfg['name'] = kitten_cfg['name']+kitten_cfg['parent'].id + kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['parent'].id 
elsif kitten_cfg['project'] - kitten_cfg['name'] = kitten_cfg['name']+kitten_cfg['project'] + kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['project'] elsif kitten_cfg['region'] - kitten_cfg['name'] = kitten_cfg['name']+kitten_cfg['region'] + kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['region'] elsif kitten_cfg['cloud_id'] - kitten_cfg['name'] = kitten_cfg['name']+kitten_cfg['cloud_id'].gsub(/[^a-z0-9]/i, "-") + kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['cloud_id'].gsub(/[^a-z0-9]/i, "-") else raise MU::Config::DuplicateNameError, "Saw duplicate #{res_class.cfg_name} name #{sibling['name']} and couldn't come up with a good way to differentiate them" end @@ -386,8 +386,9 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") "in" end - loc = name = "" + name = "" type_of = parent_key.sub(/s$|\[.*/, '') if parent_key + loc = tier[:habitat] if tier[:value] and tier[:value].is_a?(Hash) name, loc = MU::MommaCat.getChunkName(tier[:value], type_of) @@ -401,20 +402,23 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") path = tier[:parents].clone path.shift path.shift + path.pop if path.last == name for c in (0..(path.size-1)) do path_str << (" " * (c+2)) + (path[c] || "") end - slack_path_str += " under `"+path.join("/")+"`" + slack_path_str += " under `"+path.join("/")+"`" if path.size > 0 end path_str << "" if !path_str.empty? plain = (name ? name : type_of) if name or type_of plain ||= "" # XXX but this is a problem + slack = "`"+plain+"`" + plain += " ("+loc+")" if loc and !loc.empty? color = plain - slack = "`"+plain+"`" - slack += " was #{tier[:action]} #{preposition} \*#{loc}\*" if loc and !loc.empty? + slack += " was #{tier[:action]}" + slack += "#{preposition} \*#{loc}\*" if loc and !loc.empty? 
and [Array, Hash].include?(tier[:value].class) if tier[:action] == :added color = "+ ".green + plain @@ -501,6 +505,9 @@ def notifyChanges(deploy, report) changes = crawlChangeReport(data.freeze, res_type) slacktext = "#{noun} \*#{name}\* was #{verb}" + if data[:habitat] + slacktext += " in \*#{data[:habitat]}\*" + end snippets = [] if [:added, :removed].include?(data[:action]) and data[:value] diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index d024e4c56..22df68275 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -755,7 +755,7 @@ def toKitten(**args) else shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(mu_entitytype) MU.log "Role #{@cloud_id}: Skipping #{shortclass} binding for #{entity}; we are adopting that type and will set bindings from that resource", MU::DEBUG - next if args[:types].include?(shortclass) + next if args[:types].include?(shortclass) and !MU::Cloud::Google::User.cannedServiceAcctName?(entity) MU::Config::Ref.get( id: entity, cloud: "Google", From 8f275ae09c32360c1f207f26de42e0b4050a2214 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 23 Apr 2020 19:41:27 -0400 Subject: [PATCH 088/124] Google::User and Google::Role: get a little better at knowing when to ignore service accounts and role bindings --- modules/mu/providers/google/role.rb | 17 ++++++++++++++--- modules/mu/providers/google/user.rb | 1 + 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index 22df68275..9a465d385 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -742,20 +742,27 @@ def toKitten(**args) entity_types.each_pair { |entity_type, entities| mu_entitytype = (entity_type == "serviceAccount" ? "user" : entity_type)+"s" entities.each { |entity| + next if entity.nil? 
foreign = if entity_type == "serviceAccount" and entity.match(/@(.*?)\.iam\.gserviceaccount\.com/) !MU::Cloud::Google.listHabitats(@credentials).include?(Regexp.last_match[1]) end + entity_ref = if entity_type == "organizations" { "id" => ((org == my_org.name and @config['credentials']) ? @config['credentials'] : org) } elsif entity_type == "domain" { "id" => entity } else + shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(mu_entitytype) + if args[:types].include?(shortclass) and + !(entity_type == "serviceAccount" and + MU::Cloud::Google::User.cannedServiceAcctName?(entity)) + MU.log "Role #{@cloud_id}: Skipping #{shortclass} binding for #{entity}; we are adopting that type and will set bindings from that resource", MU::DEBUG + next + end + if foreign { "id" => entity } else - shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(mu_entitytype) - MU.log "Role #{@cloud_id}: Skipping #{shortclass} binding for #{entity}; we are adopting that type and will set bindings from that resource", MU::DEBUG - next if args[:types].include?(shortclass) and !MU::Cloud::Google::User.cannedServiceAcctName?(entity) MU::Config::Ref.get( id: entity, cloud: "Google", @@ -763,6 +770,10 @@ def toKitten(**args) ) end end + if entity_ref.nil? 
+ MU.log "I somehow ended up with a nil entity reference for #{entity_type} #{entity}", MU::ERR, details: [ bok, bindings ] + next + end refmap ||= {} refmap[entity_ref] ||= {} refmap[entity_ref][scopetype] ||= [] diff --git a/modules/mu/providers/google/user.rb b/modules/mu/providers/google/user.rb index 1e64715b4..6e07c36c6 100644 --- a/modules/mu/providers/google/user.rb +++ b/modules/mu/providers/google/user.rb @@ -383,6 +383,7 @@ def self.cannedServiceAcctName?(name) name.match(/\b\d+\-compute@developer\.gserviceaccount\.com$/) or name.match(/\bproject-\d+@storage-transfer-service\.iam\.gserviceaccount\.com$/) or name.match(/\b\d+@cloudbuild\.gserviceaccount\.com$/) or + name.match(/\b\d+@cloudservices\.gserviceaccount\.com$/) or name.match(/\bservice-\d+@containerregistry\.iam\.gserviceaccount\.com$/) or name.match(/\bservice-\d+@container-analysis\.iam\.gserviceaccount\.com$/) or name.match(/\bservice-\d+@compute-system\.iam\.gserviceaccount\.com$/) or From a141bec49b023b75a0c25e4f0870e76b6bd945bf Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 24 Apr 2020 15:08:26 -0400 Subject: [PATCH 089/124] Adoption: ensure that new deployment metadata gets injected into deploys into which we're merging diffed changes; Hash: brute-force sorting so that arrays of Hashes return a consistent result --- modules/mu.rb | 9 +++- modules/mu/adoption.rb | 59 ++++++++++++++-------- modules/mu/mommacat.rb | 4 +- modules/mu/mommacat/naming.rb | 2 +- modules/mu/mommacat/storage.rb | 78 +++++++++++++++++++---------- modules/mu/providers/google/role.rb | 14 ++---- 6 files changed, 105 insertions(+), 61 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index 19727e352..cca53fc48 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -79,7 +79,9 @@ def <=>(other) } return 0 if self == other # that was easy! # compare elements and decide who's "bigger" based on their totals? 
- 0 + + # fine, try some brute force and just hope everything implements to_s + self.flatten.map { |e| e.to_s }.join() <=> other.flatten.map { |e| e.to_s }.join() end # Recursively compare two Mu Basket of Kittens hashes and report the differences @@ -91,11 +93,13 @@ def diff(with, on = self, level: 0, parents: [], report: {}, habitat: nil) return if on == with changes = [] + report ||= {} if on.is_a?(Hash) on_unique = (on.keys - with.keys) with_unique = (with.keys - on.keys) shared = (with.keys & on.keys) shared.each { |k| + report_data = diff(with[k], on[k], level: level+1, parents: parents + [k], report: report[k], habitat: habitat) if report_data and !report_data.empty? report ||= {} @@ -125,12 +129,13 @@ def diff(with, on = self, level: 0, parents: [], report: {}, habitat: nil) with.sort.each { |other_elt| other_elt_namestr, other_elt_location = MU::MommaCat.getChunkName(other_elt) + # Case 1: The array element exists in both version of this array if elt_namestr and other_elt_namestr and elt_namestr == other_elt_namestr and (elt_location.nil? or other_elt_location.nil? or elt_location == other_elt_location) done << elt done << other_elt break if elt == other_elt # if they're identical, we're done - report_data = diff(other_elt, elt, level: level+1, parents: parents + [elt_namestr], habitat: elt_location) + report_data = diff(other_elt, elt, level: level+1, parents: parents + [elt_namestr], habitat: (elt_location || habitat)) if report_data and !report_data.empty? 
report ||= {} report[elt_namestr] = report_data diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 65758d94f..f4191708a 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -305,18 +305,7 @@ def generateBaskets(prefix: "") bok[res_class.cfg_plural].each { |sibling| next if kitten_cfg == sibling if sibling['name'] == kitten_cfg['name'] - MU.log "#{res_class.cfg_name} name #{sibling['name']} unavailable, will attempt to rename duplicate object", MU::DEBUG, details: kitten_cfg - if kitten_cfg['parent'] and kitten_cfg['parent'].respond_to?(:id) and kitten_cfg['parent'].id - kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['parent'].id - elsif kitten_cfg['project'] - kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['project'] - elsif kitten_cfg['region'] - kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['region'] - elsif kitten_cfg['cloud_id'] - kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['cloud_id'].gsub(/[^a-z0-9]/i, "-") - else - raise MU::Config::DuplicateNameError, "Saw duplicate #{res_class.cfg_name} name #{sibling['name']} and couldn't come up with a good way to differentiate them" - end + MU::Adoption.deDuplicateName(kitten_cfg, res_class) MU.log "De-duplication: Renamed #{res_class.cfg_name} name '#{sibling['name']}' => '#{kitten_cfg['name']}'", MU::NOTICE break end @@ -332,7 +321,8 @@ def generateBaskets(prefix: "") # their config footprint MU.log "Minimizing footprint of #{count.to_s} found resources", MU::DEBUG - @boks[bok['appname']] = vacuum(bok, origin: origin, save: @savedeploys) + generated_deploy = generateStubDeploy(bok) + @boks[bok['appname']] = vacuum(bok, origin: origin, deploy: generated_deploy, save: @savedeploys) if @diff and !deploy MU.log "diff flag set, but no comparable deploy provided for #{bok['appname']}", MU::ERR @@ -340,7 +330,7 @@ def generateBaskets(prefix: "") end if deploy and @diff - prev_vacuumed = vacuum(deploy.original_config, deploy: deploy) + prev_vacuumed = 
vacuum(deploy.original_config, deploy: deploy, keep_missing: true, copy_from: generated_deploy) prevcfg = MU::Config.manxify(prev_vacuumed) if !prevcfg MU.log "#{deploy.deploy_id} didn't have a working original config for me to compare", MU::ERR @@ -418,7 +408,7 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") color = plain slack += " was #{tier[:action]}" - slack += "#{preposition} \*#{loc}\*" if loc and !loc.empty? and [Array, Hash].include?(tier[:value].class) + slack += " #{preposition} \*#{loc}\*" if loc and !loc.empty? and [Array, Hash].include?(tier[:value].class) if tier[:action] == :added color = "+ ".green + plain @@ -528,7 +518,7 @@ def notifyChanges(deploy, report) puts "" if MU.muCfg['adopt_change_notify']['slack'] - deploy.sendAdminSlack(slacktext, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], snippets: snippets) + deploy.sendAdminSlack(slacktext, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], snippets: snippets, noop: false) end } @@ -593,8 +583,7 @@ def scrubSchemaDefaults(conf_chunk, schema_chunk, depth = 0, type: nil) # Do the same for our main objects: if they all use the same credentials, # for example, remove the explicit +credentials+ attributes and set that # value globally, once. - def vacuum(bok, origin: nil, save: false, deploy: nil) - deploy ||= generateStubDeploy(bok) + def vacuum(bok, origin: nil, save: false, deploy: nil, copy_from: nil, keep_missing: false) globals = { 'cloud' => {}, @@ -614,11 +603,20 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) end } obj = deploy.findLitterMate(type: attrs[:cfg_plural], name: resource['name']) + inject_metadata = save + if obj.nil? and copy_from + obj = copy_from.findLitterMate(type: attrs[:cfg_plural], name: resource['name']) + if obj + inject_metadata = true + obj.intoDeploy(deploy, force: true) + end + end + begin raise Incomplete if obj.nil? 
- if save + if inject_metadata deploydata = obj.notify - deploy.notify(attrs[:cfg_plural], resource['name'], deploydata, triggering_node: obj) # XXX make sure this doesn't force a save + deploy.notify(attrs[:cfg_plural], resource['name'], deploydata, triggering_node: obj) end new_cfg = resolveReferences(resource, deploy, obj) new_cfg.delete("cloud_id") @@ -632,7 +630,11 @@ def vacuum(bok, origin: nil, save: false, deploy: nil) end processed << new_cfg rescue Incomplete -#MU.log "#{attrs[:cfg_name]} #{resource['name']} didn't show up from findLitterMate", MU::WARN, details: deploy.original_config[attrs[:cfg_plural]].reject { |r| r['name'] != "" } + if keep_missing + processed << resource + else + MU.log "#{attrs[:cfg_name]} #{resource['name']} didn't show up from findLitterMate", MU::WARN, details: deploy.original_config[attrs[:cfg_plural]].reject { |r| r['name'] != "" } + end end } @@ -855,6 +857,21 @@ def generateStubDeploy(bok) deploy end + def self.deDuplicateName(kitten_cfg, res_class) + orig_name = kitten_cfg['name'].dup + if kitten_cfg['parent'] and kitten_cfg['parent'].respond_to?(:id) and kitten_cfg['parent'].id + kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['parent'].id + elsif kitten_cfg['project'] + kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['project'] + elsif kitten_cfg['region'] + kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['region'] + elsif kitten_cfg['cloud_id'] + kitten_cfg['name'] = kitten_cfg['name']+"-"+kitten_cfg['cloud_id'].gsub(/[^a-z0-9]/i, "-") + else + raise MU::Config::DuplicateNameError, "Saw duplicate #{res_class.cfg_name} name #{orig_name} and couldn't come up with a good way to differentiate them" + end + end + # Go through everything we've scraped and update our mappings of cloud ids # and bare name fields, so that resources can reference one another # portably by name. 
diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 7344e77e8..5bffd68a5 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -549,7 +549,7 @@ def SSHKey def notify(type, key, data, mu_name: nil, remove: false, triggering_node: nil, delayed_save: false) begin - MU::MommaCat.lock("deployment-notification") if !@no_artifacts + MU::MommaCat.lock("deployment-notification", deploy_id: @deploy_id) if !@no_artifacts if !@need_deploy_flush or @deployment.nil? or @deployment.empty? loadDeploy(true) # make sure we're saving the latest and greatest @@ -616,7 +616,7 @@ def notify(type, key, data, mu_name: nil, remove: false, triggering_node: nil, d save! if !delayed_save and !@no_artifacts end ensure - MU::MommaCat.unlock("deployment-notification") if !@no_artifacts + MU::MommaCat.unlock("deployment-notification", deploy_id: @deploy_id) if !@no_artifacts end end diff --git a/modules/mu/mommacat/naming.rb b/modules/mu/mommacat/naming.rb index bed81cce8..109335b57 100644 --- a/modules/mu/mommacat/naming.rb +++ b/modules/mu/mommacat/naming.rb @@ -93,7 +93,7 @@ def self.getChunkName(obj, array_of = nil) ['projects', 'habitats'].each { |key| if obj[key] and obj[key].is_a?(Array) - hab_str = obj[key].map { |p| + hab_str = obj[key].sort.map { |p| (p["name"] || p["id"]).gsub(/^.*?[^\/]+\/([^\/]+)$/, '\1') }.join(", ") name_string.gsub!(/^.*?[^\/]+\/([^\/]+)$/, '\1') if name_string diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index 003bb4d9d..b13ef004a 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -88,21 +88,25 @@ def self.locks; } end + # Overwrite this deployment's configuration with a new version. Save the # previous version as well. 
# @param new_conf [Hash]: A new configuration, fully resolved by {MU::Config} - def updateBasketofKittens(new_conf, skip_validation: false, save_now: false) + def updateBasketofKittens(new_conf, skip_validation: false, new_metadata: nil, save_now: false) loadDeploy if new_conf == @original_config return end + scrub_with = nil + # Make sure the new config that we were just handed resolves and makes # sense if !skip_validation f = Tempfile.new(@deploy_id) f.write JSON.parse(JSON.generate(new_conf)).to_yaml conf_engine = MU::Config.new(f.path) # will throw an exception if it's bad, adoption should catch this and cope reasonably + scrub_with = conf_engine.config f.close end @@ -115,6 +119,21 @@ def updateBasketofKittens(new_conf, skip_validation: false, save_now: false) config.close @original_config = new_conf.clone + + MU::Cloud.resource_types.each_pair { |res_type, attrs| + next if !@deployment.has_key?(attrs[:cfg_plural]) + deletia = [] + @deployment[attrs[:cfg_plural]].each_pair { |res_name, data| + orig_cfg = findResourceConfig(attrs[:cfg_plural], res_name, scrub_with) + + if orig_cfg.nil? + MU.log "#{res_type} #{res_name} no longer configured, will remove deployment metadata", MU::NOTICE + deletia << res_name + end + } + @deployment[attrs[:cfg_plural]].reject! { |k, v| deletia.include?(k) } + } + if save_now save! MU.log "New config saved to #{deploy_dir}/basket_of_kittens.json" @@ -157,11 +176,11 @@ def self.unlockAll # @param id [String]: The lock identifier to release. # @param nonblock [Boolean]: Whether to block while waiting for the lock. In non-blocking mode, we simply return false if the lock is not available. # return [false, nil] - def self.lock(id, nonblock = false, global = false) + def self.lock(id, nonblock = false, global = false, deploy_id: MU.deploy_id) raise MuError, "Can't pass a nil id to MU::MommaCat.lock" if id.nil? 
if !global - lockdir = "#{deploy_dir(MU.deploy_id)}/locks" + lockdir = "#{deploy_dir(deploy_id)}/locks" else lockdir = File.expand_path(MU.dataDir+"/locks") end @@ -196,11 +215,11 @@ def self.lock(id, nonblock = false, global = false) # Release a flock() lock. # @param id [String]: The lock identifier to release. - def self.unlock(id, global = false) + def self.unlock(id, global = false, deploy_id: MU.deploy_id) raise MuError, "Can't pass a nil id to MU::MommaCat.unlock" if id.nil? lockdir = nil if !global - lockdir = "#{deploy_dir(MU.deploy_id)}/locks" + lockdir = "#{deploy_dir(deploy_id)}/locks" else lockdir = File.expand_path(MU.dataDir+"/locks") end @@ -536,28 +555,9 @@ def loadObjects(delay_descriptor_load) type = attrs[:cfg_plural] next if !@deployment.has_key?(type) + deletia = {} @deployment[type].each_pair { |res_name, data| - orig_cfg = nil - if @original_config.has_key?(type) - @original_config[type].each { |resource| - if resource["name"] == res_name - orig_cfg = resource - break - end - } - end - - # Some Server objects originated from ServerPools, get their - # configs from there - if type == "servers" and orig_cfg.nil? and - @original_config.has_key?("server_pools") - @original_config["server_pools"].each { |resource| - if resource["name"] == res_name - orig_cfg = resource - break - end - } - end + orig_cfg = findResourceConfig(type, res_name) if orig_cfg.nil? 
MU.log "Failed to locate original config for #{attrs[:cfg_name]} #{res_name} in #{@deploy_id}", MU::WARN if !["firewall_rules", "databases", "storage_pools", "cache_clusters", "alarms"].include?(type) # XXX shaddap @@ -590,6 +590,7 @@ def loadObjects(delay_descriptor_load) end end } + } end @@ -692,5 +693,30 @@ def loadDeploy(deployment_json_only = false, set_context_to_me: true) } end + def findResourceConfig(type, name, config = @original_config) + orig_cfg = nil + if config.has_key?(type) + config[type].each { |resource| + if resource["name"] == name + orig_cfg = resource + break + end + } + end + + # Some Server objects originated from ServerPools, get their + # configs from there + if type == "servers" and orig_cfg.nil? and config.has_key?("server_pools") + config["server_pools"].each { |resource| + if resource["name"] == name + orig_cfg = resource + break + end + } + end + + orig_cfg + end + end #class end #module diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index 9a465d385..b5ebf7cdb 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -760,15 +760,11 @@ def toKitten(**args) next end - if foreign - { "id" => entity } - else - MU::Config::Ref.get( - id: entity, - cloud: "Google", - type: mu_entitytype - ) - end + MU::Config::Ref.get( + id: entity, + cloud: "Google", + type: mu_entitytype + ) end if entity_ref.nil? MU.log "I somehow ended up with a nil entity reference for #{entity_type} #{entity}", MU::ERR, details: [ bok, bindings ] From c383471541d61cfd3ce7911fe5273a0fda832639 Mon Sep 17 00:00:00 2001 From: John Stange Date: Sat, 25 Apr 2020 01:10:47 -0400 Subject: [PATCH 090/124] MommaCat: if lazy loading causes a thread deadlock, give up gracefully; MU::Cloud: #windows? 
is also for ServerPools --- modules/Gemfile.lock | 87 ++++++++++++++++++----------------- modules/mu/cloud/server.rb | 22 +++++---- modules/mu/config/vpc.rb | 2 +- modules/mu/mommacat/search.rb | 10 +++- 4 files changed, 66 insertions(+), 55 deletions(-) diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index 0314919c6..c5dd6f59d 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -53,11 +53,11 @@ GEM addressable (2.5.2) public_suffix (>= 2.0.2, < 4.0) ast (2.4.0) - aws-eventstream (1.0.3) - aws-sdk-core (2.11.481) + aws-eventstream (1.1.0) + aws-sdk-core (2.11.494) aws-sigv4 (~> 1.0) jmespath (~> 1.0) - aws-sigv4 (1.1.1) + aws-sigv4 (1.1.2) aws-eventstream (~> 1.0, >= 1.0.2) azure-core (0.1.15) faraday (~> 0.9) @@ -130,7 +130,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_analysis_services (0.17.2) ms_rest_azure (~> 0.11.0) - azure_mgmt_api_management (0.18.4) + azure_mgmt_api_management (0.19.0) ms_rest_azure (~> 0.11.1) azure_mgmt_appconfiguration (0.17.1) ms_rest_azure (~> 0.11.1) @@ -152,7 +152,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_cdn (0.17.3) ms_rest_azure (~> 0.11.0) - azure_mgmt_cognitive_services (0.19.0) + azure_mgmt_cognitive_services (0.19.1) ms_rest_azure (~> 0.11.1) azure_mgmt_commerce (0.17.1) ms_rest_azure (~> 0.11.0) @@ -194,7 +194,7 @@ GEM ms_rest_azure (~> 0.11.0) azure_mgmt_edgegateway (0.18.0) ms_rest_azure (~> 0.11.0) - azure_mgmt_event_grid (0.18.0) + azure_mgmt_event_grid (0.19.0) ms_rest_azure (~> 0.11.1) azure_mgmt_event_hub (0.18.0) ms_rest_azure (~> 0.11.1) @@ -212,6 +212,8 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_key_vault (0.17.5) ms_rest_azure (~> 0.11.1) + azure_mgmt_kubernetes_configuration (0.17.0) + ms_rest_azure (~> 0.11.1) azure_mgmt_kusto (0.19.1) ms_rest_azure (~> 0.11.1) azure_mgmt_labservices (0.17.1) @@ -246,9 +248,9 @@ GEM ms_rest_azure (~> 0.11.0) azure_mgmt_mysql (0.17.0) ms_rest_azure (~> 0.11.1) - azure_mgmt_netapp (0.18.3) + azure_mgmt_netapp (0.19.0) ms_rest_azure (~> 0.11.1) - 
azure_mgmt_network (0.23.1) + azure_mgmt_network (0.23.2) ms_rest_azure (~> 0.11.1) azure_mgmt_notification_hubs (0.17.2) ms_rest_azure (~> 0.11.0) @@ -272,9 +274,9 @@ GEM ms_rest_azure (~> 0.11.0) azure_mgmt_privatedns (0.17.1) ms_rest_azure (~> 0.11.0) - azure_mgmt_recovery_services (0.17.3) - ms_rest_azure (~> 0.11.0) - azure_mgmt_recovery_services_backup (0.18.0) + azure_mgmt_recovery_services (0.18.0) + ms_rest_azure (~> 0.11.1) + azure_mgmt_recovery_services_backup (0.18.1) ms_rest_azure (~> 0.11.1) azure_mgmt_recovery_services_site_recovery (0.17.2) ms_rest_azure (~> 0.11.0) @@ -312,7 +314,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_stor_simple8000_series (0.17.2) ms_rest_azure (~> 0.11.0) - azure_mgmt_storage (0.19.3) + azure_mgmt_storage (0.20.1) ms_rest_azure (~> 0.11.1) azure_mgmt_storagecache (0.17.1) ms_rest_azure (~> 0.11.1) @@ -334,7 +336,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_web (0.17.5) ms_rest_azure (~> 0.11.1) - azure_sdk (0.53.0) + azure_sdk (0.55.0) azure-storage (~> 0.14.0.preview) azure_cognitiveservices_anomalydetector (~> 0.17.0) azure_cognitiveservices_autosuggest (~> 0.17.1) @@ -367,7 +369,7 @@ GEM azure_mgmt_advisor (~> 0.17.1) azure_mgmt_alerts_management (~> 0.17.0) azure_mgmt_analysis_services (~> 0.17.2) - azure_mgmt_api_management (~> 0.18.4) + azure_mgmt_api_management (~> 0.19.0) azure_mgmt_appconfiguration (~> 0.17.1) azure_mgmt_attestation (~> 0.17.0) azure_mgmt_authorization (~> 0.18.4) @@ -378,7 +380,7 @@ GEM azure_mgmt_billing (~> 0.17.2) azure_mgmt_bot_service (~> 0.17.0) azure_mgmt_cdn (~> 0.17.3) - azure_mgmt_cognitive_services (~> 0.19.0) + azure_mgmt_cognitive_services (~> 0.19.1) azure_mgmt_commerce (~> 0.17.1) azure_mgmt_compute (~> 0.19.1) azure_mgmt_consumption (~> 0.18.0) @@ -399,7 +401,7 @@ GEM azure_mgmt_devtestlabs (~> 0.18.0) azure_mgmt_dns (~> 0.17.4) azure_mgmt_edgegateway (~> 0.18.0) - azure_mgmt_event_grid (~> 0.18.0) + azure_mgmt_event_grid (~> 0.19.0) azure_mgmt_event_hub (~> 0.18.0) 
azure_mgmt_features (~> 0.17.2) azure_mgmt_hanaonazure (~> 0.18.0) @@ -408,6 +410,7 @@ GEM azure_mgmt_iot_central (~> 0.19.0) azure_mgmt_iot_hub (~> 0.17.3) azure_mgmt_key_vault (~> 0.17.5) + azure_mgmt_kubernetes_configuration (~> 0.17.0) azure_mgmt_kusto (~> 0.19.1) azure_mgmt_labservices (~> 0.17.1) azure_mgmt_links (~> 0.17.2) @@ -425,8 +428,8 @@ GEM azure_mgmt_monitor (~> 0.17.5) azure_mgmt_msi (~> 0.17.1) azure_mgmt_mysql (~> 0.17.0) - azure_mgmt_netapp (~> 0.18.3) - azure_mgmt_network (~> 0.23.1) + azure_mgmt_netapp (~> 0.19.0) + azure_mgmt_network (~> 0.23.2) azure_mgmt_notification_hubs (~> 0.17.2) azure_mgmt_operational_insights (~> 0.17.2) azure_mgmt_operations_management (~> 0.17.0) @@ -438,8 +441,8 @@ GEM azure_mgmt_powerbi_dedicated (~> 0.17.0) azure_mgmt_powerbi_embedded (~> 0.17.1) azure_mgmt_privatedns (~> 0.17.1) - azure_mgmt_recovery_services (~> 0.17.3) - azure_mgmt_recovery_services_backup (~> 0.18.0) + azure_mgmt_recovery_services (~> 0.18.0) + azure_mgmt_recovery_services_backup (~> 0.18.1) azure_mgmt_recovery_services_site_recovery (~> 0.17.2) azure_mgmt_redis (~> 0.17.3) azure_mgmt_relay (~> 0.17.2) @@ -458,7 +461,7 @@ GEM azure_mgmt_sql (~> 0.19.0) azure_mgmt_sqlvirtualmachine (~> 0.18.1) azure_mgmt_stor_simple8000_series (~> 0.17.2) - azure_mgmt_storage (~> 0.19.3) + azure_mgmt_storage (~> 0.20.1) azure_mgmt_storagecache (~> 0.17.1) azure_mgmt_storagesync (~> 0.18.0) azure_mgmt_stream_analytics (~> 0.17.2) @@ -486,11 +489,11 @@ GEM solve (~> 4.0) thor (>= 0.20) builder (3.2.4) - chef (14.14.29) + chef (14.15.6) addressable bundler (>= 1.10) - chef-config (= 14.14.29) - chef-zero (>= 13.0) + chef-config (= 14.15.6) + chef-zero (>= 13.0, < 15.0) diff-lcs (~> 1.2, >= 1.2.4) erubis (~> 2.7) ffi (~> 1.9, >= 1.9.25) @@ -516,7 +519,7 @@ GEM specinfra (~> 2.10) syslog-logger (~> 1.6) uuidtools (~> 2.1.5) - chef-config (14.14.29) + chef-config (14.15.6) addressable fuzzyurl mixlib-config (>= 2.2.12, < 4.0) @@ -563,15 +566,15 @@ GEM concurrent-ruby 
(1.1.6) cookbook-omnifetch (0.9.1) mixlib-archive (>= 0.4, < 2.0) - cucumber-core (6.0.0) - cucumber-gherkin (~> 10.0, >= 10.0.0) - cucumber-messages (~> 10.0, >= 10.0.1) - cucumber-tag_expressions (~> 2.0, >= 2.0.2) - cucumber-gherkin (10.0.0) - cucumber-messages (~> 10.0, >= 10.0.1) - cucumber-messages (10.0.3) + cucumber-core (7.0.0) + cucumber-gherkin (~> 13.0, >= 13.0.0) + cucumber-messages (~> 12.1, >= 12.1.1) + cucumber-tag-expressions (~> 2.0, >= 2.0.4) + cucumber-gherkin (13.0.0) + cucumber-messages (~> 12.0, >= 12.0.0) + cucumber-messages (12.1.1) protobuf-cucumber (~> 3.10, >= 3.10.8) - cucumber-tag_expressions (2.0.2) + cucumber-tag-expressions (2.0.4) daemons (1.3.1) declarative (0.0.10) declarative-option (0.1.0) @@ -610,13 +613,13 @@ GEM representable (~> 3.0) retriable (>= 2.0, < 4.0) signet (~> 0.12) - googleauth (0.11.0) + googleauth (0.12.0) faraday (>= 0.17.3, < 2.0) jwt (>= 1.4, < 3.0) memoist (~> 0.16) multi_json (~> 1.11) os (>= 0.9, < 2.0) - signet (~> 0.12) + signet (~> 0.14) gssapi (1.3.0) ffi (>= 1.0.1) gyoku (1.3.1) @@ -694,7 +697,7 @@ GEM octokit (4.18.0) faraday (>= 0.9) sawyer (~> 0.8.0, >= 0.5.3) - ohai (14.14.0) + ohai (14.15.0) chef-config (>= 12.8, < 15) ffi (~> 1.9) ffi-yajl (~> 2.2) @@ -707,11 +710,11 @@ GEM systemu (~> 2.6.4) wmi-lite (~> 1.0) openssl-oaep (0.1.0) - optimist (3.0.0) + optimist (3.0.1) os (1.1.0) paint (1.0.1) parallel (1.19.1) - parser (2.7.0.5) + parser (2.7.1.1) ast (~> 2.4.0) plist (3.5.0) polyglot (0.3.5) @@ -751,7 +754,7 @@ GEM rspec_junit_formatter (0.2.3) builder (< 4) rspec-core (>= 2, < 4, != 2.12.0) - rubocop (0.81.0) + rubocop (0.82.0) jaro_winkler (~> 1.5.1) parallel (~> 1.10) parser (>= 2.7.0.1) @@ -776,7 +779,7 @@ GEM rspec-its specinfra (~> 2.72) sfl (2.3) - signet (0.13.2) + signet (0.14.0) addressable (~> 2.3) faraday (>= 0.17.3, < 2.0) jwt (>= 1.5, < 3.0) @@ -786,7 +789,7 @@ GEM solve (4.0.3) molinillo (~> 0.6) semverse (>= 1.1, < 4.0) - specinfra (2.82.13) + specinfra (2.82.15) net-scp 
net-ssh (>= 2.7) net-telnet (= 0.1.1) @@ -803,7 +806,7 @@ GEM tomlrb (1.3.0) treetop (1.6.10) polyglot (~> 0.3) - tzinfo (1.2.6) + tzinfo (1.2.7) thread_safe (~> 0.1) uber (0.1.0) unf (0.1.4) diff --git a/modules/mu/cloud/server.rb b/modules/mu/cloud/server.rb index 1bf0f08b3..08d4794f4 100644 --- a/modules/mu/cloud/server.rb +++ b/modules/mu/cloud/server.rb @@ -18,20 +18,22 @@ module MU class Cloud # Generic methods for all Server/ServerPool implementations - class Server + [:Server, :ServerPool].each { |name| + Object.const_get("MU").const_get("Cloud").const_get(name).class_eval { - def windows? - return true if %w{win2k16 win2k12r2 win2k12 win2k8 win2k8r2 win2k19 windows}.include?(@config['platform']) - begin - return true if cloud_desc.respond_to?(:platform) and cloud_desc.platform == "Windows" + def windows? + return true if %w{win2k16 win2k12r2 win2k12 win2k8 win2k8r2 win2k19 windows}.include?(@config['platform']) + begin + return true if cloud_desc.respond_to?(:platform) and cloud_desc.platform == "Windows" # XXX ^ that's AWS-speak, doesn't cover GCP or anything else; maybe we should require cloud layers to implement this so we can just call @cloudobj.windows? 
- rescue MU::MuError - return false + rescue MU::MuError + return false + end + false end - false - end - end + } + } end diff --git a/modules/mu/config/vpc.rb b/modules/mu/config/vpc.rb index 44e56c8c5..a2fc2640a 100644 --- a/modules/mu/config/vpc.rb +++ b/modules/mu/config/vpc.rb @@ -795,7 +795,7 @@ def self.processReference(vpc_block, parent_type, parent, configurator, sibling_ @@reference_cache[vpc_block] ||= ext_vpc if ok end rescue StandardError => e - raise MuError, e.inspect, [caller, e.backtrace] + raise MuError.new e.inspect, details: { "my call stack" => caller, "exception call stack" => e.backtrace } ensure if !ext_vpc and vpc_block['cloud'] != "CloudFormation" MU.log "Couldn't resolve VPC reference to a unique live VPC in #{parent_type} #{parent['name']} (called by #{caller[0]})", MU::ERR, details: vpc_block diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb index 0229e05c1..3fd40fab0 100644 --- a/modules/mu/mommacat/search.rb +++ b/modules/mu/mommacat/search.rb @@ -165,9 +165,15 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on if !@kittens.has_key?(type) return nil if @original_config[type].nil? 
- loadObjects(false) + begin + loadObjects(false) + rescue ThreadError => e + if e.message !~ /deadlock/ + raise e + end + end if @object_load_fails or !@kittens[type] - MU.log "#{@deploy_id}'s original config has #{@original_config[type].size.to_s} #{type}, but loadObjects did not populate any into @kittens", MU::ERR, @deployment.keys + MU.log "#{@deploy_id}'s original config has #{@original_config[type].size.to_s} #{type}, but loadObjects could not populate any from deployment metadata", MU::ERR @object_load_fails = true return nil end From cec82d98acf3f8d14af46eaaff9f5eed9c9a620b Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Tue, 28 Apr 2020 14:55:55 -0400 Subject: [PATCH 091/124] LoadBalancer: rules with no conditions are legal, at least in AWS, so allow them --- modules/mu/config/loadbalancer.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/mu/config/loadbalancer.rb b/modules/mu/config/loadbalancer.rb index ead393c1b..e4a8cc02d 100644 --- a/modules/mu/config/loadbalancer.rb +++ b/modules/mu/config/loadbalancer.rb @@ -309,14 +309,14 @@ def self.schema "items" => { "type" => "object", "description" => "Rules to route requests to different target groups based on the request path", - "required" => ["conditions", "order"], + "required" => ["order"], "additionalProperties" => false, "properties" => { "conditions" => { "type" => "array", "items" => { "type" => "object", - "description" => "Rule condition", + "description" => "Rule conditionl; if none are specified (or if none match) the default action will be performed.", "required" => ["field", "values"], "additionalProperties" => false, "properties" => { From 95b3003685ec67eecaba164b555252d4ef1c54a7 Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Tue, 28 Apr 2020 20:35:03 -0400 Subject: [PATCH 092/124] fix references to MU::Cloud::AWS in CloudFormation layer --- modules/mu/providers/cloudformation/alarm.rb | 6 +++--- 
.../mu/providers/cloudformation/cache_cluster.rb | 6 +++--- modules/mu/providers/cloudformation/collection.rb | 6 +++--- modules/mu/providers/cloudformation/database.rb | 10 +++++----- modules/mu/providers/cloudformation/dnszone.rb | 6 +++--- .../mu/providers/cloudformation/firewall_rule.rb | 6 +++--- .../mu/providers/cloudformation/loadbalancer.rb | 6 +++--- modules/mu/providers/cloudformation/log.rb | 6 +++--- modules/mu/providers/cloudformation/server.rb | 14 +++++++------- modules/mu/providers/cloudformation/server_pool.rb | 10 +++++----- modules/mu/providers/cloudformation/vpc.rb | 6 +++--- 11 files changed, 41 insertions(+), 41 deletions(-) diff --git a/modules/mu/providers/cloudformation/alarm.rb b/modules/mu/providers/cloudformation/alarm.rb index 800f9ac79..40a61b5ab 100644 --- a/modules/mu/providers/cloudformation/alarm.rb +++ b/modules/mu/providers/cloudformation/alarm.rb @@ -129,7 +129,7 @@ def self.cleanup(*args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::Alarm.schema(config) + MU::Cloud.resourceClass("AWS", "Alarm").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -137,14 +137,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::Alarm.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "Alarm").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::Alarm.isGlobal? + MU::Cloud.resourceClass("AWS", "Alarm").isGlobal? 
end diff --git a/modules/mu/providers/cloudformation/cache_cluster.rb b/modules/mu/providers/cloudformation/cache_cluster.rb index 4cd2ea632..436458b73 100644 --- a/modules/mu/providers/cloudformation/cache_cluster.rb +++ b/modules/mu/providers/cloudformation/cache_cluster.rb @@ -150,7 +150,7 @@ def self.cleanup(*args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::CacheCluster.schema(config) + MU::Cloud.resourceClass("AWS", "CacheCluster").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -158,14 +158,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::CacheCluster.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "CacheCluster").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::CacheCluster.isGlobal? + MU::Cloud.resourceClass("AWS", "CacheCluster").isGlobal? 
end end diff --git a/modules/mu/providers/cloudformation/collection.rb b/modules/mu/providers/cloudformation/collection.rb index e1b3d58d4..99d8cdd09 100644 --- a/modules/mu/providers/cloudformation/collection.rb +++ b/modules/mu/providers/cloudformation/collection.rb @@ -100,7 +100,7 @@ def self.cleanup(*args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::Collection.schema(config) + MU::Cloud.resourceClass("AWS", "Collection").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -108,14 +108,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::Collection.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "Collection").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::Collection.isGlobal? + MU::Cloud.resourceClass("AWS", "Collection").isGlobal? 
end end diff --git a/modules/mu/providers/cloudformation/database.rb b/modules/mu/providers/cloudformation/database.rb index 2daa2f844..7faf04689 100644 --- a/modules/mu/providers/cloudformation/database.rb +++ b/modules/mu/providers/cloudformation/database.rb @@ -64,8 +64,8 @@ def create basename = @config["name"].to_s basename = basename+@deploy.timestamp+MU.seed.downcase if !@config['scrub_mu_isms'] basename.gsub!(/[^a-z0-9]/i, "") - @config["db_name"] = MU::Cloud::AWS::Database.getName(basename, type: "dbname", config: @config) - @config['master_user'] = MU::Cloud::AWS::Database.getName(basename, type: "dbuser", config: @config) + @config["db_name"] = MU::Cloud.resourceClass("AWS", "Database").getName(basename, type: "dbname", config: @config) + @config['master_user'] = MU::Cloud.resourceClass("AWS", "Database").getName(basename, type: "dbuser", config: @config) if @config["create_cluster"] @cfm_name, @cfm_template = MU::Cloud::CloudFormation.cloudFormationBase("dbcluster", self, tags: @config['tags'], scrub_mu_isms: @config['scrub_mu_isms']) if @cfm_template.nil? @@ -249,7 +249,7 @@ def notify # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::Database.schema(config) + MU::Cloud.resourceClass("AWS", "Database").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. 
@@ -257,14 +257,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::Database.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "Database").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::Database.isGlobal? + MU::Cloud.resourceClass("AWS", "Database").isGlobal? end diff --git a/modules/mu/providers/cloudformation/dnszone.rb b/modules/mu/providers/cloudformation/dnszone.rb index 15221b8fb..2f2ac70e4 100644 --- a/modules/mu/providers/cloudformation/dnszone.rb +++ b/modules/mu/providers/cloudformation/dnszone.rb @@ -257,7 +257,7 @@ def self.genericMuDNSEntry(*args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::DNSZone.schema(config) + MU::Cloud.resourceClass("AWS", "DNSZone").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -265,14 +265,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::DNSZone.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "DNSZone").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::DNSZone.isGlobal? 
+ MU::Cloud.resourceClass("AWS", "DNSZone").isGlobal? end end diff --git a/modules/mu/providers/cloudformation/firewall_rule.rb b/modules/mu/providers/cloudformation/firewall_rule.rb index 94ad98fc6..d9dca5080 100644 --- a/modules/mu/providers/cloudformation/firewall_rule.rb +++ b/modules/mu/providers/cloudformation/firewall_rule.rb @@ -137,7 +137,7 @@ def addRule(hosts, # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(acl, config) # Just use the AWS implemention - MU::Cloud::AWS::FirewallRule.validateConfig(acl, config) + MU::Cloud.resourceClass("AWS", "FirewallRule").validateConfig(acl, config) end private @@ -291,14 +291,14 @@ def self.cleanup(*args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::FirewallRule.schema(config) + MU::Cloud.resourceClass("AWS", "FirewallRule").schema(config) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::FirewallRule.isGlobal? + MU::Cloud.resourceClass("AWS", "FirewallRule").isGlobal? 
end end #class diff --git a/modules/mu/providers/cloudformation/loadbalancer.rb b/modules/mu/providers/cloudformation/loadbalancer.rb index dd8274962..616eb624e 100644 --- a/modules/mu/providers/cloudformation/loadbalancer.rb +++ b/modules/mu/providers/cloudformation/loadbalancer.rb @@ -176,7 +176,7 @@ def self.cleanup(*args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::LoadBalancer.schema(config) + MU::Cloud.resourceClass("AWS", "LoadBalancer").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -184,14 +184,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::LoadBalancer.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "LoadBalancer").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::LoadBalancer.isGlobal? + MU::Cloud.resourceClass("AWS", "LoadBalancer").isGlobal? 
end end diff --git a/modules/mu/providers/cloudformation/log.rb b/modules/mu/providers/cloudformation/log.rb index 04b09f4d9..ad8b6b9c6 100644 --- a/modules/mu/providers/cloudformation/log.rb +++ b/modules/mu/providers/cloudformation/log.rb @@ -153,7 +153,7 @@ def self.cleanup(*args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::Log.schema(config) + MU::Cloud.resourceClass("AWS", "Log").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -161,14 +161,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::Log.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "Log").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::Log.isGlobal? + MU::Cloud.resourceClass("AWS", "Log").isGlobal? 
end end diff --git a/modules/mu/providers/cloudformation/server.rb b/modules/mu/providers/cloudformation/server.rb index 578c13ddc..b47859013 100644 --- a/modules/mu/providers/cloudformation/server.rb +++ b/modules/mu/providers/cloudformation/server.rb @@ -55,8 +55,8 @@ def initialize(mommacat: nil, kitten_cfg: nil, mu_name: nil, cloud_id: nil) scrub_mu_isms: @config['scrub_mu_isms'] ) - @disk_devices = MU::Cloud::AWS::Server.disk_devices - @ephemeral_mappings = MU::Cloud::AWS::Server.ephemeral_mappings + @disk_devices = MU::Cloud.resourceClass("AWS", "Server").disk_devices + @ephemeral_mappings = MU::Cloud.resourceClass("AWS", "Server").ephemeral_mappings if !mu_name.nil? @mu_name = mu_name @@ -190,7 +190,7 @@ def create cfm_volume_map = {} if @config["storage"] @config["storage"].each { |vol| - mapping, cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol) + mapping, cfm_mapping = MU::Cloud.resourceClass("AWS", "Server").convertBlockDeviceMapping(vol) configured_storage << mapping # vol_name, vol_template = MU::Cloud::CloudFormation.cloudFormationBase("volume", name: "volume"+@cfm_name+mapping[:device_name]) # MU::Cloud::CloudFormation.setCloudFormationProp(vol_template[vol_name], "Size", mapping[:ebs][:volume_size].to_s) @@ -353,7 +353,7 @@ def self.imageTimeStamp(ami_id, credentials: nil, region: nil) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::Server.schema(config) + MU::Cloud.resourceClass("AWS", "Server").schema(config) end # Confirm that the given instance size is valid for the given region. 
@@ -362,7 +362,7 @@ def self.schema(config) # @param region [String]: Region to check against # @return [String,nil] def self.validateInstanceType(size, region) - MU::Cloud::AWS::Server.validateInstanceType(size, region) + MU::Cloud.resourceClass("AWS", "Server").validateInstanceType(size, region) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -370,14 +370,14 @@ def self.validateInstanceType(size, region) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::Server.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "Server").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::Server.isGlobal? + MU::Cloud.resourceClass("AWS", "Server").isGlobal? 
end end #class diff --git a/modules/mu/providers/cloudformation/server_pool.rb b/modules/mu/providers/cloudformation/server_pool.rb index a272b713a..50a058a87 100644 --- a/modules/mu/providers/cloudformation/server_pool.rb +++ b/modules/mu/providers/cloudformation/server_pool.rb @@ -129,13 +129,13 @@ def create if launch_desc["storage"] launch_desc["storage"].each { |vol| - mapping, cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol) + mapping, cfm_mapping = MU::Cloud.resourceClass("AWS", "Server").convertBlockDeviceMapping(vol) if cfm_mapping.size > 0 MU::Cloud::CloudFormation.setCloudFormationProp(@cfm_template[@cfm_launch_name], "BlockDeviceMappings", cfm_mapping) end } end - MU::Cloud::AWS::Server.ephemeral_mappings.each { |mapping| + MU::Cloud.resourceClass("AWS", "Server.ephemeral_mappings").each { |mapping| MU::Cloud::CloudFormation.setCloudFormationProp(@cfm_template[@cfm_launch_name], "BlockDeviceMappings", { "DeviceName" => mapping[:device_name], "VirtualName" => mapping[:virtual_name] }) } @@ -263,7 +263,7 @@ def self.cleanup(*args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::ServerPool.schema(config) + MU::Cloud.resourceClass("AWS", "ServerPool").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. 
@@ -271,14 +271,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::ServerPool.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "ServerPool").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::ServerPool.isGlobal? + MU::Cloud.resourceClass("AWS", "ServerPool").isGlobal? end end diff --git a/modules/mu/providers/cloudformation/vpc.rb b/modules/mu/providers/cloudformation/vpc.rb index 3b9e820c4..620502b6b 100644 --- a/modules/mu/providers/cloudformation/vpc.rb +++ b/modules/mu/providers/cloudformation/vpc.rb @@ -301,7 +301,7 @@ def self.cleanup(**args) # @param config [MU::Config]: The calling MU::Config object # @return [Array]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource def self.schema(config) - MU::Cloud::AWS::VPC.schema(config) + MU::Cloud.resourceClass("AWS", "VPC").schema(config) end # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated. @@ -309,14 +309,14 @@ def self.schema(config) # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member # @return [Boolean]: True if validation succeeded, False otherwise def self.validateConfig(server, configurator) - MU::Cloud::AWS::VPC.validateConfig(server, configurator) + MU::Cloud.resourceClass("AWS", "VPC").validateConfig(server, configurator) end # Does this resource type exist as a global (cloud-wide) artifact, or # is it localized to a region/zone? # @return [Boolean] def self.isGlobal? - MU::Cloud::AWS::VPC.isGlobal? + MU::Cloud.resourceClass("AWS", "VPC").isGlobal? 
end end #class From d47d517c04296ea8d80fda830650de6cec19b23e Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Wed, 29 Apr 2020 00:35:59 -0400 Subject: [PATCH 093/124] MommaCat & Config: eliminate some circumstances where recursive calls to findLitterMate break stuff --- modules/mu/config/ref.rb | 2 +- modules/mu/mommacat/search.rb | 4 ++-- modules/mu/mommacat/storage.rb | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/mu/config/ref.rb b/modules/mu/config/ref.rb index 05ab6b609..977e7e5db 100644 --- a/modules/mu/config/ref.rb +++ b/modules/mu/config/ref.rb @@ -270,7 +270,7 @@ def kitten(mommacat = @mommacat, shallow: false, debug: false) return @obj end - if mommacat + if mommacat and !caller.grep(/`findLitterMate'/) # XXX the dumbest MU.log "Looking for #{@type} #{@name} #{@id} in deploy #{mommacat.deploy_id}", loglevel @obj = mommacat.findLitterMate(type: @type, name: @name, cloud_id: @id, credentials: @credentials, debug: debug) if @obj # initialize missing attributes, if we can diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb index 3fd40fab0..711db29cb 100644 --- a/modules/mu/mommacat/search.rb +++ b/modules/mu/mommacat/search.rb @@ -164,7 +164,7 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on @kitten_semaphore.synchronize { if !@kittens.has_key?(type) - return nil if @original_config[type].nil? + return nil if !@original_config or @original_config[type].nil? 
begin loadObjects(false) rescue ThreadError => e @@ -173,7 +173,7 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on end end if @object_load_fails or !@kittens[type] - MU.log "#{@deploy_id}'s original config has #{@original_config[type].size.to_s} #{type}, but loadObjects could not populate any from deployment metadata", MU::ERR + MU.log "#{@deploy_id}'s original config has #{@original_config[type].size.to_s} #{type}, but loadObjects could not populate any from deployment metadata", MU::ERR if !@object_load_fails @object_load_fails = true return nil end diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index b13ef004a..9f64f177f 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -564,8 +564,8 @@ def loadObjects(delay_descriptor_load) next end - if orig_cfg['vpc'] and orig_cfg['vpc'].is_a?(Hash) - ref = if orig_cfg['vpc']['id'] and orig_cfg['vpc']['id'].is_a?(Hash) + if orig_cfg['vpc'] + ref = if orig_cfg['vpc']['id'] orig_cfg['vpc']['id']['mommacat'] = self MU::Config::Ref.get(orig_cfg['vpc']['id']) else From b8bf182900ae5003528bdc80ddb81770f0550d22 Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Wed, 29 Apr 2020 13:13:41 -0400 Subject: [PATCH 094/124] AWS::LoadBalancer: quick and dirty support for redirects as default actions or as rule actions --- modules/mu/cloud/resource_base.rb | 7 ++- modules/mu/config/loadbalancer.rb | 55 +++++++++++++++++++++--- modules/mu/providers/aws/loadbalancer.rb | 47 +++++++++++++++----- modules/mu/providers/aws/server.rb | 12 +++++- 4 files changed, 103 insertions(+), 18 deletions(-) diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 492c58fb6..5919359f8 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -111,7 +111,12 @@ def initialize(**args) raise MuError, "Unknown error instantiating #{self}" if @cloudobj.nil? 
# These should actually call the method live instead of caching a static value PUBLIC_ATTRS.each { |a| - instance_variable_set(("@"+a.to_s).to_sym, @cloudobj.send(a)) + begin + instance_variable_set(("@"+a.to_s).to_sym, @cloudobj.send(a)) + rescue NoMethodError => e + MU.log "#{@cloudclass.name} failed to implement method '#{a}'", MU::ERR, details: e.message + raise e + end } @deploy ||= args[:mommacat] @deploy_id ||= @deploy.deploy_id if @deploy diff --git a/modules/mu/config/loadbalancer.rb b/modules/mu/config/loadbalancer.rb index e4a8cc02d..4f96b509b 100644 --- a/modules/mu/config/loadbalancer.rb +++ b/modules/mu/config/loadbalancer.rb @@ -64,6 +64,45 @@ def self.healthcheck } end + # Generate schema for a LoadBalancer redirect + # @return [Hash] + def self.redirect + { + "type" => "object", + "title" => "redirect", + "additionalProperties" => false, + "description" => "Instruct our LoadBalancer to redirect traffic to another host, port, and/or path.", + "properties" => { + "protocol" => { + "type" => "string", + "default" => "HTTPS" + }, + "port" => { + "type" => "integer", + "default" => 443 + }, + "host" => { + "type" => "string", + "default" => "\#{host}" + }, + "path" => { + "type" => "string", + "default" => "/\#{path}" + }, + "query" => { + "type" => "string", + "default" => "\#{query}" + }, + "status_code" => { + "type" => "integer", + "description" => "The HTTP status code when issuing a redirect", + "default" => 301, + "enum" => [301, 302] + }, + } + } + end + # Base configuration schema for a LoadBalancer # @return [Hash] def self.schema @@ -279,6 +318,7 @@ def self.schema "enum" => ["HTTP", "HTTPS", "TCP", "SSL", "UDP"], "description" => "Specifies the load balancer transport protocol to use for routing - HTTP, HTTPS, TCP, SSL, or UDP. SSL and UDP are only valid in Google Cloud." 
}, + "redirect" => MU::Config::LoadBalancer.redirect, "targetgroup" => { "type" => "string", "description" => "Which of our declared targetgroups should be the back-end for this listener's traffic" @@ -309,14 +349,14 @@ def self.schema "items" => { "type" => "object", "description" => "Rules to route requests to different target groups based on the request path", - "required" => ["order"], + "required" => ["order", "conditions"], "additionalProperties" => false, "properties" => { "conditions" => { "type" => "array", "items" => { "type" => "object", - "description" => "Rule conditionl; if none are specified (or if none match) the default action will be performed.", + "description" => "Rule conditionl; if none are specified (or if none match) the default action will be set.", "required" => ["field", "values"], "additionalProperties" => false, "properties" => { @@ -339,19 +379,24 @@ def self.schema "type" => "array", "items" => { "type" => "object", - "description" => "Rule action", - "required" => ["action", "targetgroup"], + "description" => "Rule action, which must specify one of +targetgroup+ or +redirect+", + "required" => ["action"], "additionalProperties" => false, "properties" => { "action" => { "type" => "string", "default" => "forward", "description" => "An action to take when a match occurs. Currently, only forwarding to a targetgroup is supported.", - "enum" => ["forward"] + "enum" => ["forward", "redirect"] }, + "redirect" => MU::Config::LoadBalancer.redirect, "targetgroup" => { "type" => "string", "description" => "Which of our declared targetgroups should be the recipient of this traffic. If left unspecified, will default to the default targetgroup of this listener." + }, + "redirect" => { + "type" => "string", + "description" => "Which of our declared targetgroups should be the recipient of this traffic. If left unspecified, will default to the default targetgroup of this listener." 
} } } diff --git a/modules/mu/providers/aws/loadbalancer.rb b/modules/mu/providers/aws/loadbalancer.rb index 494cb91c5..584e69ac9 100644 --- a/modules/mu/providers/aws/loadbalancer.rb +++ b/modules/mu/providers/aws/loadbalancer.rb @@ -239,16 +239,35 @@ def create end end + redirect_block = Proc.new { |r| + { + :protocol => r['protocol'], + :port => r['port'].to_s, + :host => r['host'], + :path => r['path'], + :query => r['query'], + :status_code => "HTTP_"+r['status_code'].to_s + } + } + if !@config['classic'] @config["listeners"].each { |l| - if !@targetgroups.has_key?(l['targetgroup']) - raise MuError, "Listener in #{@mu_name} configured for target group #{l['targetgroup']}, but I don't have data on a targetgroup by that name" - end - listen_descriptor = { - :default_actions => [{ + action = if l['redirect'] + { + :type => "redirect", + :redirect_config => redirect_block.call(l['redirect']) + } + else + if !@targetgroups.has_key?(l['targetgroup']) + raise MuError, "Listener in #{@mu_name} configured for target group #{l['targetgroup']}, but I don't have data on a targetgroup by that name" + end + { :target_group_arn => @targetgroups[l['targetgroup']].target_group_arn, :type => "forward" - }], + } + end + listen_descriptor = { + :default_actions => [ action ], :load_balancer_arn => lb.load_balancer_arn, :port => l['lb_port'], :protocol => l['lb_protocol'] @@ -276,10 +295,17 @@ def create :actions => [] } rule['actions'].each { |a| - rule_descriptor[:actions] << { - :target_group_arn => @targetgroups[a['targetgroup']].target_group_arn, - :type => a['action'] - } + rule_descriptor[:actions] << if a['action'] == "forward" + { + :target_group_arn => @targetgroups[a['targetgroup']].target_group_arn, + :type => a['action'] + } + elsif a['action'] == "redirect" + { + :redirect_config => redirect_block.call(rule['redirect']), + :type => a['action'] + } + end } MU::Cloud::AWS.elb2(region: @config['region'], credentials: @config['credentials']).create_rule(rule_descriptor) 
} @@ -904,6 +930,7 @@ def self.find(**args) return matches end + end end end diff --git a/modules/mu/providers/aws/server.rb b/modules/mu/providers/aws/server.rb index 7bc3db36d..5b36707e7 100644 --- a/modules/mu/providers/aws/server.rb +++ b/modules/mu/providers/aws/server.rb @@ -1555,7 +1555,11 @@ def self.terminateInstance(instance: nil, noop: false, id: nil, onlycloud: false return if !instance id ||= instance.instance_id - MU::MommaCat.lock(".cleanup-"+id) + begin + MU::MommaCat.lock(".cleanup-"+id) + rescue Errno::ENOENT => e + MU.log "No lock for terminating instance #{id} due to missing metadata", MU::DEBUG + end ips, names = getAddresses(instance, region: region, credentials: credentials) targets = ips +names @@ -1610,7 +1614,11 @@ def self.terminateInstance(instance: nil, noop: false, id: nil, onlycloud: false end MU.log "#{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""} terminated" if !noop - MU::MommaCat.unlock(".cleanup-"+id) + begin + MU::MommaCat.unlock(".cleanup-"+id) + rescue Errno::ENOENT => e + MU.log "No lock for terminating instance #{id} due to missing metadata", MU::DEBUG + end end From 87b0d1e5be68b0c62fd09748457158e491416a57 Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Wed, 29 Apr 2020 15:43:26 -0400 Subject: [PATCH 095/124] AWS::StoragePool: fix bug in notify; Config: dependency management effluvia --- modules/mu/config.rb | 8 ++-- modules/mu/mommacat.rb | 2 +- modules/mu/mommacat/storage.rb | 2 +- modules/mu/providers/aws/storage_pool.rb | 55 ++++++++---------------- 4 files changed, 26 insertions(+), 41 deletions(-) diff --git a/modules/mu/config.rb b/modules/mu/config.rb index a7ca830e4..d8b99743b 100644 --- a/modules/mu/config.rb +++ b/modules/mu/config.rb @@ -619,7 +619,8 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: if !descriptor["vpc"]["name"].nil? and haveLitterMate?(descriptor["vpc"]["name"], "vpcs") and descriptor["vpc"]['deploy_id'].nil? 
and - descriptor["vpc"]['id'].nil? + descriptor["vpc"]['id'].nil? and + !(cfg_name == "vpc" and descriptor['name'] == descriptor['vpc']['name']) MU::Config.addDependency(descriptor, descriptor['vpc']['name'], "vpc") siblingvpc = haveLitterMate?(descriptor["vpc"]["name"], "vpcs") @@ -745,7 +746,7 @@ def insertKitten(descriptor, type, delay_validation = false, ignore_duplicates: next if !acl_include["name"] and !acl_include["rule_name"] acl_include["name"] ||= acl_include["rule_name"] if haveLitterMate?(acl_include["name"], "firewall_rules") - MU::Config.addDependency(descriptor, acl_include["name"], "firewall_rule") + MU::Config.addDependency(descriptor, acl_include["name"], "firewall_rule", no_create_wait: (cfg_name == "vpc")) elsif acl_include["name"] MU.log shortclass.to_s+" #{descriptor['name']} depends on FirewallRule #{acl_include["name"]}, but no such rule declared.", MU::ERR ok = false @@ -884,6 +885,7 @@ def check_dependencies @config.each_pair { |type, values| next if !values.instance_of?(Array) _shortclass, cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(type, false) + next if !cfg_name values.each { |resource| next if !resource.kind_of?(Hash) or resource["dependencies"].nil? 
addme = [] @@ -942,7 +944,7 @@ def check_dependencies next if sib_dep['type'] != cfg_name or sib_dep['no_create_wait'] cousin = haveLitterMate?(sib_dep['name'], sib_dep['type']) if cousin and cousin['name'] == resource['name'] - MU.log "Circular dependency between #{type} #{resource['name']} <=> #{dependency['name']}", MU::ERR, details: [ resource['name'] => dependency, sibling['name'] => sib_dep ] + MU.log "Circular dependency between #{type} #{resource['name']} <=> #{dependency['type']} #{dependency['name']}", MU::ERR, details: [ resource['name'] => dependency, sibling['name'] => sib_dep ] ok = false end } diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 5bffd68a5..66ce5fd51 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -783,7 +783,7 @@ def syncLitter(nodeclasses = [], triggering_node: nil, save_only: false) end siblings = findLitterMate(type: "server", return_all: true) - return if siblings.nil? or siblings.empty? + return if siblings.nil? or (siblings.respond_to?(:empty?) and siblings.empty?) update_servers = [] siblings.each_pair { |mu_name, node| diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index 9f64f177f..cc51e09c8 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -124,7 +124,7 @@ def updateBasketofKittens(new_conf, skip_validation: false, new_metadata: nil, s next if !@deployment.has_key?(attrs[:cfg_plural]) deletia = [] @deployment[attrs[:cfg_plural]].each_pair { |res_name, data| - orig_cfg = findResourceConfig(attrs[:cfg_plural], res_name, scrub_with) + orig_cfg = findResourceConfig(attrs[:cfg_plural], res_name, (scrub_with || @original_config)) if orig_cfg.nil? 
MU.log "#{res_type} #{res_name} no longer configured, will remove deployment metadata", MU::NOTICE diff --git a/modules/mu/providers/aws/storage_pool.rb b/modules/mu/providers/aws/storage_pool.rb index ce58b6f70..5e33029a3 100644 --- a/modules/mu/providers/aws/storage_pool.rb +++ b/modules/mu/providers/aws/storage_pool.rb @@ -67,7 +67,7 @@ def create if target['vpc']["subnet_name"] subnet_obj = vpc.getSubnet(name: target['vpc']["subnet_name"]) if subnet_obj.nil? - raise MuError, "Failed to locate subnet from #{subnet} in StoragePool #{@config['name']}:#{target['name']}" + raise MuError, "Failed to locate subnet from #{target['vpc']["subnet_name"]} in StoragePool #{@config['name']}:#{target['name']}" end target['vpc']['subnet_id'] = subnet_obj.cloud_id end @@ -261,49 +261,29 @@ def notify targets = {} if @config['mount_points'] && !@config['mount_points'].empty? + mount_targets = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets( + file_system_id: storage_pool.file_system_id + ).mount_targets + @config['mount_points'].each { |mp| subnet = nil dependencies - mp_vpc = if mp['vpc'] and mp['vpc']['vpc_name'] - @deploy.findLitterMate(type: "vpc", name: mp['vpc']['vpc_name'], credentials: @config['credentials']) - elsif mp['vpc'] - MU::MommaCat.findStray( - @config['cloud'], - "vpcs", - deploy_id: mp['vpc']["deploy_id"], - credentials: @config['credentials'], - mu_name: mp['vpc']["mu_name"], - cloud_id: mp['vpc']['vpc_id'], - region: @config['region'], - dummy_ok: false - ).first -# XXX non-sibling, findStray version - end + mp_vpc = MU::Config::Ref.get(mp['vpc']).kitten - mount_targets = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets( - file_system_id: storage_pool.file_system_id - ).mount_targets -# subnet_obj = mp_vpc.subnets.select { |s| -# s.name == mp["vpc"]["subnet_name"] or s.cloud_id == mp["vpc"]["subnet_id"] -# }.first + subnet_obj = 
mp_vpc.subnets.select { |s| + s.name == mp["vpc"]["subnet_name"] or s.cloud_id == mp["vpc"]["subnet_id"] + }.first mount_target = nil - mp_vpc.subnets.each { |subnet_obj| - mount_targets.map { |t| - subnet_cidr_obj = NetAddr::IPv4Net.parse(subnet_obj.ip_block) - if subnet_cidr_obj.contains(NetAddr::IPv4.parse(t.ip_address)) - mount_target = t - subnet = subnet_obj.cloud_desc - end - } - break if mount_target + mount_targets.each { |t| + subnet_cidr_obj = NetAddr::IPv4Net.parse(subnet_obj.ip_block) + if subnet_cidr_obj.contains(NetAddr::IPv4.parse(t.ip_address)) + mount_target = t + subnet = subnet_obj.cloud_desc + break + end } - # mount_target = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets( - # mount_target_id: mp["cloud_id"] - # ).mount_targets.first - - targets[mp["name"]] = { "owner_id" => mount_target.owner_id, "cloud_id" => mount_target.mount_target_id, @@ -493,6 +473,9 @@ def self.validateConfig(pool, configurator) if pool['mount_points'] && !pool['mount_points'].empty? 
pool['mount_points'].each{ |mp| + if mp['vpc'] and mp['vpc']['name'] + MU::Config.addDependency(pool, mp['vpc']['name'], "vpc") + end if mp['ingress_rules'] fwname = "storage-#{mp['name']}" acl = { From d41e81124ad28ce113dc895f800aa271090b5212 Mon Sep 17 00:00:00 2001 From: ICRAS Mu Administrator Date: Thu, 30 Apr 2020 00:46:45 -0400 Subject: [PATCH 096/124] MommaCat: more resource lookup fixlets --- modules/Gemfile.lock | 4 ++-- modules/mu/cloud/resource_base.rb | 2 +- modules/mu/mommacat/search.rb | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index c5dd6f59d..03f18138b 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -54,7 +54,7 @@ GEM public_suffix (>= 2.0.2, < 4.0) ast (2.4.0) aws-eventstream (1.1.0) - aws-sdk-core (2.11.494) + aws-sdk-core (2.11.495) aws-sigv4 (~> 1.0) jmespath (~> 1.0) aws-sigv4 (1.1.2) @@ -475,7 +475,7 @@ GEM azure_service_fabric (~> 0.18.0) azure_service_fabric (0.18.0) ms_rest_azure (~> 0.11.1) - berkshelf (7.0.9) + berkshelf (7.0.10) chef (>= 13.6.52) chef-config cleanroom (~> 1.0) diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 5919359f8..9b1987e5a 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -545,7 +545,7 @@ def dependencies(use_cache: false, debug: false) sib_by_name = @deploy.findLitterMate(name: @config['vpc']['name'], type: "vpcs", return_all: true, habitat: @config['vpc']['project'], debug: debug) if sib_by_name.is_a?(Hash) if sib_by_name.size == 1 - @vpc = matches.first + @vpc = sib_by_name.values.first MU.log "Single VPC match for #{self}", loglevel, details: @vpc.to_s else # XXX ok but this is the wrong place for this really the config parser needs to sort this out somehow diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb index 711db29cb..ab394782a 100644 --- a/modules/mu/mommacat/search.rb +++ b/modules/mu/mommacat/search.rb @@ -210,9 
+210,10 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on } } + return matches if return_all and matches.size >= 1 + return matches.values.first if matches.size == 1 - return matches if return_all and matches.size > 1 } return nil From f97ca141eb60c1904f82d84396a3015050e82fbe Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 30 Apr 2020 02:44:46 -0400 Subject: [PATCH 097/124] VPC lookup micro-bug, redundant LoadBalancer schema key --- modules/Gemfile.lock | 22 +++++++++++----------- modules/mu/config/loadbalancer.rb | 4 ---- modules/mu/mommacat/storage.rb | 2 +- 3 files changed, 12 insertions(+), 16 deletions(-) diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index 03f18138b..f4be049e6 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -54,10 +54,10 @@ GEM public_suffix (>= 2.0.2, < 4.0) ast (2.4.0) aws-eventstream (1.1.0) - aws-sdk-core (2.11.495) + aws-sdk-core (2.11.497) aws-sigv4 (~> 1.0) jmespath (~> 1.0) - aws-sigv4 (1.1.2) + aws-sigv4 (1.1.3) aws-eventstream (~> 1.0, >= 1.0.2) azure-core (0.1.15) faraday (~> 0.9) @@ -156,7 +156,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_commerce (0.17.1) ms_rest_azure (~> 0.11.0) - azure_mgmt_compute (0.19.1) + azure_mgmt_compute (0.19.2) ms_rest_azure (~> 0.11.1) azure_mgmt_consumption (0.18.0) ms_rest_azure (~> 0.11.1) @@ -314,9 +314,9 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_stor_simple8000_series (0.17.2) ms_rest_azure (~> 0.11.0) - azure_mgmt_storage (0.20.1) + azure_mgmt_storage (0.21.0) ms_rest_azure (~> 0.11.1) - azure_mgmt_storagecache (0.17.1) + azure_mgmt_storagecache (0.18.0) ms_rest_azure (~> 0.11.1) azure_mgmt_storagesync (0.18.0) ms_rest_azure (~> 0.11.1) @@ -336,7 +336,7 @@ GEM ms_rest_azure (~> 0.11.1) azure_mgmt_web (0.17.5) ms_rest_azure (~> 0.11.1) - azure_sdk (0.55.0) + azure_sdk (0.56.0) azure-storage (~> 0.14.0.preview) azure_cognitiveservices_anomalydetector (~> 0.17.0) azure_cognitiveservices_autosuggest (~> 0.17.1) @@ -382,7 +382,7 @@ 
GEM azure_mgmt_cdn (~> 0.17.3) azure_mgmt_cognitive_services (~> 0.19.1) azure_mgmt_commerce (~> 0.17.1) - azure_mgmt_compute (~> 0.19.1) + azure_mgmt_compute (~> 0.19.2) azure_mgmt_consumption (~> 0.18.0) azure_mgmt_container_instance (~> 0.17.4) azure_mgmt_container_registry (~> 0.18.3) @@ -461,8 +461,8 @@ GEM azure_mgmt_sql (~> 0.19.0) azure_mgmt_sqlvirtualmachine (~> 0.18.1) azure_mgmt_stor_simple8000_series (~> 0.17.2) - azure_mgmt_storage (~> 0.20.1) - azure_mgmt_storagecache (~> 0.17.1) + azure_mgmt_storage (~> 0.21.0) + azure_mgmt_storagecache (~> 0.18.0) azure_mgmt_storagesync (~> 0.18.0) azure_mgmt_stream_analytics (~> 0.17.2) azure_mgmt_subscriptions (~> 0.18.2) @@ -714,7 +714,7 @@ GEM os (1.1.0) paint (1.0.1) parallel (1.19.1) - parser (2.7.1.1) + parser (2.7.1.2) ast (~> 2.4.0) plist (3.5.0) polyglot (0.3.5) @@ -789,7 +789,7 @@ GEM solve (4.0.3) molinillo (~> 0.6) semverse (>= 1.1, < 4.0) - specinfra (2.82.15) + specinfra (2.82.16) net-scp net-ssh (>= 2.7) net-telnet (= 0.1.1) diff --git a/modules/mu/config/loadbalancer.rb b/modules/mu/config/loadbalancer.rb index 4f96b509b..0d1ff22ef 100644 --- a/modules/mu/config/loadbalancer.rb +++ b/modules/mu/config/loadbalancer.rb @@ -393,10 +393,6 @@ def self.schema "targetgroup" => { "type" => "string", "description" => "Which of our declared targetgroups should be the recipient of this traffic. If left unspecified, will default to the default targetgroup of this listener." - }, - "redirect" => { - "type" => "string", - "description" => "Which of our declared targetgroups should be the recipient of this traffic. If left unspecified, will default to the default targetgroup of this listener." 
} } } diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index cc51e09c8..c2f344e05 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -565,7 +565,7 @@ def loadObjects(delay_descriptor_load) end if orig_cfg['vpc'] - ref = if orig_cfg['vpc']['id'] + ref = if orig_cfg['vpc']['id'] and orig_cfg['vpc']['id'].is_a?(Hash) orig_cfg['vpc']['id']['mommacat'] = self MU::Config::Ref.get(orig_cfg['vpc']['id']) else From f1ce5afb2cf838852915b81a3d4c395f41984f14 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 30 Apr 2020 21:29:42 -0400 Subject: [PATCH 098/124] AWS: lunatic writes dynamic pagination for API response calls, film at 11 --- modules/mu/providers/aws.rb | 78 +++++++++++++++++++++++++++++++++---- 1 file changed, 71 insertions(+), 7 deletions(-) diff --git a/modules/mu/providers/aws.rb b/modules/mu/providers/aws.rb index 4bba05dd8..a68033275 100644 --- a/modules/mu/providers/aws.rb +++ b/modules/mu/providers/aws.rb @@ -1454,19 +1454,83 @@ def initialize(region: nil, api: "EC2", credentials: nil) # Catch-all for AWS client methods. Essentially a pass-through with some # rescues for known silly endpoint behavior. def method_missing(method_sym, *arguments) + # make sure error symbols are loaded for our exception handling later require "aws-sdk-core" + require "aws-sdk-core/rds" + require "aws-sdk-core/ec2" + require "aws-sdk-core/route53" + require "aws-sdk-core/iam" + require "aws-sdk-core/efs" + require "aws-sdk-core/pricing" + require "aws-sdk-core/apigateway" + require "aws-sdk-core/ecs" + require "aws-sdk-core/eks" + require "aws-sdk-core/cloudwatchlogs" + require "aws-sdk-core/elasticloadbalancing" + require "aws-sdk-core/elasticloadbalancingv2" + require "aws-sdk-core/autoscaling" + require "aws-sdk-core/client_waiters" + require "aws-sdk-core/waiters/errors" retries = 0 begin MU.log "Calling #{method_sym} in #{@region}", MU::DEBUG, details: arguments - retval = nil - if !arguments.nil? 
and arguments.size == 1 - retval = @api.method(method_sym).call(arguments[0]) - elsif !arguments.nil? and arguments.size > 0 - retval = @api.method(method_sym).call(*arguments) - else - retval = @api.method(method_sym).call + + retval = if !arguments.nil? and arguments.size == 1 + @api.method(method_sym).call(arguments[0]) + elsif !arguments.nil? and arguments.size > 0 + @api.method(method_sym).call(*arguments) + else + @api.method(method_sym).call + end + + if !retval.nil? + begin + page_markers = [:marker, :next_token] + paginator = nil + new_page = nil + [:next_token, :marker].each { |m| + if !retval.nil? and retval.respond_to?(m) + paginator = m + new_page = retval.send(paginator) + break + end + } + + if paginator and new_page and !new_page.empty? + resp = retval.respond_to?(:__getobj__) ? retval.__getobj__ : retval + concat_to = resp.class.instance_methods(false).reject { |m| + m.to_s.match(/=$/) or m == paginator or resp.send(m).nil? or !resp.send(m).is_a?(Array) + } + if concat_to.size != 1 + MU.log "Tried to figure out where I might append paginated results for a #{resp.class.name}, but failed", MU::DEBUG, details: concat_to + else + concat_to = concat_to.first + new_args = arguments ? arguments.dup : [{}] + begin + if new_args.is_a?(Array) and new_args.size == 1 and new_args.first.is_a?(Hash) + new_args[0][paginator] = new_page + elsif new_args.is_a?(Hash) + new_args[paginator] = new_page + end + + resp = if !arguments.nil? and arguments.size == 1 + @api.method(method_sym).call(new_args[0]) + elsif !arguments.nil? and arguments.size > 0 + @api.method(method_sym).call(*new_args) + end + resp = resp.__getobj__ if resp.respond_to?(:__getobj__) + retval.send(concat_to).concat(resp.send(concat_to)) + new_page = resp.send(paginator) if !resp.nil? + end while !resp.nil? and new_page.nil? and new_page.empty? 
+ end + end + rescue StandardError => e + MU.log "Made a good-faith effort to auto-paginate API call to #{method_sym} and failed with #{e.message}", MU::DEBUG, details: arguments + raise e + end end + return retval rescue Aws::RDS::Errors::Throttling, Aws::EC2::Errors::InternalError, Aws::EC2::Errors::RequestLimitExceeded, Aws::EC2::Errors::Unavailable, Aws::Route53::Errors::Throttling, Aws::ElasticLoadBalancing::Errors::HttpFailureException, Aws::EC2::Errors::Http503Error, Aws::AutoScaling::Errors::Http503Error, Aws::AutoScaling::Errors::InternalFailure, Aws::AutoScaling::Errors::ServiceUnavailable, Aws::Route53::Errors::ServiceUnavailable, Aws::ElasticLoadBalancing::Errors::Throttling, Aws::RDS::Errors::ClientUnavailable, Aws::Waiters::Errors::UnexpectedError, Aws::ElasticLoadBalancing::Errors::ServiceUnavailable, Aws::ElasticLoadBalancingV2::Errors::Throttling, Seahorse::Client::NetworkingError, Aws::IAM::Errors::Throttling, Aws::EFS::Errors::ThrottlingException, Aws::Pricing::Errors::ThrottlingException, Aws::APIGateway::Errors::TooManyRequestsException, Aws::ECS::Errors::ThrottlingException, Net::ReadTimeout, Faraday::TimeoutError, Aws::CloudWatchLogs::Errors::ThrottlingException => e if e.class.name == "Seahorse::Client::NetworkingError" and e.message.match(/Name or service not known/) From ebae1dcb3ef6c2f0a63f14e1a2e964cd1cce215a Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 1 May 2020 03:33:34 -0400 Subject: [PATCH 099/124] AWS::Database: most of the important parts of #toKitten are done --- modules/mu/providers/aws/database.rb | 79 +++++++++++++++++++++++----- modules/mu/providers/aws/vpc.rb | 30 +++++++++++ 2 files changed, 97 insertions(+), 12 deletions(-) diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index 2f07dd21a..f2ecf9cc1 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -205,7 +205,6 @@ def self.find(**args) } if args[:cluster] or 
!args.has_key?(:cluster) fetch.call("cluster") - pp found end if !args[:cluster] fetch.call("instance") @@ -232,7 +231,7 @@ def self.find(**args) return found end - + # Reverse-map our cloud description into a runnable config hash. # We assume that any values we have in +@config+ are placeholders, and # calculate our own accordingly based on what's live in the cloud. @@ -242,22 +241,78 @@ def toKitten(**_args) "region" => @config['region'], "credentials" => @credentials, "cloud_id" => @cloud_id, - "create_cluster" => @config['create_cluster'] } + bok["create_cluster"] = true if @config['create_cluster'] + + # Don't adopt cluster members, they'll be picked up by the parent + # cluster + if !bok["create_cluster"] and cloud_desc.db_cluster_identifier and !cloud_desc.db_cluster_identifier.empty? + return nil + end noun = bok["create_cluster"] ? "cluster" : "db" tags = MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).list_tags_for_resource( resource_name: MU::Cloud::AWS::Database.getARN(@cloud_id, noun, "rds", region: @config['region'], credentials: @credentials) ).tag_list -MU.log "tags for #{noun} #{@cloud_id}", MU::WARN, details: tags - bok["name"] = @cloud_id -# cloud_desc.db_cluster_members -# arn = MU::Cloud::AWS::Database.getARN(resource.send(id_method), arn_type, "rds", region: region, credentials: credentials) -# tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list - -# pp cloud_desc - exit if bok["create_cluster"] -# realname = MU::Adoption.tagsToName(bok['tags']) + if tags and !tags.empty? 
+ bok['tags'] = MU.structToHash(tags, stringify_keys: true) + bok['name'] = MU::Adoption.tagsToName(bok['tags']) + end + bok["name"] ||= @cloud_id + bok['engine'] = cloud_desc.engine + bok['engine_version'] = cloud_desc.engine_version + bok['master_user'] = cloud_desc.master_username + bok['backup_retention_period'] = cloud_desc.backup_retention_period + + params = if bok['create_cluster'] + MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).describe_db_cluster_parameters( + db_cluster_parameter_group_name: cloud_desc.db_cluster_parameter_group + ).parameters + else + MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).describe_db_parameters( + db_parameter_group_name: cloud_desc.db_parameter_groups.first.db_parameter_group_name + ).parameters + end + + params.reject! { |p| ["engine-default", "system"].include?(p.source) } + if params and params.size > 0 + bok['parameter_group_parameters'] = params.map { |p| + { "key" => p.parameter_name, "value" => p.parameter_value } + } + end + + if bok['create_cluster'] + bok['cluster_node_count'] = cloud_desc.db_cluster_members.size + + sizes = [] + vpcs = [] + # we have no sensible way to handle heterogenous cluster members, so + # for now just assume they're all the same + cloud_desc.db_cluster_members.each { |db| + member = MU::Cloud::AWS::Database.find(cloud_id: db.db_instance_identifier, region: @config['region'], credentials: @credentials).values.first +# MU.log "derp", MU::NOTICE, details: member + sizes << member.db_instance_class + if member.db_subnet_group and member.db_subnet_group.vpc_id + vpcs << member.db_subnet_group + end + bok + } + sizes.uniq! + vpcs.uniq! + bok['size'] = sizes.sort.first if !sizes.empty? + if !vpcs.empty? 
+ myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: vpcs.sort.first.vpc_id, credentials: @credentials, region: @config['region'], dummy_ok: true, no_deploy_search: true).first + bok['vpc'] = myvpc.getReference(vpcs.sort.first.subnets.map { |s| s.subnet_identifier }) + end + else + bok['size'] = cloud_desc.db_instance_class + if cloud_desc.db_subnet_group + myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: cloud_desc.db_subnet_group.vpc_id, credentials: @credentials, region: @config['region'], dummy_ok: true, no_deploy_search: true).first + bok['vpc'] = myvpc.getReference(cloud_desc.db_subnet_group.subnets.map { |s| s.subnet_identifier }) + end + end + + MU.log bok['name'], MU::NOTICE, details: bok bok end diff --git a/modules/mu/providers/aws/vpc.rb b/modules/mu/providers/aws/vpc.rb index 9d26ea84c..02b791351 100644 --- a/modules/mu/providers/aws/vpc.rb +++ b/modules/mu/providers/aws/vpc.rb @@ -1295,6 +1295,36 @@ def self.defaultVpc(region, credentials) cfg_fragment end + # Return a {MU::Config::Ref} that indicates this VPC. + # @param subnet_ids [Array]: Optional list of subnet ids with which to infer a +subnet_pref+ parameter. + # @return [MU::Config::Ref] + def getReference(subnet_ids = []) + have_private = have_public = false + subnets.each { |s| + next if subnet_ids and !subnet_ids.empty? and !subnet_ids.include?(s.cloud_id) + if s.private? 
+ have_private = true + else + have_public = true + end + } + subnet_pref = if have_private == have_public + "any" + elsif have_private + "all_private" + elsif have_public + "all_public" + end + MU::Config::Ref.get( + id: @cloud_id, + cloud: "AWS", + credentials: @credentials, + region: @config['region'], + type: "vpcs", + subnet_pref: subnet_pref + ) + end + private def peerWith(peer) From 6ffde724c63f70ba99877a0ca034a9fc6486a4de Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 1 May 2020 04:30:07 -0400 Subject: [PATCH 100/124] AWS: fix inverted loop logic in auto-pagination --- modules/mu/providers/aws.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/mu/providers/aws.rb b/modules/mu/providers/aws.rb index a68033275..1cdd1632d 100644 --- a/modules/mu/providers/aws.rb +++ b/modules/mu/providers/aws.rb @@ -1522,7 +1522,7 @@ def method_missing(method_sym, *arguments) resp = resp.__getobj__ if resp.respond_to?(:__getobj__) retval.send(concat_to).concat(resp.send(concat_to)) new_page = resp.send(paginator) if !resp.nil? - end while !resp.nil? and new_page.nil? and new_page.empty? + end while !resp.nil? and !new_page.nil? and !new_page.empty? end end rescue StandardError => e From db3daf7b2b0839f463179e5e17eab1bcc6b7c687 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 5 May 2020 14:19:40 -0400 Subject: [PATCH 101/124] Config::Ref: add a #delete method; MommaCat: quash multiple INFO msgs about the daemon being up; Google: allow picking an org for credentials that can see multiple --- modules/mu/config/ref.rb | 16 +++++++++++++++- modules/mu/mommacat/daemon.rb | 5 ++++- modules/mu/providers/google.rb | 7 +++++-- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/modules/mu/config/ref.rb b/modules/mu/config/ref.rb index 977e7e5db..5a387975c 100644 --- a/modules/mu/config/ref.rb +++ b/modules/mu/config/ref.rb @@ -140,6 +140,13 @@ def [](attribute) end end + # Unset an attribute. Sort of. 
We can't actually do that, so nil it out + # and we get the behavior we want. + def delete(attribute) + attribute = ("@"+attribute).to_sym if attribute.to_s !~ /^@/ + instance_variable_set(attribute.to_sym, nil) + end + # Base configuration schema for declared kittens referencing other cloud objects. This is essentially a set of filters that we're going to pass to {MU::MommaCat.findStray}. # @param aliases [Array]: Key => value mappings to set backwards-compatibility aliases for attributes, such as the ubiquitous +vpc_id+ (+vpc_id+ => +id+). # @return [Hash] @@ -272,7 +279,14 @@ def kitten(mommacat = @mommacat, shallow: false, debug: false) if mommacat and !caller.grep(/`findLitterMate'/) # XXX the dumbest MU.log "Looking for #{@type} #{@name} #{@id} in deploy #{mommacat.deploy_id}", loglevel - @obj = mommacat.findLitterMate(type: @type, name: @name, cloud_id: @id, credentials: @credentials, debug: debug) + begin + @obj = mommacat.findLitterMate(type: @type, name: @name, cloud_id: @id, credentials: @credentials, debug: debug) + rescue StandardError => e + if e.message =~ /deadlock/ + MU.log "Saw a recursive deadlock trying to fetch kitten for Ref object in deploy #{mmommacat.deploy_id}", MU::ERR, details: to_h + end + raise e + end if @obj # initialize missing attributes, if we can @id ||= @obj.cloud_id @mommacat ||= mommacat diff --git a/modules/mu/mommacat/daemon.rb b/modules/mu/mommacat/daemon.rb index df0576c15..4887f1593 100644 --- a/modules/mu/mommacat/daemon.rb +++ b/modules/mu/mommacat/daemon.rb @@ -342,6 +342,8 @@ def self.start return $?.exitstatus end + @@notified_on_pid = {} + # Return true if the Momma Cat daemon appears to be running # @return [Boolean] def self.status @@ -352,7 +354,8 @@ def self.status pid = File.read(daemonPidFile).chomp.to_i begin Process.getpgid(pid) - MU.log "Momma Cat running with pid #{pid.to_s}" + MU.log "Momma Cat running with pid #{pid.to_s}", (@@notified_on_pid[pid] ? 
MU::DEBUG : MU::INFO) # shush + @@notified_on_pid[pid] = true return true rescue Errno::ESRCH end diff --git a/modules/mu/providers/google.rb b/modules/mu/providers/google.rb index 9ba5f56b6..57b5e7b2a 100644 --- a/modules/mu/providers/google.rb +++ b/modules/mu/providers/google.rb @@ -236,7 +236,7 @@ def self.projectToRef(project, config: nil, credentials: nil) # @param sibling_only [Boolean] # @return [MU::Config::Habitat,nil] def self.projectLookup(name, deploy = MU.mommacat, raise_on_fail: true, sibling_only: false) - project_obj = deploy.findLitterMate(type: "habitats", name: name) if deploy + project_obj = deploy.findLitterMate(type: "habitats", name: name) if deploy if !caller.grep(/`findLitterMate'/) # XXX the dumbest if !project_obj and !sibling_only resp = MU::MommaCat.findStray( @@ -1025,8 +1025,10 @@ def self.getOrg(credentials = nil, with_id: nil) "default" end + with_id ||= creds['org'] if creds['org'] return @@orgmap[credname] if @@orgmap.has_key?(credname) resp = MU::Cloud::Google.resource_manager(credentials: credname).search_organizations + if resp and resp.organizations # XXX no idea if it's possible to be a member of multiple orgs if !with_id @@ -1034,7 +1036,8 @@ def self.getOrg(credentials = nil, with_id: nil) return resp.organizations.first else resp.organizations.each { |org| - if org.name == with_id + if org.name == with_id or org.display_name == with_id or + org.name == "organizations/#{with_id}" @@orgmap[credname] = org return org end From ac2c9b526472f0aa69a7036b930c17e561af8670 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 5 May 2020 14:23:30 -0400 Subject: [PATCH 102/124] Google::Folder: better .find and #notify behavior --- modules/mu/mommacat/search.rb | 4 ++-- modules/mu/providers/google/folder.rb | 6 +++--- modules/mu/providers/google/habitat.rb | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb index ab394782a..3179f8765 100644 --- 
a/modules/mu/mommacat/search.rb +++ b/modules/mu/mommacat/search.rb @@ -164,7 +164,7 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on @kitten_semaphore.synchronize { if !@kittens.has_key?(type) - return nil if !@original_config or @original_config[type].nil? + return nil if !@original_config or @original_config[type].nil? or @original_config[type].empty? begin loadObjects(false) rescue ThreadError => e @@ -173,7 +173,7 @@ def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_on end end if @object_load_fails or !@kittens[type] - MU.log "#{@deploy_id}'s original config has #{@original_config[type].size.to_s} #{type}, but loadObjects could not populate any from deployment metadata", MU::ERR if !@object_load_fails + MU.log "#{@deploy_id}'s original config has #{@original_config[type].size == 1 ? "a" : @original_config[type].size.to_s} #{type}, but loadObjects could not populate anything from deployment metadata", MU::ERR if !@object_load_fails @object_load_fails = true return nil end diff --git a/modules/mu/providers/google/folder.rb b/modules/mu/providers/google/folder.rb index bef792522..c0b3b0ae1 100644 --- a/modules/mu/providers/google/folder.rb +++ b/modules/mu/providers/google/folder.rb @@ -138,7 +138,7 @@ def cloud_desc(use_cache: true) # Return the metadata for this folders's configuration # @return [Hash] def notify - desc = MU.structToHash(MU::Cloud::Google.folder(credentials: @config['credentials']).get_folder("folders/"+@cloud_id)) + desc = MU.structToHash(cloud_desc) desc["mu_name"] = @mu_name desc["parent"] = @parent desc["cloud_id"] = @cloud_id @@ -236,10 +236,10 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) # @return [Hash]: The cloud provider's complete descriptions of matching resources def self.find(**args) found = {} - # Recursively search a GCP folder hierarchy for a folder matching our # supplied name or identifier. 
def self.find_matching_folder(parent, name: nil, id: nil, credentials: nil) + resp = MU::Cloud::Google.folder(credentials: credentials).list_folders(parent: parent) if resp and resp.folders resp.folders.each { |f| @@ -278,6 +278,7 @@ def self.find_matching_folder(parent, name: nil, id: nil, credentials: nil) end else resp = MU::Cloud::Google.folder(credentials: args[:credentials]).list_folders(parent: parent) + if resp and resp.folders resp.folders.each { |folder| next if folder.lifecycle_state == "DELETE_REQUESTED" @@ -310,7 +311,6 @@ def toKitten(**args) bok['cloud_id'] = cloud_desc.name bok['name'] = cloud_desc.display_name#+bok['cloud_id'] # only way to guarantee uniqueness if cloud_desc.parent.match(/^folders\/(.*)/) -MU.log bok['display_name']+" generating reference", MU::NOTICE, details: cloud_desc.parent bok['parent'] = MU::Config::Ref.get( id: cloud_desc.parent, cloud: "Google", diff --git a/modules/mu/providers/google/habitat.rb b/modules/mu/providers/google/habitat.rb index b1d386b88..ec4b86538 100644 --- a/modules/mu/providers/google/habitat.rb +++ b/modules/mu/providers/google/habitat.rb @@ -285,7 +285,7 @@ def self.find(**args) next if p.lifecycle_state == "DELETE_REQUESTED" found[p.project_id] = p } - @@list_projects_cache = found + @@list_projects_cache = found.clone end found From 3c7bb7d836fcc08369ec5043283a4231cacfd3ef Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 5 May 2020 19:13:24 -0400 Subject: [PATCH 103/124] Adoption: have --diff cope competently when a diff resolves to multiple stored deploys --- modules/mu/adoption.rb | 95 ++++++++++++++++++++++++++++++------------ 1 file changed, 69 insertions(+), 26 deletions(-) diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index f4191708a..391da522e 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -67,7 +67,6 @@ def scrapeClouds() cloudclass.listCredentials.each { |credset| next if @sources and !@sources.include?(credset) - cfg = cloudclass.credConfig(credset) 
if cfg and cfg['restrict_to_habitats'] cfg['restrict_to_habitats'] << cfg['project'] if cfg['project'] @@ -117,7 +116,7 @@ def scrapeClouds() if found and found.size > 0 if resclass.cfg_plural == "habitats" - found.reject! { |h| !cloudclass.listHabitats(credset).include?(h) } + found.reject! { |h| !cloudclass.listHabitats(credset).include?(h.cloud_id) } end MU.log "Found #{found.size.to_s} raw #{resclass.cfg_plural} in #{cloud}" @scraped[type] ||= {} @@ -207,7 +206,34 @@ def generateBaskets(prefix: "") prefix = "mu" if prefix.empty? # so that appnames aren't ever empty end + # Find any previous deploys with this particular profile, which we'll use + # later for --diff. + @existing_deploys = {} + @existing_deploys_by_id = {} + @origins = {} + @types_found_in = {} + groupings.each_pair { |appname, types| + allowed_types = @types.map { |t| MU::Cloud.resource_types[t][:cfg_plural] } + next if (types & allowed_types).size == 0 + origin = { + "appname" => prefix+appname, + "types" => (types & allowed_types).sort, + "habitats" => @habitats.sort, + "group_by" => @group_by.to_s + } + + @existing_deploys[appname] = MU::MommaCat.findMatchingDeploy(origin) + @existing_deploys_by_id[@existing_deploys[appname].deploy_id] = @existing_deploys[appname] + @origins[appname] = origin + origin['types'].each { |t| + @types_found_in[t] = @existing_deploys[appname] + } + } + groupings.each_pair { |appname, types| + allowed_types = @types.map { |t| MU::Cloud.resource_types[t][:cfg_plural] } + next if (types & allowed_types).size == 0 + bok = { "appname" => prefix+appname } if @scrub_mu_isms bok["scrub_mu_isms"] = true @@ -217,22 +243,12 @@ def generateBaskets(prefix: "") end count = 0 - allowed_types = @types.map { |t| MU::Cloud.resource_types[t][:cfg_plural] } - next if (types & allowed_types).size == 0 - origin = { - "appname" => bok['appname'], - "types" => (types & allowed_types).sort, - "habitats" => @habitats.sort, - "group_by" => @group_by.to_s - } - - deploy = 
MU::MommaCat.findMatchingDeploy(origin) if @diff - if !deploy - MU.log "--diff was set but I failed to find a deploy like me to compare to", MU::ERR, details: origin + if !@existing_deploys[appname] + MU.log "--diff was set but I failed to find a deploy like '#{appname}' to compare to (have #{@existing_deploys.keys.join(", ")})", MU::ERR, details: @origins[appname] exit 1 else - MU.log "Will diff current live resources against #{deploy.deploy_id}", MU::NOTICE, details: origin + MU.log "Will diff current live resources against #{@existing_deploys[appname].deploy_id}", MU::NOTICE, details: @origins[appname] end end @@ -322,31 +338,31 @@ def generateBaskets(prefix: "") MU.log "Minimizing footprint of #{count.to_s} found resources", MU::DEBUG generated_deploy = generateStubDeploy(bok) - @boks[bok['appname']] = vacuum(bok, origin: origin, deploy: generated_deploy, save: @savedeploys) + @boks[bok['appname']] = vacuum(bok, origin: @origins[appname], deploy: generated_deploy, save: @savedeploys) - if @diff and !deploy + if @diff and !@existing_deploys[appname] MU.log "diff flag set, but no comparable deploy provided for #{bok['appname']}", MU::ERR exit 1 end - if deploy and @diff - prev_vacuumed = vacuum(deploy.original_config, deploy: deploy, keep_missing: true, copy_from: generated_deploy) + if @diff + prev_vacuumed = vacuum(@existing_deploys[appname].original_config, deploy: @existing_deploys[appname], keep_missing: true, copy_from: generated_deploy) prevcfg = MU::Config.manxify(prev_vacuumed) if !prevcfg - MU.log "#{deploy.deploy_id} didn't have a working original config for me to compare", MU::ERR + MU.log "#{@existing_deploys[appname].deploy_id} didn't have a working original config for me to compare", MU::ERR exit 1 end newcfg = MU::Config.manxify(@boks[bok['appname']]) - report = prevcfg.diff(newcfg) + if report if MU.muCfg['adopt_change_notify'] - notifyChanges(deploy, report.freeze) + notifyChanges(@existing_deploys[appname], report.freeze) end if @merge - MU.log 
"Saving changes to #{deploy.deploy_id}" - deploy.updateBasketofKittens(newcfg, save_now: true) + MU.log "Saving changes to #{@existing_deploys[appname].deploy_id}" + @existing_deploys[appname].updateBasketofKittens(newcfg, save_now: true) end end @@ -689,11 +705,33 @@ def vacuum(bok, origin: nil, save: false, deploy: nil, copy_from: nil, keep_miss end def resolveReferences(cfg, deploy, parent) + mask_deploy_id = false + + check_deploy_id = Proc.new { |cfgblob| + (deploy and + (cfgblob.is_a?(MU::Config::Ref) or cfgblob.is_a?(Hash)) and + cfgblob['deploy_id'] and + cfgblob['deploy_id'] != deploy.deploy_id and + @diff and + @types_found_in[cfgblob['type']] and + @types_found_in[cfgblob['type']].deploy_id == cfgblob['deploy_id'] + ) + } + + mask_deploy_id = check_deploy_id.call(cfg) + if cfg.is_a?(MU::Config::Ref) - cfg.kitten(deploy) || cfg.kitten + if mask_deploy_id + cfg.delete("deploy_id") + cfg.delete("mommacat") + cfg.kitten(deploy) + else + cfg.kitten(deploy) || cfg.kitten + end + hashcfg = cfg.to_h - if cfg.kitten(deploy) + if cfg.kitten littermate = deploy.findLitterMate(type: cfg.type, name: cfg.name, cloud_id: cfg.id, habitat: cfg.habitat) if littermate and littermate.config['name'] @@ -786,6 +824,11 @@ def resolveReferences(cfg, deploy, parent) cfg = new_array.uniq end + if mask_deploy_id or check_deploy_id.call(cfg) + cfg.delete("deploy_id") + MU.log "#{parent} in #{deploy.deploy_id} references something in #{@types_found_in[cfg['type']].deploy_id}, ditching extraneous deploy_id", MU::DEBUG, details: cfg.to_h + end + cfg end From 3505d0936ba75f0b1a5c7d1f69a6730d088fbd23 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 6 May 2020 11:30:38 -0400 Subject: [PATCH 104/124] AWS::Database: partially-correct regrooming behavior --- modules/Gemfile.lock | 10 +- modules/mu/providers/aws.rb | 1 + modules/mu/providers/aws/database.rb | 205 ++++++++++++++++++--------- 3 files changed, 144 insertions(+), 72 deletions(-) diff --git a/modules/Gemfile.lock 
b/modules/Gemfile.lock index f4be049e6..5695e727b 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -54,7 +54,7 @@ GEM public_suffix (>= 2.0.2, < 4.0) ast (2.4.0) aws-eventstream (1.1.0) - aws-sdk-core (2.11.497) + aws-sdk-core (2.11.501) aws-sigv4 (~> 1.0) jmespath (~> 1.0) aws-sigv4 (1.1.3) @@ -739,8 +739,8 @@ GEM rspec-core (~> 3.9.0) rspec-expectations (~> 3.9.0) rspec-mocks (~> 3.9.0) - rspec-core (3.9.1) - rspec-support (~> 3.9.1) + rspec-core (3.9.2) + rspec-support (~> 3.9.3) rspec-expectations (3.9.1) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.9.0) @@ -750,7 +750,7 @@ GEM rspec-mocks (3.9.1) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.9.0) - rspec-support (3.9.2) + rspec-support (3.9.3) rspec_junit_formatter (0.2.3) builder (< 4) rspec-core (>= 2, < 4, != 2.12.0) @@ -833,7 +833,7 @@ GEM rubyzip (~> 2.0) winrm (~> 2.0) wmi-lite (1.0.5) - yard (0.9.24) + yard (0.9.25) zeitwerk (2.3.0) PLATFORMS diff --git a/modules/mu/providers/aws.rb b/modules/mu/providers/aws.rb index 1cdd1632d..da992993e 100644 --- a/modules/mu/providers/aws.rb +++ b/modules/mu/providers/aws.rb @@ -1519,6 +1519,7 @@ def method_missing(method_sym, *arguments) elsif !arguments.nil? and arguments.size > 0 @api.method(method_sym).call(*new_args) end + break if resp.nil? resp = resp.__getobj__ if resp.respond_to?(:__getobj__) retval.send(concat_to).concat(resp.send(concat_to)) new_page = resp.send(paginator) if !resp.nil? 
diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index f2ecf9cc1..7445f7b14 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -79,6 +79,7 @@ def initialize(**args) end @mu_name.gsub(/(--|-$)/i, "").gsub(/(_)/, "-").gsub!(/^[^a-z]/i, "") + @config["parameter_group_name"] ||= @mu_name if args[:from_cloud_desc] and args[:from_cloud_desc].is_a?(Aws::RDS::Types::DBCluster) @config['create_cluster'] = true @@ -128,8 +129,7 @@ def create createSubnetGroup if @config.has_key?("parameter_group_family") - @config["parameter_group_name"] = @mu_name - createDBParameterGroup(true) + manageDbParameterGroup(true) end @config["cluster_identifier"] ||= @cloud_id @@ -194,14 +194,10 @@ def self.find(**args) else fetch = Proc.new { |noun| - marker = nil - begin - resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).send("describe_db_#{noun}s".to_sym) - marker = resp.marker - resp.send("db_#{noun}s").each { |db| - found[db.send("db_#{noun}_identifier".to_sym)] = db - } - end while !marker.nil? + resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).send("describe_db_#{noun}s".to_sym) + resp.send("db_#{noun}s").each { |db| + found[db.send("db_#{noun}_identifier".to_sym)] = db + } } if args[:cluster] or !args.has_key?(:cluster) fetch.call("cluster") @@ -242,15 +238,14 @@ def toKitten(**_args) "credentials" => @credentials, "cloud_id" => @cloud_id, } - bok["create_cluster"] = true if @config['create_cluster'] # Don't adopt cluster members, they'll be picked up by the parent # cluster - if !bok["create_cluster"] and cloud_desc.db_cluster_identifier and !cloud_desc.db_cluster_identifier.empty? + if !@config["create_cluster"] and cloud_desc.db_cluster_identifier and !cloud_desc.db_cluster_identifier.empty? return nil end - noun = bok["create_cluster"] ? "cluster" : "db" + noun = @config["create_cluster"] ? 
"cluster" : "db" tags = MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).list_tags_for_resource( resource_name: MU::Cloud::AWS::Database.getARN(@cloud_id, noun, "rds", region: @config['region'], credentials: @credentials) ).tag_list @@ -263,6 +258,7 @@ def toKitten(**_args) bok['engine_version'] = cloud_desc.engine_version bok['master_user'] = cloud_desc.master_username bok['backup_retention_period'] = cloud_desc.backup_retention_period + bok["create_cluster"] = true if @config['create_cluster'] params = if bok['create_cluster'] MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).describe_db_cluster_parameters( @@ -276,13 +272,31 @@ def toKitten(**_args) params.reject! { |p| ["engine-default", "system"].include?(p.source) } if params and params.size > 0 - bok['parameter_group_parameters'] = params.map { |p| + bok[(bok['create_cluster'] ? "cluster_" : "")+'parameter_group_parameters'] = params.map { |p| { "key" => p.parameter_name, "value" => p.parameter_value } } end + bok['add_firewall_rules'] = cloud_desc.vpc_security_groups.map { |sg| + MU::Config::Ref.get( + id: sg.vpc_security_group_id, + cloud: "AWS", + credentials: @credentials, + region: @config['region'], + type: "firewall_rules", + ) + } + bok['preferred_backup_window'] = cloud_desc.preferred_backup_window + bok['preferred_maintenance_window'] = cloud_desc.preferred_maintenance_window + bok['backup_retention_period'] = cloud_desc.backup_retention_period if cloud_desc.backup_retention_period > 1 + bok['multi_az_on_groom'] = true if cloud_desc.multi_az + bok['storage_encrypted'] = true if cloud_desc.storage_encrypted + bok['auto_minor_version_upgrade'] = true if cloud_desc.auto_minor_version_upgrade + if bok['create_cluster'] bok['cluster_node_count'] = cloud_desc.db_cluster_members.size + bok['cluster_mode'] = cloud_desc.engine_mode + bok['port'] = cloud_desc.port sizes = [] vpcs = [] @@ -290,7 +304,7 @@ def toKitten(**_args) # for now just assume they're all 
the same cloud_desc.db_cluster_members.each { |db| member = MU::Cloud::AWS::Database.find(cloud_id: db.db_instance_identifier, region: @config['region'], credentials: @credentials).values.first -# MU.log "derp", MU::NOTICE, details: member +MU.log "derp", MU::WARN, details: member sizes << member.db_instance_class if member.db_subnet_group and member.db_subnet_group.vpc_id vpcs << member.db_subnet_group @@ -310,10 +324,30 @@ def toKitten(**_args) myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: cloud_desc.db_subnet_group.vpc_id, credentials: @credentials, region: @config['region'], dummy_ok: true, no_deploy_search: true).first bok['vpc'] = myvpc.getReference(cloud_desc.db_subnet_group.subnets.map { |s| s.subnet_identifier }) end + bok['storage_type'] = cloud_desc.storage_type + bok['storage'] = cloud_desc.allocated_storage + bok['license_model'] = cloud_desc.license_model + bok['publicly_accessible'] = true if cloud_desc.publicly_accessible + bok['port'] = cloud_desc.endpoint.port + + if cloud_desc.read_replica_source_db_instance_identifier + bok['read_replica_of'] = MU::Config::Ref.get( + id: cloud_desc.read_replica_source_db_instance_identifier.split(/:/).last, + name: cloud_desc.read_replica_source_db_instance_identifier.split(/:/).last, + cloud: "AWS", + region: cloud_desc.read_replica_source_db_instance_identifier.split(/:/)[3], + credentials: @credentials, + type: "databases", + ) + end end - MU.log bok['name'], MU::NOTICE, details: bok - + if cloud_desc.enabled_cloudwatch_logs_exports and + cloud_desc.enabled_cloudwatch_logs_exports.size > 0 +MU.log bok['name'], MU::NOTICE, details: { "desc" => cloud_desc, "bok" => bok } +# bok['cloudwatch_logs'] = + end +MU.log bok['name'], MU::NOTICE, details: cloud_desc if bok['name'] == "pgcluster" bok end @@ -383,25 +417,39 @@ def createSubnetGroup end # Create a database parameter group. 
- def createDBParameterGroup(cluster = false) + def manageDbParameterGroup(cluster = false, create: true) + name_param = cluster ? :db_cluster_parameter_group_name : :db_parameter_group_name + fieldname = cluster ? "cluster_parameter_group_parameters" : "db_parameter_group_parameters" + params = { db_parameter_group_family: @config["parameter_group_family"], description: "Parameter group for #{@mu_name}", tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } } - params[cluster ? :db_cluster_parameter_group_name : :db_parameter_group_name] = @config["parameter_group_name"] - MU.log "Creating a #{cluster ? "cluster" : "database" } parameter group #{@config["parameter_group_name"]}" + params[name_param] = @config["parameter_group_name"] + + if create + MU.log "Creating a #{cluster ? "cluster" : "database" } parameter group #{@config["parameter_group_name"]}" + + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send(cluster ? :create_db_cluster_parameter_group : :create_db_parameter_group, params) + end - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send(cluster ? :create_db_cluster_parameter_group : :create_db_parameter_group, params) - fieldname = cluster ? "cluster_parameter_group_parameters" : "db_parameter_group_parameters" - if @config[fieldname] && !@config[fieldname].empty? + if @config[fieldname] and !@config[fieldname].empty? + + old_values = MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).send(cluster ? :describe_db_cluster_parameters : :describe_db_parameters, { name_param => @config["parameter_group_name"] } ).parameters + old_values.map! 
{ |p| [p.parameter_name, p.parameter_value] }.flatten + old_values = old_values.to_h + params = [] @config[fieldname].each { |item| + next if old_values[item["name"]] == item['value'] params << {parameter_name: item['name'], parameter_value: item['value'], apply_method: item['apply_method']} } + return if params.empty? + + MU.log "Modifiying parameter group #{@config["parameter_group_name"]}", MU::NOTICE, details: params - MU.log "Modifiying parameter group #{@config["parameter_group_name"]}" if cluster MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster_parameter_group( db_cluster_parameter_group_name: @config["parameter_group_name"], @@ -419,6 +467,7 @@ def createDBParameterGroup(cluster = false) # Called automatically by {MU::Deploy#createResources} def groom if @config["create_cluster"] + manageDbParameterGroup(true, create: false) @config['cluster_node_count'] ||= 1 if @config['cluster_mode'] == "serverless" MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_current_db_cluster_capacity( @@ -427,24 +476,48 @@ def groom ) end else + manageDbParameterGroup(create: false) # Run SQL on deploy if @config['run_sql_on_deploy'] run_sql_commands end - # set multi-az on deploy - if @config['multi_az_on_deploy'] - if !database.multi_az - MU.log "Setting multi-az on #{@config['identifier']}" - MU.retrier([Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::InvalidDBInstanceState], wait: 15, max: 15) { - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance( - db_instance_identifier: @config['identifier'], - apply_immediately: true, - multi_az: true - ) - } + mods = { + db_instance_identifier: @cloud_id, +# master_user_password: @config["password"], + } + + if !cloud_desc.multi_az and (@config['multi_az_on_deploy'] or @config['multi_az_on_create']) + mods[:multi_az] = true + end + +#pp cloud_desc + [:backup_retention_period, 
:preferred_backup_window, :preferred_maintenance_window, :allow_major_version_upgrade, :auto_minor_version_upgrade].each { |field| + if @config[field.to_s] and + cloud_desc.respond_to?(field) and + cloud_desc.send(field) != @config[field.to_s] + mods[field] = @config[field.to_s] end + } + + mods[:allocated_storage] = @config['storage'] if cloud_desc.allocated_storage != @config['storage'] + + if @config['cloudwatch_logs'] and cloud_desc.enabled_cloudwatch_logs_exports.sort != @config['cloudwatch_logs'].sort + mods[:cloudwatch_logs_export_configuration] = { + enable_log_types: @config['cloudwatch_logs'], + disable_log_types: cloud_desc.enabled_cloudwatch_logs_exports - @config['cloudwatch_logs'] + } + end +# XXX it's a stupid array +# db_parameter_group_name: @config["parameter_group_name"], + + if mods.size > 1 + mods[:apply_immediately] = true + MU.log "Modifying RDS instance #{@cloud_id}", MU::NOTICE, details: mods + wait_until_available + MU::Cloud::AWS.rds(region: @config['region'], credentials: @credentials).modify_db_instance(mods) + wait_until_available end end @@ -770,41 +843,37 @@ def self.get_supported_engines(region = MU.myRegion, credentials = nil, engine: end engines = {} - marker = nil - begin - resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_engine_versions(marker: marker) - marker = resp.marker - - if resp and resp.db_engine_versions - resp.db_engine_versions.each { |version| - engines[version.engine] ||= { - "versions" => [], - "families" => [], - "features" => {}, - "raw" => {} - } - engines[version.engine]['versions'] << version.engine_version - engines[version.engine]['families'] << version.db_parameter_group_family - engines[version.engine]['raw'][version.engine_version] = version - [:supports_read_replica, :supports_log_exports_to_cloudwatch_logs].each { |feature| - if version.respond_to?(feature) and version.send(feature) == true - engines[version.engine]['features'][version.engine_version] ||= [] - 
engines[version.engine]['features'][version.engine_version] << feature - end - } + resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_engine_versions + if resp and resp.db_engine_versions + resp.db_engine_versions.each { |version| + engines[version.engine] ||= { + "versions" => [], + "families" => [], + "features" => {}, + "raw" => {} } - engines.each_key { |e| - engines[e]["versions"].uniq! - engines[e]["versions"].sort! { |a, b| MU.version_sort(a, b) } - engines[e]["families"].uniq! + engines[version.engine]['versions'] << version.engine_version + engines[version.engine]['families'] << version.db_parameter_group_family + engines[version.engine]['raw'][version.engine_version] = version + [:supports_read_replica, :supports_log_exports_to_cloudwatch_logs].each { |feature| + if version.respond_to?(feature) and version.send(feature) == true + engines[version.engine]['features'][version.engine_version] ||= [] + engines[version.engine]['features'][version.engine_version] << feature + end } - else - MU.log "Failed to get list of valid RDS engine versions in #{db['region']}, proceeding without proper validation", MU::WARN - end - end while !marker.nil? + } + engines.each_key { |e| + engines[e]["versions"].uniq! + engines[e]["versions"].sort! { |a, b| MU.version_sort(a, b) } + engines[e]["families"].uniq! + } + + else + MU.log "Failed to get list of valid RDS engine versions in #{db['region']}, proceeding without proper validation", MU::WARN + end @@engine_cache[credentials][region] = engines return engine ? 
@@engine_cache[credentials][region][engine] : @@engine_cache[credentials][region] @@ -1099,7 +1168,7 @@ def add_basic if @config.has_key?("parameter_group_family") @config["parameter_group_name"] = @mu_name - createDBParameterGroup + manageDbParameterGroup end createDb @@ -1118,7 +1187,7 @@ def add_cluster_node @config["creation_style"] = "new" if @config["creation_style"] != "new" if @config.has_key?("parameter_group_family") @config["parameter_group_name"] = @mu_name - createDBParameterGroup + manageDbParameterGroup end createDb @@ -1132,11 +1201,13 @@ def create_basic params[:engine_version] = @config["engine_version"] params[:vpc_security_group_ids] = @config["vpc_security_group_ids"] params[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"] + params[:backup_retention_period] = @config["backup_retention_period"] if @config["backup_retention_period"] if @config['create_cluster'] params[:database_name] = @config["db_name"] params[:db_cluster_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] else + params[:enable_cloudwatch_logs_exports] = @config['cloudwatch_logs'] if @config['cloudwatch_logs'] and !@config['cloudwatch_logs'].empty? 
params[:db_name] = @config["db_name"] if !@config['add_cluster_node'] params[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"] end From d90992cffe157b6a68756995a351e8634e13e6bc Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 6 May 2020 13:36:57 -0400 Subject: [PATCH 105/124] AWS: fix auto-pagination of API replies for calls that normally have no arguments --- modules/mu/providers/aws.rb | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/modules/mu/providers/aws.rb b/modules/mu/providers/aws.rb index da992993e..416140917 100644 --- a/modules/mu/providers/aws.rb +++ b/modules/mu/providers/aws.rb @@ -1508,17 +1508,24 @@ def method_missing(method_sym, *arguments) concat_to = concat_to.first new_args = arguments ? arguments.dup : [{}] begin - if new_args.is_a?(Array) and new_args.size == 1 and new_args.first.is_a?(Hash) - new_args[0][paginator] = new_page + if new_args.is_a?(Array) + new_args << {} if new_args.empty? + if new_args.size == 1 and new_args.first.is_a?(Hash) + new_args[0][paginator] = new_page + else + MU.log "I don't know how to insert a #{paginator} into these arguments for #{method_sym}", MU::WARN, details: new_args + end elsif new_args.is_a?(Hash) new_args[paginator] = new_page end - resp = if !arguments.nil? and arguments.size == 1 - @api.method(method_sym).call(new_args[0]) - elsif !arguments.nil? and arguments.size > 0 - @api.method(method_sym).call(*new_args) - end + MU.log "Attempting magic pagination for #{method_sym}", MU::DEBUG, details: new_args + +# resp = if !arguments.nil? and arguments.size == 1 +# @api.method(method_sym).call(new_args[0]) +# elsif !arguments.nil? and arguments.size > 0 + resp = @api.method(method_sym).call(*new_args) +# end break if resp.nil? 
resp = resp.__getobj__ if resp.respond_to?(:__getobj__) retval.send(concat_to).concat(resp.send(concat_to)) From 8eb478cfacb38feed241118dbd296007c7417dd7 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 7 May 2020 01:47:28 -0400 Subject: [PATCH 106/124] AWS::Database: mostly complete regroom behavior which should automatically guard attributes our config doesn't even support yet --- modules/mu/providers/aws/database.rb | 207 ++++++++++++++++++++------- modules/tests/rds.yaml | 4 +- modules/tests/regrooms/rds.yaml | 111 ++++++++++++++ 3 files changed, 266 insertions(+), 56 deletions(-) create mode 100644 modules/tests/regrooms/rds.yaml diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index 7445f7b14..16d08548f 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -64,6 +64,76 @@ class Database < MU::Cloud::Database } }.freeze + MODIFIABLE = { + "instance" => [ + :allocated_storage, + :db_instance_class, + :db_subnet_group_name, + :db_security_groups, + :vpc_security_group_ids, + :apply_immediately, + :master_user_password, + :db_parameter_group_name, + :backup_retention_period, + :preferred_backup_window, + :preferred_maintenance_window, + :multi_az, + :engine_version, + :allow_major_version_upgrade, + :auto_minor_version_upgrade, + :license_model, + :iops, + :option_group_name, + :new_db_instance_identifier, + :storage_type, + :tde_credential_arn, + :tde_credential_password, + :ca_certificate_identifier, + :domain, + :copy_tags_to_snapshot, + :monitoring_interval, + :db_port_number, + :publicly_accessible, + :monitoring_role_arn, + :domain_iam_role_name, + :promotion_tier, + :enable_iam_database_authentication, + :enable_performance_insights, + :performance_insights_kms_key_id, + :performance_insights_retention_period, + :cloudwatch_logs_export_configuration, + :processor_features, + :use_default_processor_features, + :deletion_protection, + :max_allocated_storage, + 
:certificate_rotation_restart + ], + "cluster" => [ + :new_db_cluster_identifier, + :apply_immediately, + :backup_retention_period, + :db_cluster_parameter_group_name, + :vpc_security_group_ids, + :port, + :master_user_password, + :option_group_name, + :preferred_backup_window, + :preferred_maintenance_window, + :enable_iam_database_authentication, + :backtrack_window, + :cloudwatch_logs_export_configuration, + :engine_version, + :allow_major_version_upgrade, + :db_instance_parameter_group_name, + :domain, + :domain_iam_role_name, + :scaling_configuration, + :deletion_protection, + :enable_http_endpoint, + :copy_tags_to_snapshot, + ] + } + # Initialize this cloud resource object. Calling +super+ will invoke the initializer defined under {MU::Cloud}, which should set the attribtues listed in {MU::Cloud::PUBLIC_ATTRS} as well as applicable dependency shortcuts, like +@vpc+, for us. # @param args [Hash]: Hash of named arguments passed via Ruby's double-splat def initialize(**args) @@ -79,7 +149,9 @@ def initialize(**args) end @mu_name.gsub(/(--|-$)/i, "").gsub(/(_)/, "-").gsub!(/^[^a-z]/i, "") - @config["parameter_group_name"] ||= @mu_name + if @config.has_key?("parameter_group_family") + @config["parameter_group_name"] ||= @mu_name + end if args[:from_cloud_desc] and args[:from_cloud_desc].is_a?(Aws::RDS::Types::DBCluster) @config['create_cluster'] = true @@ -418,6 +490,7 @@ def createSubnetGroup # Create a database parameter group. def manageDbParameterGroup(cluster = false, create: true) + return if !@config["parameter_group_name"] name_param = cluster ? :db_cluster_parameter_group_name : :db_parameter_group_name fieldname = cluster ? "cluster_parameter_group_parameters" : "db_parameter_group_parameters" @@ -448,26 +521,50 @@ def manageDbParameterGroup(cluster = false, create: true) } return if params.empty? 
- MU.log "Modifiying parameter group #{@config["parameter_group_name"]}", MU::NOTICE, details: params + MU.log "Modifying parameter group #{@config["parameter_group_name"]}", MU::NOTICE, details: params.map { |p| { p[:parameter_name] => p[:parameter_value] } } - if cluster - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster_parameter_group( - db_cluster_parameter_group_name: @config["parameter_group_name"], - parameters: params - ) - else - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_parameter_group( - db_parameter_group_name: @config["parameter_group_name"], - parameters: params - ) - end + MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 10) { + if cluster + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster_parameter_group( + db_cluster_parameter_group_name: @config["parameter_group_name"], + parameters: params + ) + else + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_parameter_group( + db_parameter_group_name: @config["parameter_group_name"], + parameters: params + ) + end + } end end # Called automatically by {MU::Deploy#createResources} def groom + cloud_desc(use_cache: false) + + noun = @config['create_cluster'] ? "cluster" : "instance" + manageDbParameterGroup(@config["create_cluster"], create: false) + + mods = { + "db_#{noun}_identifier".to_sym => @cloud_id + } + + basicParams.each_pair { |k, v| + next if v.nil? 
or !MODIFIABLE[noun].include?(k) + if cloud_desc.respond_to?(k) and cloud_desc.send(k) != v + mods[k] = v + end + } + + if @config['cloudwatch_logs'] and cloud_desc.enabled_cloudwatch_logs_exports.sort != @config['cloudwatch_logs'].sort + mods[:cloudwatch_logs_export_configuration] = { + enable_log_types: @config['cloudwatch_logs'], + disable_log_types: cloud_desc.enabled_cloudwatch_logs_exports - @config['cloudwatch_logs'] + } + end + if @config["create_cluster"] - manageDbParameterGroup(true, create: false) @config['cluster_node_count'] ||= 1 if @config['cluster_mode'] == "serverless" MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_current_db_cluster_capacity( @@ -476,49 +573,38 @@ def groom ) end else - manageDbParameterGroup(create: false) - # Run SQL on deploy if @config['run_sql_on_deploy'] run_sql_commands end - mods = { - db_instance_identifier: @cloud_id, -# master_user_password: @config["password"], - } - if !cloud_desc.multi_az and (@config['multi_az_on_deploy'] or @config['multi_az_on_create']) mods[:multi_az] = true end -#pp cloud_desc - [:backup_retention_period, :preferred_backup_window, :preferred_maintenance_window, :allow_major_version_upgrade, :auto_minor_version_upgrade].each { |field| - if @config[field.to_s] and - cloud_desc.respond_to?(field) and - cloud_desc.send(field) != @config[field.to_s] - mods[field] = @config[field.to_s] - end - } +# if !@config['member_of_cluster'] and !@config['read_replica_of'] +# [:backup_retention_period, :preferred_backup_window, :preferred_maintenance_window, :allow_major_version_upgrade, :auto_minor_version_upgrade].each { |field| +# if @config[field.to_s] and +# cloud_desc.respond_to?(field) and +# cloud_desc.send(field) != @config[field.to_s] +# mods[field] = @config[field.to_s] +# end +# } - mods[:allocated_storage] = @config['storage'] if cloud_desc.allocated_storage != @config['storage'] +# XXX how do we guard this? do we? 
+# master_user_password: @config["password"], +# end - if @config['cloudwatch_logs'] and cloud_desc.enabled_cloudwatch_logs_exports.sort != @config['cloudwatch_logs'].sort - mods[:cloudwatch_logs_export_configuration] = { - enable_log_types: @config['cloudwatch_logs'], - disable_log_types: cloud_desc.enabled_cloudwatch_logs_exports - @config['cloudwatch_logs'] - } - end # XXX it's a stupid array # db_parameter_group_name: @config["parameter_group_name"], + end - if mods.size > 1 - mods[:apply_immediately] = true - MU.log "Modifying RDS instance #{@cloud_id}", MU::NOTICE, details: mods - wait_until_available - MU::Cloud::AWS.rds(region: @config['region'], credentials: @credentials).modify_db_instance(mods) - wait_until_available - end + if mods.size > 1 + MU.log "Modifying RDS instance #{@cloud_id}", MU::NOTICE, details: mods + mods[:apply_immediately] = true + wait_until_available + MU::Cloud::AWS.rds(region: @config['region'], credentials: @credentials).send("modify_db_#{noun}".to_sym, mods) + wait_until_available end end @@ -998,7 +1084,11 @@ def genericParams vpc_security_group_ids: @config["vpc_security_group_ids"], tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } } - paramhash[:db_subnet_group_name] = @config["subnet_group_name"].downcase if @vpc + + if @vpc and @config["subnet_group_name"] + paramhash[:db_subnet_group_name] = @config["subnet_group_name"].downcase + end + if @config['cloudwatch_logs'] paramhash[:enable_cloudwatch_logs_exports ] = @config['cloudwatch_logs'] end @@ -1133,7 +1223,7 @@ def self.validate_engine(db) engine_cfg = get_supported_engines(db['region'], db['credentials'], engine: db['engine']) if !engine_cfg or engine_cfg['versions'].empty? or engine_cfg['families'].empty? 
- MU.log "RDS engine #{db['engine']} reports no supported versions in #{db['region']}", MU::ERR, details: engines.keys.sort + MU.log "RDS engine #{db['engine']} reports no supported versions in #{db['region']}", MU::ERR, details: engine_cfg return false end @@ -1167,7 +1257,6 @@ def add_basic end if @config.has_key?("parameter_group_family") - @config["parameter_group_name"] = @mu_name manageDbParameterGroup end @@ -1186,15 +1275,13 @@ def add_cluster_node @config["subnet_group_name"] = @config['cluster_identifier'] if @vpc @config["creation_style"] = "new" if @config["creation_style"] != "new" if @config.has_key?("parameter_group_family") - @config["parameter_group_name"] = @mu_name manageDbParameterGroup end createDb end - # creation_style = new, existing, new_snapshot, existing_snapshot - def create_basic + def basicParams params = genericParams params[:storage_encrypted] = @config["storage_encrypted"] params[:master_user_password] = @config['password'] @@ -1220,7 +1307,9 @@ def create_basic params[:multi_az] = @config['multi_az_on_create'] end - if !@config['add_cluster_node'] + noun = @config['create_cluster'] ? "cluster" : "instance" + + if noun == "cluster" or !params[:db_cluster_identifier] params[:backup_retention_period] = @config["backup_retention_period"] params[:preferred_backup_window] = @config["preferred_backup_window"] params[:master_username] = @config['master_user'] @@ -1228,15 +1317,25 @@ def create_basic params[:iops] = @config["iops"] if @config['storage_type'] == "io1" end - noun = @config['create_cluster'] ? 
"cluster" : "instance" + params + end + + # creation_style = new, existing, new_snapshot, existing_snapshot + def create_basic + params = basicParams + + clean_parent_opts = Proc.new { + [:storage_encrypted, :master_user_password, :engine_version, :allocated_storage, :backup_retention_period, :preferred_backup_window, :master_username, :db_name, :database_name].each { |p| params.delete(p) } + } MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) - [:storage_encrypted, :master_user_password, :engine_version, :allocated_storage, :backup_retention_period, :preferred_backup_window, :master_username, :db_name, :database_name].each { |p| params.delete(p) } + clean_parent_opts.call MU.log "Creating database #{noun} #{@cloud_id} from snapshot #{@config["snapshot_id"]}" MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("restore_db_#{noun}_from_#{noun == "instance" ? 
"db_" : ""}snapshot".to_sym, params) else - MU.log "Creating pristine database #{noun} #{@cloud_id} (#{@config['name']}) in #{@config['region']}" + clean_parent_opts.call if noun == "instance" and params[:db_cluster_identifier] + MU.log "Creating pristine database #{noun} #{@cloud_id} (#{@config['name']}) in #{@config['region']}", MU::NOTICE, details: params MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("create_db_#{noun}".to_sym, params) end } @@ -1376,7 +1475,6 @@ def createDb mod_config[:vpc_security_group_ids] = @config["vpc_security_group_ids"] end - if !@config["read_replica_of"] mod_config[:preferred_backup_window] = @config["preferred_backup_window"] mod_config[:backup_retention_period] = @config["backup_retention_period"] @@ -1535,6 +1633,7 @@ def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: M db ||= db_obj.cloud_desc ["parameter_group_name", "subnet_group_name"].each { |attr| if db_obj.config[attr] + known ||= [] known << db_obj.config[attr] end } diff --git a/modules/tests/rds.yaml b/modules/tests/rds.yaml index 23c6bd02a..18ad23008 100644 --- a/modules/tests/rds.yaml +++ b/modules/tests/rds.yaml @@ -8,8 +8,7 @@ databases: size: db.t3.medium engine: postgres engine_version: "10" - allow_major_version_upgrade: true - auto_minor_version_upgrade: false + auto_minor_version_upgrade: true backup_retention_period: 10 cluster_node_count: 2 create_cluster: true @@ -38,6 +37,7 @@ databases: name: rdstests region: us-east-1 create_read_replica: true + allow_major_version_upgrade: true read_replica_region: us-east-2 cloudwatch_logs: - slowquery diff --git a/modules/tests/regrooms/rds.yaml b/modules/tests/regrooms/rds.yaml new file mode 100644 index 000000000..fa032fb37 --- /dev/null +++ b/modules/tests/regrooms/rds.yaml @@ -0,0 +1,111 @@ +# clouds: AWS +--- +appname: smoketest +vpcs: +- name: rdstests +databases: +- name: pgcluster + size: db.t3.medium + engine: postgres + engine_version: "10" 
+ allow_major_version_upgrade: true + auto_minor_version_upgrade: false + backup_retention_period: 8 + cluster_node_count: 2 + create_cluster: true + cluster_parameter_group_parameters: + - name: log_disconnections + value: "0" + - name: authentication_timeout + value: "35" + vpc: + name: rdstests + master_user: Bob + +#- name: mysqlcluster +# size: db.t3.medium +# engine: aurora +# cluster_mode: serverless +# create_cluster: true +# vpc: +# name: rdstests + +- name: maria-base + size: db.t2.small + engine: mariadb + db_parameter_group_parameters: + - name: autocommit + value: "1" + vpc: + name: rdstests + region: us-east-1 + create_read_replica: true + read_replica_region: us-east-2 + cloudwatch_logs: + - slowquery + - error + multi_az_on_create: true + master_user: Bob +- name: maria-from-snap + size: db.t2.small + engine: mariadb + vpc: + name: rdstests + creation_style: new_snapshot + source: + name: maria-base +- name: maria-point-in-time + creation_style: point_in_time + size: db.t2.micro + engine: mariadb + cloudwatch_logs: + - error + - general + source: + name: maria-base + vpc: + name: rdstests + +- name: oracle-base + size: db.m5.large + engine: oracle + vpc: + name: rdstests +- name: oracle-from-snap + size: db.m5.large + engine: oracle + vpc: + name: rdstests + creation_style: new_snapshot + source: + name: oracle-base +- name: oracle-point-in-time + size: db.m5.large + engine: oracle + vpc: + name: rdstests + creation_style: point_in_time + source: + name: oracle-base + +- name: sqlserver-base + size: db.t2.small + engine: sqlserver-ex + vpc: + name: rdstests +- name: sqlserver-from-snap + size: db.t2.small + engine: sqlserver-ex + vpc: + name: rdstests + creation_style: new_snapshot + source: + name: sqlserver-base +- name: sqlserver-point-in-time + size: db.t2.small + engine: sqlserver-ex + vpc: + name: rdstests + creation_style: point_in_time + source: + name: sqlserver-base From 8db8187645c589d0f07d936c6a7749ac02c2f2ee Mon Sep 17 00:00:00 2001 
From: John Stange Date: Thu, 7 May 2020 11:56:11 -0400 Subject: [PATCH 107/124] start bumping release versions --- cloud-mu.gemspec | 4 ++-- modules/Gemfile.lock | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud-mu.gemspec b/cloud-mu.gemspec index ff097c411..c5433e127 100644 --- a/cloud-mu.gemspec +++ b/cloud-mu.gemspec @@ -17,8 +17,8 @@ end Gem::Specification.new do |s| s.name = 'cloud-mu' - s.version = '3.1.6' - s.date = '2020-03-20' + s.version = '3.2.0alpha1' + s.date = '2020-05-07' s.require_paths = ['modules'] s.required_ruby_version = '>= 2.4' s.summary = "The eGTLabs Mu toolkit for unified cloud deployments" diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index 5695e727b..ad71c6904 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -10,7 +10,7 @@ GIT PATH remote: .. specs: - cloud-mu (3.1.6) + cloud-mu (3.2.0alpha1) addressable (~> 2.5) aws-sdk-core (< 3) azure_sdk (~> 0.52) From 064d3d0cfcbea11350c011be242b258a2752e0f6 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 7 May 2020 12:38:45 -0400 Subject: [PATCH 108/124] AWS::Database, Adoption: teeny tiny bugs --- modules/mu/adoption.rb | 12 +++++++----- modules/mu/providers/aws/database.rb | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 391da522e..92121518b 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -223,11 +223,13 @@ def generateBaskets(prefix: "") } @existing_deploys[appname] = MU::MommaCat.findMatchingDeploy(origin) - @existing_deploys_by_id[@existing_deploys[appname].deploy_id] = @existing_deploys[appname] - @origins[appname] = origin - origin['types'].each { |t| - @types_found_in[t] = @existing_deploys[appname] - } + if @existing_deploys[appname] + @existing_deploys_by_id[@existing_deploys[appname].deploy_id] = @existing_deploys[appname] + @origins[appname] = origin + origin['types'].each { |t| + @types_found_in[t] = @existing_deploys[appname] + } + end 
} groupings.each_pair { |appname, types| diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index 16d08548f..58d321a1a 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -363,7 +363,6 @@ def toKitten(**_args) bok['backup_retention_period'] = cloud_desc.backup_retention_period if cloud_desc.backup_retention_period > 1 bok['multi_az_on_groom'] = true if cloud_desc.multi_az bok['storage_encrypted'] = true if cloud_desc.storage_encrypted - bok['auto_minor_version_upgrade'] = true if cloud_desc.auto_minor_version_upgrade if bok['create_cluster'] bok['cluster_node_count'] = cloud_desc.db_cluster_members.size @@ -392,6 +391,7 @@ def toKitten(**_args) end else bok['size'] = cloud_desc.db_instance_class + bok['auto_minor_version_upgrade'] = true if cloud_desc.auto_minor_version_upgrade if cloud_desc.db_subnet_group myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: cloud_desc.db_subnet_group.vpc_id, credentials: @credentials, region: @config['region'], dummy_ok: true, no_deploy_search: true).first bok['vpc'] = myvpc.getReference(cloud_desc.db_subnet_group.subnets.map { |s| s.subnet_identifier }) From 1ee10b04f4d9ea0e3dfe4d7d4ecfef62006374ea Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 7 May 2020 15:08:38 -0400 Subject: [PATCH 109/124] AWS::Database: make sure regrooms pick up firewall rule changes --- modules/mu/providers/aws/database.rb | 77 +++++++++++++++------------- modules/tests/regrooms/rds.yaml | 16 +++++- 2 files changed, 55 insertions(+), 38 deletions(-) diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index 58d321a1a..aba276914 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -71,7 +71,6 @@ class Database < MU::Cloud::Database :db_subnet_group_name, :db_security_groups, :vpc_security_group_ids, - :apply_immediately, :master_user_password, :db_parameter_group_name, 
:backup_retention_period, @@ -110,7 +109,6 @@ class Database < MU::Cloud::Database ], "cluster" => [ :new_db_cluster_identifier, - :apply_immediately, :backup_retention_period, :db_cluster_parameter_group_name, :vpc_security_group_ids, @@ -198,7 +196,7 @@ def create if @config["create_cluster"] getPassword - createSubnetGroup + manageSubnetGroup if @config.has_key?("parameter_group_family") manageDbParameterGroup(true) @@ -375,7 +373,7 @@ def toKitten(**_args) # for now just assume they're all the same cloud_desc.db_cluster_members.each { |db| member = MU::Cloud::AWS::Database.find(cloud_id: db.db_instance_identifier, region: @config['region'], credentials: @credentials).values.first -MU.log "derp", MU::WARN, details: member + sizes << member.db_instance_class if member.db_subnet_group and member.db_subnet_group.vpc_id vpcs << member.db_subnet_group @@ -445,7 +443,7 @@ def allTags end # Create a subnet group for a database. - def createSubnetGroup + def manageSubnetGroup # Finding subnets, creating security groups/adding holes, create subnet group subnet_ids = [] @@ -468,21 +466,26 @@ def createSubnetGroup if subnet_ids.empty? raise MuError, "Couldn't find subnets in #{@vpc} to add to #{@config["subnet_group_name"]}. 
Make sure the subnets are valid and publicly_accessible is set correctly" else - # Create subnet group - resp = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_subnet_group( - db_subnet_group_name: @config["subnet_group_name"], - db_subnet_group_description: @config["subnet_group_name"], - subnet_ids: subnet_ids, - tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } + resp = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_subnet_groups( + db_subnet_group_name: @config["subnet_group_name"] ) - @config["subnet_group_name"] = resp.db_subnet_group.db_subnet_group_name - - if @dependencies.has_key?('firewall_rule') - @config["vpc_security_group_ids"] = [] - @dependencies['firewall_rule'].each_value { |sg| - @config["vpc_security_group_ids"] << sg.cloud_id - } + if !resp or !resp.db_subnet_groups or resp.db_subnet_groups.empty? + # Create subnet group + resp = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_subnet_group( + db_subnet_group_name: @config["subnet_group_name"], + db_subnet_group_description: @config["subnet_group_name"], + subnet_ids: subnet_ids, + tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } + ) + else +# XXX ensure subnet group matches our config? end + + myFirewallRules.each { |sg| + next if sg.cloud_desc.vpc_id != @vpc.cloud_id + @config["vpc_security_group_ids"] ||= [] + @config["vpc_security_group_ids"] << sg.cloud_id + } end allowBastionAccess @@ -542,9 +545,10 @@ def manageDbParameterGroup(cluster = false, create: true) # Called automatically by {MU::Deploy#createResources} def groom cloud_desc(use_cache: false) + manageSubnetGroup if @vpc + manageDbParameterGroup(@config["create_cluster"], create: false) noun = @config['create_cluster'] ? 
"cluster" : "instance" - manageDbParameterGroup(@config["create_cluster"], create: false) mods = { "db_#{noun}_identifier".to_sym => @cloud_id @@ -557,6 +561,17 @@ def groom end } + existing_sgs = cloud_desc.vpc_security_groups.map { |sg| + sg.vpc_security_group_id + }.sort + + if !@config["add_cluster_node"] and !@config["member_of_cluster"] and + @config["vpc_security_group_ids"] and + existing_sgs != @config["vpc_security_group_ids"].sort + mods[:vpc_security_group_ids] = @config["vpc_security_group_ids"] + end + + if @config['cloudwatch_logs'] and cloud_desc.enabled_cloudwatch_logs_exports.sort != @config['cloudwatch_logs'].sort mods[:cloudwatch_logs_export_configuration] = { enable_log_types: @config['cloudwatch_logs'], @@ -582,15 +597,6 @@ def groom mods[:multi_az] = true end -# if !@config['member_of_cluster'] and !@config['read_replica_of'] -# [:backup_retention_period, :preferred_backup_window, :preferred_maintenance_window, :allow_major_version_upgrade, :auto_minor_version_upgrade].each { |field| -# if @config[field.to_s] and -# cloud_desc.respond_to?(field) and -# cloud_desc.send(field) != @config[field.to_s] -# mods[field] = @config[field.to_s] -# end -# } - # XXX how do we guard this? do we? # master_user_password: @config["password"], # end @@ -661,12 +667,10 @@ def allowHost(cidr) end # Otherwise go get our generic EC2 ruleset and punch a hole in it - if @dependencies.has_key?('firewall_rule') - @dependencies['firewall_rule'].each_value { |sg| - sg.addRule([cidr], proto: "tcp", port: cloud_desc.endpoint.port) - break - } - end + myFirewallRules.each { |sg| + sg.addRule([cidr], proto: "tcp", port: cloud_desc.endpoint.port) + break + } end # Return the metadata for this ContainerCluster @@ -1029,7 +1033,7 @@ def self.validate_master_password(db) # Cloud-specific pre-processing of {MU::Config::BasketofKittens::databases}, bare and unvalidated. 
# @param db [Hash]: The resource to process and validate
-        # @param _configurator [MU::Config]: The overall deployment configurator of which this resource is a member
+        # @param _configurator [MU::Config]: The overall deployment configurator of which this resource is a member
        # @return [Boolean]: True if validation succeeded, False otherwise
        def self.validateConfig(db, _configurator)
          ok = true
@@ -1112,6 +1116,7 @@ def genericParams
            auto_minor_version_upgrade: @config["auto_minor_version_upgrade"],
            license_model: @config["license_model"],
            db_subnet_group_name: @config["subnet_group_name"],
+            vpc_security_group_ids: @config["vpc_security_group_ids"],
            publicly_accessible: @config["publicly_accessible"],
            copy_tags_to_snapshot: true,
            tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } }
@@ -1251,7 +1256,7 @@ def add_basic
        getPassword

        if @config['source'].nil? or @config['region'] != @config['source'].region
-          createSubnetGroup if @vpc
+          manageSubnetGroup if @vpc
        else
          MU.log "Note: Read Replicas automatically reside in the same subnet group as the source database, if they're both in the same region. 
This replica may not land in the VPC you intended.", MU::WARN end diff --git a/modules/tests/regrooms/rds.yaml b/modules/tests/regrooms/rds.yaml index fa032fb37..3bb5d763f 100644 --- a/modules/tests/regrooms/rds.yaml +++ b/modules/tests/regrooms/rds.yaml @@ -3,6 +3,14 @@ appname: smoketest vpcs: - name: rdstests +firewall_rules: +- name: world + vpc: + name: rdstests + rules: + - port: 3307 + hosts: + - 0.0.0.0/0 databases: - name: pgcluster size: db.t3.medium @@ -20,7 +28,7 @@ databases: value: "35" vpc: name: rdstests - master_user: Bob + master_user: Jimmy #- name: mysqlcluster # size: db.t3.medium @@ -45,12 +53,15 @@ databases: - slowquery - error multi_az_on_create: true - master_user: Bob + master_user: Stoki - name: maria-from-snap size: db.t2.small engine: mariadb + port: 3307 vpc: name: rdstests + add_firewall_rules: + - name: world creation_style: new_snapshot source: name: maria-base @@ -71,6 +82,7 @@ databases: engine: oracle vpc: name: rdstests + master_user: helen - name: oracle-from-snap size: db.m5.large engine: oracle From adc570c48ca11f07ed582cc54b49778ca2805fde Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 7 May 2020 15:46:41 -0400 Subject: [PATCH 110/124] Adoption: don't include the 'new value' bit of Slack diff notification on adds and removes, it's redundant --- modules/mu/adoption.rb | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 92121518b..c903f5793 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -461,7 +461,10 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") end else tier[:value] ||= "" - myreport["slack"] = slack+" New #{tier[:field] ? "`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" + myreport["slack"] = slack + if ![:added, :removed].include?(tier[:action]) + myreport["slack"] += " New #{tier[:field] ? 
"`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" + end append = tier[:value].to_s.bold end if append and !append.empty? From 43a8df331a1319411e36c4c6d75f8c7a7be36162 Mon Sep 17 00:00:00 2001 From: John Stange Date: Thu, 7 May 2020 23:24:07 -0400 Subject: [PATCH 111/124] mu-configure: roll new adoption and google options into config schema --- bin/mu-configure | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/bin/mu-configure b/bin/mu-configure index 3076d8584..ebb122548 100755 --- a/bin/mu-configure +++ b/bin/mu-configure @@ -113,12 +113,44 @@ $CONFIGURABLES = { "desc" => "Disable the Momma Cat grooming daemon. Nodes which require asynchronous Ansible/Chef bootstraps will not function. This option is only honored in gem-based installations.", "boolean" => true }, + "adopt_change_notify" => { + "title" => "Adoption Change Notifications", + "subtree" => { + "slack" => { + "title" => "Send to Slack", + "desc" => "Report modifications to adopted resources, detected by mu-adopt --diff, to the Slack webhook and channel configured under Slack Configuration.", + "boolean" => true + }, + "slack_snippet_threshold" => { + "title" => "Attachment Threshold", + "desc" => "If a list of details about a modified resources is longer than this number of lines (in JSON), it will be sent as an \"attachment,\" which in Slack means a blockquote that displays a few lines with a \"Show more\" button. The internal default is 5 lines." + }, +# "email" => { +# "title" => "Send Email", +# "desc" => "", +# "boolean" => true +# } + } + }, "adopt_scrub_mu_isms" => { - "title" => "Disable Momma Cat", + "title" => "Scrub Mu-isms from Baskets of Kittens", "default" => false, "desc" => "Ordinarily, Mu will automatically name, tag and generate auxiliary resources in a standard Mu-ish fashion that allows for deployment of multiple clones of a given stack. 
Toggling this flag will change the default behavior of mu-adopt, when it creates stack descriptors from found resources, to enable or disable this behavior (see also mu-adopt's --scrub option).",
    "boolean" => true
  },
+  "slack" => {
+    "title" => "Slack Configuration",
+    "subtree" => {
+      "webhook" => {
+        "title" => "Webhook",
+        "desc" => "The hooks.slack.com URL for the webhook to which we'll send deploy notifications"
+      },
+      "channel" => {
+        "title" => "Channel",
+        "desc" => "The channel name (without leading #) to which alerts should be sent."
+      }
+    }
+  },
  "mommacat_port" => {
    "title" => "Momma Cat Listen Port",
    "pattern" => /^[0-9]+$/i,
@@ -247,6 +279,10 @@ $CONFIGURABLES = {
    "required" => false,
    "desc" => "For Google Cloud projects which are attached to a GSuite domain. GCP service accounts cannot view or manage GSuite resources (groups, users, etc) directly, but must instead masquerade as a GSuite user which has delegated authority to the service account. See also: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#delegatingauthority"
  },
+  "org" => {
+    "title" => "Default Org/Domain",
+    "desc" => "For credential sets which have access to multiple GSuite or Cloud Identity orgs, you must specify a default organization (e.g. my.domain.com)." 
+ }, "customer_id" => { "title" => "GSuite Customer ID", "required" => false, From 7a77396a831bcc974e6fb5e9ecbbf0c805e563aa Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 8 May 2020 01:18:54 -0400 Subject: [PATCH 112/124] add some missing YARD comments --- modules/mu/cloud/dnszone.rb | 6 ++++++ modules/mu/providers/aws/database.rb | 1 + 2 files changed, 7 insertions(+) diff --git a/modules/mu/cloud/dnszone.rb b/modules/mu/cloud/dnszone.rb index e722ad4c9..59f4fcdb1 100644 --- a/modules/mu/cloud/dnszone.rb +++ b/modules/mu/cloud/dnszone.rb @@ -20,11 +20,17 @@ class Cloud # Generic methods for all DNSZone implementations class DNSZone + # Set a generic .platform-mu DNS entry for a resource, and return the name + # that was set. def self.genericMuDNSEntry(*flags) # XXX have this switch on a global config for where Mu puts its DNS MU::Cloud.resourceClass(MU::Config.defaultCloud, "DNSZone").genericMuDNSEntry(flags.first) end + # Wrapper for {MU::Cloud::AWS::DNSZone.manageRecord}. Spawns threads to create all + # requested records in background and returns immediately. + # @param cfg [Array]: An array of parsed {MU::Config::BasketofKittens::dnszones::records} objects. + # @param target [String]: Optional target for the records to be created. Overrides targets embedded in cfg records. def self.createRecordsFromConfig(*flags) cloudclass = MU::Cloud.resourceClass(MU::Config.defaultCloud, "DNSZone") if !flags.nil? 
and flags.size == 1 diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index aba276914..fc36741bd 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -64,6 +64,7 @@ class Database < MU::Cloud::Database } }.freeze + # List of parameters that are legal to set in +modify_db_instance+ and +modify_db_cluster+ MODIFIABLE = { "instance" => [ :allocated_storage, From b3f09a2abcceab77d731e5808b5432f4c0fe8340 Mon Sep 17 00:00:00 2001 From: John Stange Date: Fri, 8 May 2020 11:42:32 -0400 Subject: [PATCH 113/124] LoadBalancer: infer ports/protocols from redirect configs when applicable --- modules/mu/config/loadbalancer.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/mu/config/loadbalancer.rb b/modules/mu/config/loadbalancer.rb index 0d1ff22ef..ac3207626 100644 --- a/modules/mu/config/loadbalancer.rb +++ b/modules/mu/config/loadbalancer.rb @@ -300,7 +300,7 @@ def self.schema "type" => "array", "items" => { "type" => "object", - "required" => ["lb_protocol", "lb_port", "instance_protocol", "instance_port"], + "required" => ["lb_protocol", "lb_port"], "additionalProperties" => false, "description" => "A list of port/protocols which this Load Balancer should answer.", "properties" => { @@ -446,6 +446,10 @@ def self.validate(lb, _configurator) "proto" => l["instance_protocol"], "port" => l["instance_port"] } + if l["redirect"] + tg["proto"] ||= l["redirect"]["protocol"] + tg["port"] ||= l["redirect"]["port"] + end l['healthcheck'] ||= lb['healthcheck'] if lb['healthcheck'] if l["healthcheck"] hc_target = l['healthcheck']['target'].match(/^([^:]+):(\d+)(.*)/) From d265a6ea4601f6103e351f144cafb32d659e6d47 Mon Sep 17 00:00:00 2001 From: John Stange Date: Mon, 11 May 2020 02:18:09 -0400 Subject: [PATCH 114/124] Adoption: verbiage tweaks in Slack change notifications --- modules/mu/adoption.rb | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff 
--git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index c903f5793..470336d3e 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -31,6 +31,7 @@ class Incomplete < MU::MuNonFatal; end } def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [], scrub_mu_isms: false, regions: [], merge: false) + @scraped = {} @clouds = clouds @types = types @@ -425,9 +426,6 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") plain += " ("+loc+")" if loc and !loc.empty? color = plain - slack += " was #{tier[:action]}" - slack += " #{preposition} \*#{loc}\*" if loc and !loc.empty? and [Array, Hash].include?(tier[:value].class) - if tier[:action] == :added color = "+ ".green + plain plain = "+ " + plain @@ -435,10 +433,13 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") color = "- ".red + plain plain = "- " + plain end + + slack += " #{tier[:action]} #{preposition} \*#{loc}\*" if loc and !loc.empty? and [Array, Hash].include?(tier[:value].class) + plain = path_str.join(" => \n") + indent + plain color = path_str.join(" => \n") + indent + color - slack += slack_path_str+"." + slack += " "+slack_path_str+"." myreport = { "slack" => slack, "plain" => plain, @@ -458,15 +459,19 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") end else append = indent+"["+tier[:value].map { |v| MU::MommaCat.getChunkName(v, type_of).reverse.join("/") || v.to_s.light_blue }.join(", ")+"]" + slack += " #{tier[:action].to_s}: "+tier[:value].map { |v| MU::MommaCat.getChunkName(v, type_of).reverse.join("/") || v.to_s }.join(", ") end else tier[:value] ||= "" - myreport["slack"] = slack + slack += " was #{tier[:action]}" if ![:added, :removed].include?(tier[:action]) myreport["slack"] += " New #{tier[:field] ? 
"`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" end append = tier[:value].to_s.bold end + + myreport["slack"] = slack + if append and !append.empty? myreport["plain"] += " =>\n "+indent+append myreport["color"] += " =>\n "+indent+append @@ -538,15 +543,13 @@ def notifyChanges(deploy, report) } puts "" - if MU.muCfg['adopt_change_notify']['slack'] + if MU.muCfg['adopt_change_notify'] and MU.muCfg['adopt_change_notify']['slack'] deploy.sendAdminSlack(slacktext, scrub_mu_isms: MU.muCfg['adopt_scrub_mu_isms'], snippets: snippets, noop: false) end } } - if MU.muCfg['adopt_change_notify']['email'] - end end def scrubSchemaDefaults(conf_chunk, schema_chunk, depth = 0, type: nil) From 1b193d1c80f2f5e3f82e4bc274d7cf240e03f0ec Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 12 May 2020 17:09:06 -0400 Subject: [PATCH 115/124] a series of unrelated problems with giving things names --- modules/Gemfile.lock | 12 +++++------- modules/mu/master.rb | 2 +- modules/mu/mommacat/naming.rb | 7 ++++--- modules/mu/providers/aws/database.rb | 20 +++++++++++--------- modules/mu/providers/aws/server.rb | 2 +- 5 files changed, 22 insertions(+), 21 deletions(-) diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index ad71c6904..207363a27 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -44,17 +44,17 @@ PATH GEM remote: https://rubygems.org/ specs: - activesupport (6.0.2.2) + activesupport (6.0.3) concurrent-ruby (~> 1.0, >= 1.0.2) i18n (>= 0.7, < 2) minitest (~> 5.1) tzinfo (~> 1.1) - zeitwerk (~> 2.2) + zeitwerk (~> 2.2, >= 2.2.2) addressable (2.5.2) public_suffix (>= 2.0.2, < 4.0) ast (2.4.0) aws-eventstream (1.1.0) - aws-sdk-core (2.11.501) + aws-sdk-core (2.11.504) aws-sigv4 (~> 1.0) jmespath (~> 1.0) aws-sigv4 (1.1.3) @@ -634,7 +634,6 @@ GEM inifile (3.0.0) iniparse (1.5.0) ipaddress (0.8.3) - jaro_winkler (1.5.4) jmespath (1.4.0) json-schema (2.8.1) addressable (>= 2.4) @@ -741,7 +740,7 @@ GEM rspec-mocks (~> 3.9.0) rspec-core (3.9.2) rspec-support (~> 
3.9.3) - rspec-expectations (3.9.1) + rspec-expectations (3.9.2) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.9.0) rspec-its (1.3.0) @@ -754,8 +753,7 @@ GEM rspec_junit_formatter (0.2.3) builder (< 4) rspec-core (>= 2, < 4, != 2.12.0) - rubocop (0.82.0) - jaro_winkler (~> 1.5.1) + rubocop (0.83.0) parallel (~> 1.10) parser (>= 2.7.0.1) rainbow (>= 2.2.2, < 4.0) diff --git a/modules/mu/master.rb b/modules/mu/master.rb index 791405b98..607a61301 100644 --- a/modules/mu/master.rb +++ b/modules/mu/master.rb @@ -602,7 +602,7 @@ def self.addHostToSSHConfig(server, return end if ssh_key_name.nil? or ssh_key_name.empty? - MU.log "Failed to extract ssh_key_name for #{ssh_key_name.mu_name} in addHostToSSHConfig", MU::ERR + MU.log "Failed to extract ssh_key_name for #{server.mu_name} in addHostToSSHConfig", MU::ERR return end diff --git a/modules/mu/mommacat/naming.rb b/modules/mu/mommacat/naming.rb index 109335b57..7f7a7c40a 100644 --- a/modules/mu/mommacat/naming.rb +++ b/modules/mu/mommacat/naming.rb @@ -277,17 +277,18 @@ def self.listOptionalTags # SSH config entries, etc. # @param server [MU::Cloud::Server]: The {MU::Cloud::Server} we'll be setting up. # @param sync_wait [Boolean]: Whether to wait for DNS to fully synchronize before returning. - def self.nameKitten(server, sync_wait: false) + def self.nameKitten(server, sync_wait: false, no_dns: false) node, config, _deploydata = server.describe mu_zone = nil # XXX GCP! - if MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud? + if !no_dns and MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud? zones = MU::Cloud::DNSZone.find(cloud_id: "platform-mu") mu_zone = zones.values.first if !zones.nil? end + if !mu_zone.nil? 
- MU::Cloud::DNSZone.genericMuDNSEntry(name: node, target: server.canonicalIP, cloudclass: MU::Cloud::Server, sync_wait: sync_wait) + MU::Cloud::DNSZone.genericMuDNSEntry(name: node.gsub(/[^a-z0-9!"\#$%&'\(\)\*\+,\-\/:;<=>\?@\[\]\^_`{\|}~\.]/, '-').gsub(/--|^-/, ''), target: server.canonicalIP, cloudclass: MU::Cloud::Server, sync_wait: sync_wait) else MU::Master.addInstanceToEtcHosts(server.canonicalIP, node) end diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index fc36741bd..4ed0db3f5 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -467,19 +467,19 @@ def manageSubnetGroup if subnet_ids.empty? raise MuError, "Couldn't find subnets in #{@vpc} to add to #{@config["subnet_group_name"]}. Make sure the subnets are valid and publicly_accessible is set correctly" else - resp = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_subnet_groups( - db_subnet_group_name: @config["subnet_group_name"] - ) - if !resp or !resp.db_subnet_groups or resp.db_subnet_groups.empty? + resp = begin + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_subnet_groups( + db_subnet_group_name: @config["subnet_group_name"] + ) +# XXX ensure subnet group matches our config? + rescue ::Aws::RDS::Errors::DBSubnetGroupNotFoundFault # Create subnet group - resp = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_subnet_group( + MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_subnet_group( db_subnet_group_name: @config["subnet_group_name"], db_subnet_group_description: @config["subnet_group_name"], subnet_ids: subnet_ids, tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } ) - else -# XXX ensure subnet group matches our config? 
end myFirewallRules.each { |sg| @@ -1091,7 +1091,7 @@ def genericParams } if @vpc and @config["subnet_group_name"] - paramhash[:db_subnet_group_name] = @config["subnet_group_name"].downcase + paramhash[:db_subnet_group_name] = @config["subnet_group_name"] end if @config['cloudwatch_logs'] @@ -1278,7 +1278,7 @@ def add_cluster_node @config['cluster_identifier'] = cluster.cloud_id.downcase # We're overriding @config["subnet_group_name"] because we need each cluster member to use the cluster's subnet group instead of a unique subnet group - @config["subnet_group_name"] = @config['cluster_identifier'] if @vpc + @config["subnet_group_name"] ||= cluster.cloud_id if @vpc @config["creation_style"] = "new" if @config["creation_style"] != "new" if @config.has_key?("parameter_group_family") manageDbParameterGroup @@ -1334,6 +1334,8 @@ def create_basic [:storage_encrypted, :master_user_password, :engine_version, :allocated_storage, :backup_retention_period, :preferred_backup_window, :master_username, :db_name, :database_name].each { |p| params.delete(p) } } + noun = @config["create_cluster"] ? "cluster" : "instance" + MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) clean_parent_opts.call diff --git a/modules/mu/providers/aws/server.rb b/modules/mu/providers/aws/server.rb index 5b36707e7..71809f6f8 100644 --- a/modules/mu/providers/aws/server.rb +++ b/modules/mu/providers/aws/server.rb @@ -801,7 +801,7 @@ def notify end deploydata["region"] = @config['region'] if !@config['region'].nil? 
if !@named - MU::MommaCat.nameKitten(self) + MU::MommaCat.nameKitten(self, no_dns: true) @named = true end From 4a494d8845681e78f3de7930038cf0baa6dc6a65 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 13 May 2020 11:04:01 -0400 Subject: [PATCH 116/124] AWS::Database: make cluster members pull subnet group out of the parent cluster's cloud descriptor, so we can't possibly get it wrong --- modules/mu/providers/aws/database.rb | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index 4ed0db3f5..206597572 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -474,12 +474,16 @@ def manageSubnetGroup # XXX ensure subnet group matches our config? rescue ::Aws::RDS::Errors::DBSubnetGroupNotFoundFault # Create subnet group - MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_subnet_group( + resp = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_subnet_group( db_subnet_group_name: @config["subnet_group_name"], db_subnet_group_description: @config["subnet_group_name"], subnet_ids: subnet_ids, tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } } ) + # The API forces it to lowercase, for some reason? Maybe not + # always? Just rely on what it says. 
+ @config["subnet_group_name"] = resp.db_subnet_group.db_subnet_group_name + resp end myFirewallRules.each { |sg| @@ -1277,8 +1281,9 @@ def add_cluster_node end @config['cluster_identifier'] = cluster.cloud_id.downcase + # We're overriding @config["subnet_group_name"] because we need each cluster member to use the cluster's subnet group instead of a unique subnet group - @config["subnet_group_name"] ||= cluster.cloud_id if @vpc + @config["subnet_group_name"] = cluster.cloud_desc.db_subnet_group if @vpc @config["creation_style"] = "new" if @config["creation_style"] != "new" if @config.has_key?("parameter_group_family") manageDbParameterGroup @@ -1336,7 +1341,7 @@ def create_basic noun = @config["create_cluster"] ? "cluster" : "instance" - MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 5, wait: 10) { + MU.retrier([Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotFoundFault], max: 10, wait: 15) { if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"]) clean_parent_opts.call MU.log "Creating database #{noun} #{@cloud_id} from snapshot #{@config["snapshot_id"]}" From 22af648f824eea209dd4da9f93a3dc48a1bf2619 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 13 May 2020 11:59:44 -0400 Subject: [PATCH 117/124] Azure: don't break adoptions with our funky stub toKitten implementations --- modules/mu/providers/azure/firewall_rule.rb | 9 ++++++++- modules/mu/providers/azure/vpc.rb | 4 +++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/modules/mu/providers/azure/firewall_rule.rb b/modules/mu/providers/azure/firewall_rule.rb index 2a006994e..5eaa8f2a3 100644 --- a/modules/mu/providers/azure/firewall_rule.rb +++ b/modules/mu/providers/azure/firewall_rule.rb @@ -337,7 +337,14 @@ def self.cleanup(**args) # We assume that any values we have in +@config+ are placeholders, and # calculate our own accordingly based on what's live in the cloud. 
def toKitten(**args) - bok = {} + + bok = { + "cloud" => "Azure", + "name" => cloud_desc.name, + "project" => @config['project'], + "credentials" => @config['credentials'], + "cloud_id" => @cloud_id.to_s + } bok end diff --git a/modules/mu/providers/azure/vpc.rb b/modules/mu/providers/azure/vpc.rb index 81df50bb7..10a21e602 100644 --- a/modules/mu/providers/azure/vpc.rb +++ b/modules/mu/providers/azure/vpc.rb @@ -335,8 +335,10 @@ def toKitten(**_args) return nil if cloud_desc.name == "default" # parent project builds these bok = { "cloud" => "Azure", + "name" => cloud_desc.name, "project" => @config['project'], - "credentials" => @config['credentials'] + "credentials" => @config['credentials'], + "cloud_id" => @cloud_id.to_s } bok From 5feb40761ea0d7885c3aac32e30aecf101574be5 Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 13 May 2020 12:10:09 -0400 Subject: [PATCH 118/124] remove debugging statements from AWS::Database#toKitten and bump version to 3.2.0beta1 --- cloud-mu.gemspec | 4 ++-- modules/Gemfile.lock | 2 +- modules/mu/providers/aws/database.rb | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/cloud-mu.gemspec b/cloud-mu.gemspec index c5433e127..e0ad3af1f 100644 --- a/cloud-mu.gemspec +++ b/cloud-mu.gemspec @@ -17,8 +17,8 @@ end Gem::Specification.new do |s| s.name = 'cloud-mu' - s.version = '3.2.0alpha1' - s.date = '2020-05-07' + s.version = '3.2.0beta1' + s.date = '2020-05-13' s.require_paths = ['modules'] s.required_ruby_version = '>= 2.4' s.summary = "The eGTLabs Mu toolkit for unified cloud deployments" diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index 207363a27..ad88e56bb 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -10,7 +10,7 @@ GIT PATH remote: .. 
specs: - cloud-mu (3.2.0alpha1) + cloud-mu (3.2.0beta1) addressable (~> 2.5) aws-sdk-core (< 3) azure_sdk (~> 0.52) diff --git a/modules/mu/providers/aws/database.rb b/modules/mu/providers/aws/database.rb index 206597572..b440e216c 100644 --- a/modules/mu/providers/aws/database.rb +++ b/modules/mu/providers/aws/database.rb @@ -415,10 +415,9 @@ def toKitten(**_args) if cloud_desc.enabled_cloudwatch_logs_exports and cloud_desc.enabled_cloudwatch_logs_exports.size > 0 -MU.log bok['name'], MU::NOTICE, details: { "desc" => cloud_desc, "bok" => bok } -# bok['cloudwatch_logs'] = + bok['cloudwatch_logs'] = cloud_desc.enabled_cloudwatch_logs_exports end -MU.log bok['name'], MU::NOTICE, details: cloud_desc if bok['name'] == "pgcluster" + bok end From 88ec4f61740b39f2112346b6c718979c1338baab Mon Sep 17 00:00:00 2001 From: John Stange Date: Wed, 13 May 2020 15:32:29 -0400 Subject: [PATCH 119/124] AWS: de-flake calls to the pricing API maybe --- modules/mu/providers/aws.rb | 69 +++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 37 deletions(-) diff --git a/modules/mu/providers/aws.rb b/modules/mu/providers/aws.rb index 416140917..2ad5f0ba4 100644 --- a/modules/mu/providers/aws.rb +++ b/modules/mu/providers/aws.rb @@ -805,46 +805,41 @@ def self.listInstanceTypes(region = myRegion) @@instance_types ||= {} @@instance_types[region] ||= {} - next_token = nil - begin - # Pricing API isn't widely available, so ask a region we know supports - # it - resp = MU::Cloud::AWS.pricing(region: "us-east-1").get_products( - service_code: "AmazonEC2", - filters: [ - { - field: "productFamily", - value: "Compute Instance", - type: "TERM_MATCH" - }, - { - field: "tenancy", - value: "Shared", - type: "TERM_MATCH" - }, - { - field: "location", - value: human_region, - type: "TERM_MATCH" - } - ], - next_token: next_token - ) - resp.price_list.each { |pricing| - data = JSON.parse(pricing) - type = data["product"]["attributes"]["instanceType"] - next if 
@@instance_types[region].has_key?(type) - @@instance_types[region][type] = {} - ["ecu", "vcpu", "memory", "storage"].each { |a| - @@instance_types[region][type][a] = data["product"]["attributes"][a] + # Pricing API isn't widely available, so ask a region we know supports + # it + resp = MU::Cloud::AWS.pricing(region: "us-east-1").get_products( + service_code: "AmazonEC2", + filters: [ + { + field: "productFamily", + value: "Compute Instance", + type: "TERM_MATCH" + }, + { + field: "tenancy", + value: "Shared", + type: "TERM_MATCH" + }, + { + field: "location", + value: human_region, + type: "TERM_MATCH" } - @@instance_types[region][type]["memory"].sub!(/ GiB/, "") - @@instance_types[region][type]["memory"] = @@instance_types[region][type]["memory"].to_f - @@instance_types[region][type]["vcpu"] = @@instance_types[region][type]["vcpu"].to_f + ] + ) + resp.price_list.each { |pricing| + data = JSON.parse(pricing) + type = data["product"]["attributes"]["instanceType"] + next if @@instance_types[region].has_key?(type) + @@instance_types[region][type] = {} + ["ecu", "vcpu", "memory", "storage"].each { |a| + @@instance_types[region][type][a] = data["product"]["attributes"][a] } - next_token = resp.next_token - end while resp and next_token + @@instance_types[region][type]["memory"].sub!(/ GiB/, "") + @@instance_types[region][type]["memory"] = @@instance_types[region][type]["memory"].to_f + @@instance_types[region][type]["vcpu"] = @@instance_types[region][type]["vcpu"].to_f + } @@instance_types end From 74cdd52ebf7e6275d436a45b0b600d5c1534ea0c Mon Sep 17 00:00:00 2001 From: John Stange Date: Sun, 17 May 2020 03:53:41 -0400 Subject: [PATCH 120/124] Adoption: fixed diff reporting for subelement adds/removes deep under a resources; MU::Cloud::Google.listHabitats: default to caching results, significant speedup --- modules/mu.rb | 11 ++++++--- modules/mu/adoption.rb | 32 +++++++++++++++++++++++--- modules/mu/cleanup.rb | 2 +- modules/mu/cloud/resource_base.rb | 3 ++- 
modules/mu/mommacat/naming.rb | 9 +++++--- modules/mu/mommacat/search.rb | 2 +- modules/mu/providers/aws.rb | 2 +- modules/mu/providers/azure.rb | 2 +- modules/mu/providers/cloudformation.rb | 2 +- modules/mu/providers/google.rb | 13 +++++++---- 10 files changed, 59 insertions(+), 19 deletions(-) diff --git a/modules/mu.rb b/modules/mu.rb index cca53fc48..37ebf391f 100644 --- a/modules/mu.rb +++ b/modules/mu.rb @@ -125,13 +125,18 @@ def diff(with, on = self, level: 0, parents: [], report: {}, habitat: nil) done = [] on.sort.each { |elt| if elt.is_a?(Hash) and !MU::MommaCat.getChunkName(elt).first.nil? - elt_namestr, elt_location = MU::MommaCat.getChunkName(elt) + elt_namestr, elt_location, elt_location_list = MU::MommaCat.getChunkName(elt) with.sort.each { |other_elt| - other_elt_namestr, other_elt_location = MU::MommaCat.getChunkName(other_elt) + other_elt_namestr, other_elt_location, other_elt_location_list = MU::MommaCat.getChunkName(other_elt) # Case 1: The array element exists in both version of this array - if elt_namestr and other_elt_namestr and elt_namestr == other_elt_namestr and (elt_location.nil? or other_elt_location.nil? or elt_location == other_elt_location) + if elt_namestr and other_elt_namestr and + elt_namestr == other_elt_namestr and + (elt_location.nil? or other_elt_location.nil? or + elt_location == other_elt_location or + !(elt_location_list & other_elt_location_list).empty? 
+ ) done << elt done << other_elt break if elt == other_elt # if they're identical, we're done diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 470336d3e..29dc2a781 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -31,7 +31,6 @@ class Incomplete < MU::MuNonFatal; end } def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [], scrub_mu_isms: false, regions: [], merge: false) - @scraped = {} @clouds = clouds @types = types @@ -117,7 +116,9 @@ def scrapeClouds() if found and found.size > 0 if resclass.cfg_plural == "habitats" - found.reject! { |h| !cloudclass.listHabitats(credset).include?(h.cloud_id) } + found.reject! { |h| + !cloudclass.listHabitats(credset).include?(h.cloud_id) + } end MU.log "Found #{found.size.to_s} raw #{resclass.cfg_plural} in #{cloud}" @scraped[type] ||= {} @@ -256,8 +257,11 @@ def generateBaskets(prefix: "") end threads = [] + timers = {} + walltimers = {} @clouds.each { |cloud| @scraped.each_pair { |type, resources| + typestart = Time.now res_class = begin MU::Cloud.resourceClass(cloud, type) rescue MU::Cloud::MuCloudResourceNotImplemented @@ -267,6 +271,7 @@ def generateBaskets(prefix: "") next if !types.include?(res_class.cfg_plural) bok[res_class.cfg_plural] ||= [] + timers[type] ||= {} class_semaphore = Mutex.new @@ -283,6 +288,7 @@ def generateBaskets(prefix: "") end end threads << Thread.new(obj_thr) { |obj| + start = Time.now kitten_cfg = obj.toKitten(rootparent: @default_parent, billing: @billing, habitats: @habitats, types: @types) if kitten_cfg @@ -293,6 +299,7 @@ def generateBaskets(prefix: "") if !kitten_cfg['cloud_id'] MU.log "No cloud id in this #{res_class.cfg_name} kitten!", MU::ERR, details: kitten_cfg end + timers[type][kitten_cfg['cloud_id']] = (Time.now - start) } count += 1 end @@ -303,6 +310,7 @@ def generateBaskets(prefix: "") 
threads.each { |t| t.join } + puts "" bok[res_class.cfg_plural].sort! { |a, b| strs = [a, b].map { |x| @@ -330,9 +338,25 @@ def generateBaskets(prefix: "") end } } + walltimers[type] = Time.now - typestart } } + timers.each_pair { |type, resources| + next if resources.empty? + total = resources.values.sum + top_5 = resources.keys.sort { |a, b| + resources[b] <=> resources[a] + }.slice(0, 5).map { |k| + k.to_s+": "+sprintf("%.2fs", resources[k]) + } + if walltimers[type] < 45 + MU.log "Kittened #{resources.size.to_s} eligible #{type}s in #{sprintf("%.2fs", walltimers[type])}" + else + MU.log "Kittened #{resources.size.to_s} eligible #{type}s in #{sprintf("%.2fs", walltimers[type])} (CPU time #{sprintf("%.2fs", total)}, avg #{sprintf("%.2fs", total/resources.size)}). Top 5:", MU::NOTICE, details: top_5 + end + } + # No matching resources isn't necessarily an error next if count == 0 or bok.nil? @@ -415,7 +439,7 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") for c in (0..(path.size-1)) do path_str << (" " * (c+2)) + (path[c] || "") end - slack_path_str += " under `"+path.join("/")+"`" if path.size > 0 + slack_path_str += "#{preposition} \*"+path.join(" ⇨ ")+"\*" if path.size > 0 end path_str << "" if !path_str.empty? @@ -429,9 +453,11 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") if tier[:action] == :added color = "+ ".green + plain plain = "+ " + plain + slack += " added" elsif tier[:action] == :removed color = "- ".red + plain plain = "- " + plain + slack += " removed" end slack += " #{tier[:action]} #{preposition} \*#{loc}\*" if loc and !loc.empty? 
and [Array, Hash].include?(tier[:value].class) diff --git a/modules/mu/cleanup.rb b/modules/mu/cleanup.rb index 6a97b57a1..8854c3723 100644 --- a/modules/mu/cleanup.rb +++ b/modules/mu/cleanup.rb @@ -228,7 +228,7 @@ def self.cleanRegion(cloud, credset, region, global_vs_region_semaphore, global_ projects << $MU_CFG[cloud.downcase][credset]["project"] end begin - projects.concat(cloudclass.listHabitats(credset)) + projects.concat(cloudclass.listHabitats(credset, use_cache: false)) rescue NoMethodError end end diff --git a/modules/mu/cloud/resource_base.rb b/modules/mu/cloud/resource_base.rb index 9b1987e5a..309734669 100644 --- a/modules/mu/cloud/resource_base.rb +++ b/modules/mu/cloud/resource_base.rb @@ -101,6 +101,7 @@ def initialize(**args) raise MuError, "Can't instantiate a MU::Cloud object without a valid cloud (saw '#{my_cloud}')" end @cloudclass = MU::Cloud.resourceClass(my_cloud, self.class.shortname) + @cloud_desc_cache ||= args[:from_cloud_desc] if args[:from_cloud_desc] @cloudparentclass = MU::Cloud.cloudClass(my_cloud) @cloudobj = @cloudclass.new( mommacat: args[:mommacat], @@ -134,7 +135,6 @@ def initialize(**args) # own initialize(), so initialize all the attributes and instance # variables we know to be universal. else - class << self # Declare attributes that everyone should have PUBLIC_ATTRS.each { |a| @@ -150,6 +150,7 @@ class << self end @deploy = args[:mommacat] || args[:deploy] + @cloud_desc_cache ||= args[:from_cloud_desc] if args[:from_cloud_desc] @credentials = args[:credentials] @credentials ||= @config['credentials'] diff --git a/modules/mu/mommacat/naming.rb b/modules/mu/mommacat/naming.rb index 7f7a7c40a..271490ea8 100644 --- a/modules/mu/mommacat/naming.rb +++ b/modules/mu/mommacat/naming.rb @@ -84,6 +84,8 @@ def self.getChunkName(obj, array_of = nil) end name_string.gsub!(/\[.+?\](\[.+?\]$)/, '\1') if name_string # source is frozen so we can't just do gsub! 
+ location_list = [] + location = if obj['project'] obj['project'] elsif obj['habitat'] and (obj['habitat']['id'] or obj['habitat']['name']) @@ -93,9 +95,10 @@ def self.getChunkName(obj, array_of = nil) ['projects', 'habitats'].each { |key| if obj[key] and obj[key].is_a?(Array) - hab_str = obj[key].sort.map { |p| + location_list = obj[key].sort.map { |p| (p["name"] || p["id"]).gsub(/^.*?[^\/]+\/([^\/]+)$/, '\1') - }.join(", ") + } + hab_str = location_list.join(", ") name_string.gsub!(/^.*?[^\/]+\/([^\/]+)$/, '\1') if name_string break end @@ -103,7 +106,7 @@ def self.getChunkName(obj, array_of = nil) hab_str end - [name_string, location] + [name_string, location, location_list] end # Generate a three-character string which can be used to unique-ify the diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb index 3179f8765..b185af2d5 100644 --- a/modules/mu/mommacat/search.rb +++ b/modules/mu/mommacat/search.rb @@ -320,7 +320,7 @@ def self.search_cloud_provider(type, cloud, habitats, region, cloud_id: nil, tag habitats << nil end if resourceclass.canLiveIn.include?(:Habitat) - habitats.concat(cloudclass.listHabitats(credentials)) + habitats.concat(cloudclass.listHabitats(credentials, use_cache: false)) end end habitats << nil if habitats.empty? diff --git a/modules/mu/providers/aws.rb b/modules/mu/providers/aws.rb index 2ad5f0ba4..951e632cf 100644 --- a/modules/mu/providers/aws.rb +++ b/modules/mu/providers/aws.rb @@ -39,7 +39,7 @@ def self.virtual? 
end # List all AWS projects available to our credentials - def self.listHabitats(credentials = nil) + def self.listHabitats(credentials = nil, use_cache: true) cfg = credConfig(credentials) return [] if !cfg or !cfg['account_number'] [cfg['account_number']] diff --git a/modules/mu/providers/azure.rb b/modules/mu/providers/azure.rb index 6a260bd33..fbbe4756a 100644 --- a/modules/mu/providers/azure.rb +++ b/modules/mu/providers/azure.rb @@ -48,7 +48,7 @@ def self.genGUID end # List all Azure subscriptions available to our credentials - def self.listHabitats(credentials = nil) + def self.listHabitats(credentials = nil, use_cache: true) [] end diff --git a/modules/mu/providers/cloudformation.rb b/modules/mu/providers/cloudformation.rb index cec5bd605..1dcc0e058 100644 --- a/modules/mu/providers/cloudformation.rb +++ b/modules/mu/providers/cloudformation.rb @@ -34,7 +34,7 @@ def self.virtual? end # List all AWS projects available to our credentials - def self.listHabitats(credentials = nil) + def self.listHabitats(credentials = nil, use_cache: true) MU::Cloud::AWS.listHabitats(credentials) end diff --git a/modules/mu/providers/google.rb b/modules/mu/providers/google.rb index 57b5e7b2a..30ae31340 100644 --- a/modules/mu/providers/google.rb +++ b/modules/mu/providers/google.rb @@ -706,21 +706,26 @@ def self.defaultFolder(credentials = nil) nil end + @allprojects = [] + # List all Google Cloud Platform projects available to our credentials - def self.listHabitats(credentials = nil) + def self.listHabitats(credentials = nil, use_cache: true) cfg = credConfig(credentials) return [] if !cfg if cfg['restrict_to_habitats'] and cfg['restrict_to_habitats'].is_a?(Array) cfg['restrict_to_habitats'] << cfg['project'] if cfg['project'] return cfg['restrict_to_habitats'].uniq end + if @allprojects and !@allprojects.empty? and use_cache + return @allprojects + end result = MU::Cloud::Google.resource_manager(credentials: credentials).list_projects result.projects.reject! 
{ |p| p.lifecycle_state == "DELETE_REQUESTED" } - allprojects = result.projects.map { |p| p.project_id } + @allprojects = result.projects.map { |p| p.project_id } if cfg['ignore_habitats'] and cfg['ignore_habitats'].is_a?(Array) - allprojects.reject! { |p| cfg['ignore_habitats'].include?(p) } + @allprojects.reject! { |p| cfg['ignore_habitats'].include?(p) } end - allprojects + @allprojects end @@regions = {} From 812032e24e345c4963ba2e28ac45d72e2d61754a Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 19 May 2020 21:36:04 -0400 Subject: [PATCH 121/124] Adoption: A little more tweaking of diff Slack output; Google::Role.find: don't blow a rod if get_role fails --- modules/mu/adoption.rb | 7 ++++--- modules/mu/mommacat/naming.rb | 20 ++++++++++++++++++-- modules/mu/providers/google/role.rb | 9 +++++++-- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 29dc2a781..81d4244fe 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -30,6 +30,7 @@ class Incomplete < MU::MuNonFatal; end :omnibus => "Jam everything into one monolothic configuration" } + def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [], scrub_mu_isms: false, regions: [], merge: false) @scraped = {} @clouds = clouds @@ -338,7 +339,8 @@ def generateBaskets(prefix: "") end } } - walltimers[type] = Time.now - typestart + walltimers[type] ||= 0 + walltimers[type] += (Time.now - typestart) } } @@ -433,13 +435,13 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") slack_path_str = "" if tier[:parents] and tier[:parents].size > 2 path = tier[:parents].clone + slack_path_str += "#{preposition} \*"+path.join(" ⇨ ")+"\*" if path.size > 0 path.shift path.shift path.pop if path.last == name for c in (0..(path.size-1)) do path_str << (" " * (c+2)) + (path[c] || "") end 
- slack_path_str += "#{preposition} \*"+path.join(" ⇨ ")+"\*" if path.size > 0 end path_str << "" if !path_str.empty? @@ -489,7 +491,6 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") end else tier[:value] ||= "" - slack += " was #{tier[:action]}" if ![:added, :removed].include?(tier[:action]) myreport["slack"] += " New #{tier[:field] ? "`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" end diff --git a/modules/mu/mommacat/naming.rb b/modules/mu/mommacat/naming.rb index 271490ea8..37faa82b5 100644 --- a/modules/mu/mommacat/naming.rb +++ b/modules/mu/mommacat/naming.rb @@ -19,6 +19,16 @@ module MU # the normal synchronous deploy sequence invoked by *mu-deploy*. class MommaCat + # Lookup table to translate the word "habitat" back to its + # provider-specific jargon + HABITAT_SYNONYMS = { + "AWS" => "account", + "CloudFormation" => "account", + "Google" => "project", + "Azure" => "subscription", + "VMWare" => "sddc" + } + # Given a cloud provider's native descriptor for a resource, make some # reasonable guesses about what the thing's name should be. def self.guessName(desc, resourceclass, cloud_id: nil, tag_value: nil) @@ -52,8 +62,9 @@ def self.guessName(desc, resourceclass, cloud_id: nil, tag_value: nil) # extracted, returns nil. # @param obj [Hash] # @param array_of [String] + # @param habitat_translate [String] # @return [Array] - def self.getChunkName(obj, array_of = nil) + def self.getChunkName(obj, array_of = nil, habitat_translate: nil) return [nil, nil] if obj.nil? if [String, Integer, Boolean].include?(obj.class) return [obj, nil] @@ -82,7 +93,12 @@ def self.getChunkName(obj, array_of = nil) } found_it end - name_string.gsub!(/\[.+?\](\[.+?\]$)/, '\1') if name_string # source is frozen so we can't just do gsub! 
+ if name_string + name_string.gsub!(/\[.+?\](\[.+?\]$)/, '\1') + if habitat_translate and HABITAT_SYNONYMS[habitat_translate] + name_string.sub!(/^habitats?\[(.+?)\]/i, HABITAT_SYNONYMS[habitat_translate]+'[\1]') + end + end location_list = [] diff --git a/modules/mu/providers/google/role.rb b/modules/mu/providers/google/role.rb index b5ebf7cdb..3d0ff10bc 100644 --- a/modules/mu/providers/google/role.rb +++ b/modules/mu/providers/google/role.rb @@ -591,8 +591,13 @@ def self.find(**args) bindings['by_scope']['projects'][args[:project]] bindings['by_scope']['projects'][args[:project]].keys.each { |r| if r.match(/^roles\//) - role = MU::Cloud::Google.iam(credentials: args[:credentials]).get_role(r) - found[role.name] = role + begin + role = MU::Cloud::Google.iam(credentials: args[:credentials]).get_role(r) + found[role.name] = role + rescue ::Google::Apis::ClientError => e + raise e if !e.message.match(/(?:forbidden|notFound): /) + MU.log "Failed MU::Cloud::Google.iam(credentials: #{args[:credentials]}).get_role(#{r})", MU::WARN, details: e.message + end elsif !found[r] # MU.log "NEED TO GET #{r}", MU::WARN end From e450015ce29b4ee324adf670c3c6059ceb8b89f9 Mon Sep 17 00:00:00 2001 From: John Stange Date: Sat, 13 Jun 2020 02:14:00 -0400 Subject: [PATCH 122/124] Adoption (diff): Don't step on Slack reports of changes to leaf field values --- modules/mu/adoption.rb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb index 81d4244fe..aa8dad6fc 100644 --- a/modules/mu/adoption.rb +++ b/modules/mu/adoption.rb @@ -467,7 +467,7 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") plain = path_str.join(" => \n") + indent + plain color = path_str.join(" => \n") + indent + color - slack += " "+slack_path_str+"." + slack += " "+slack_path_str if !slack_path_str.empty? 
myreport = { "slack" => slack, "plain" => plain, @@ -491,14 +491,14 @@ def crawlChangeReport(tier, parent_key = nil, indent: "") end else tier[:value] ||= "" - if ![:added, :removed].include?(tier[:action]) - myreport["slack"] += " New #{tier[:field] ? "`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" + if ![:removed].include?(tier[:action]) + myreport["slack"] += ". New #{tier[:field] ? "`"+tier[:field]+"`" : :value}: \*#{tier[:value]}\*" + else + myreport["slack"] += " (was \*#{tier[:value]}\*)" end append = tier[:value].to_s.bold end - myreport["slack"] = slack - if append and !append.empty? myreport["plain"] += " =>\n "+indent+append myreport["color"] += " =>\n "+indent+append From 8a01c8bb715779284a4c0794cc37ddc4e0a8e391 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 16 Jun 2020 15:43:49 -0400 Subject: [PATCH 123/124] 3.2.0 release version tag --- cloud-mu.gemspec | 4 ++-- modules/Gemfile.lock | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud-mu.gemspec b/cloud-mu.gemspec index e0ad3af1f..dab88f203 100644 --- a/cloud-mu.gemspec +++ b/cloud-mu.gemspec @@ -17,8 +17,8 @@ end Gem::Specification.new do |s| s.name = 'cloud-mu' - s.version = '3.2.0beta1' - s.date = '2020-05-13' + s.version = '3.2.0' + s.date = '2020-06-16' s.require_paths = ['modules'] s.required_ruby_version = '>= 2.4' s.summary = "The eGTLabs Mu toolkit for unified cloud deployments" diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock index ad88e56bb..b4af0b84d 100644 --- a/modules/Gemfile.lock +++ b/modules/Gemfile.lock @@ -10,7 +10,7 @@ GIT PATH remote: .. 
specs: - cloud-mu (3.2.0beta1) + cloud-mu (3.2.0) addressable (~> 2.5) aws-sdk-core (< 3) azure_sdk (~> 0.52) From c815ff516c733a0ce85e48857aa38e8935721ba7 Mon Sep 17 00:00:00 2001 From: John Stange Date: Tue, 16 Jun 2020 20:34:30 -0400 Subject: [PATCH 124/124] Azure: if you're going to fail to list regions at least throw a more informative error about it --- modules/mu/providers/azure.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/mu/providers/azure.rb b/modules/mu/providers/azure.rb index fbbe4756a..837482b4e 100644 --- a/modules/mu/providers/azure.rb +++ b/modules/mu/providers/azure.rb @@ -284,6 +284,9 @@ def self.listRegions(us_only = false, credentials: nil) end raise e end + if !sdk_response + raise MuError, "Nil response from Azure API attempting list_locations(#{subscription})" + end sdk_response.value.each do | region | @@regions.push(region.name)