From 0e05ed1410b83b5509efe68aa9e49026404e8db0 Mon Sep 17 00:00:00 2001 From: Andrew Lee Date: Wed, 9 Oct 2024 14:53:32 -0400 Subject: [PATCH 1/5] Add new AutoScaler and CustomScalerBase classes (#1429) * Adding infrastructure to support ipopt_v2 * Moving core/util to ipopt_v2 * Moving MH initializer to ipopt_v2 * Fixing pint version issue * Set TSA to use old IPOPT interface * Trying to resolve Windows failures * Working on platofrm dependent failure * BTInitializer with presolve * Moving last bits of core code to ipopt_v2 * Starting on idaes/models * Removing ma57_automatic_scaling default and updating idaes/models/control * idaes/model/properties part 1 * Remaining parts of idaes/models/proeprties * Fixing typo * Switching idaes/models/unit_models to ipopt_v2 * Attempt to work around HXLC issues for now * Some clean up * Switching modular properties initializer to solver indexed blocks * Addressing comments * Recovering from previous branch * Some clean up * Adding ScalerBase class and tests * Working on CustomScalerBase * Nominal value constraint scaling * Adding some initial integration tests for scaling * Some more nominal magnitude scaling approaches * Prototyping pseudojacobian scaler * Trying to debug pseudojacobian * Removing unnecessary import * Addressing pylint issues * Cleaning up nominal jacobian * More methods for CustomScalerBase * Prototyping Gibbs reactor scaler * Gibbs reactor constraint scaling * Working on testing and profiling * Refining Gibbs scaler * Refining nominal value walker * Fixing walker tests * Testing GibbsScaler with initialization * Fixing auto norm scaling on indexed blocks * Testing scaling profiler * Fixing typos * Fixing pylint issue * Imrpoving some doc strings * Apply suggestions from code review Co-authored-by: MarcusHolly <96305519+MarcusHolly@users.noreply.github.com> * Fixing issue with autoscaling vars with value None * Adding profiler to __init__ * Fixing name for RSS method * Fixing import of pyomo.environ * Allowing 
default scaling for indexed components * Adding catch for critical solver failure in profiler * Starting on docs * Finishing docs and unifiying method names * Profiler report methods and docs * Fixing typos * Pylint: fix unnecessary f-string * Addressing comments * Apply suggestions from code review Co-authored-by: agarciadiego <40575271+agarciadiego@users.noreply.github.com> * Addressing simple issues * Switching to StringEnum for scheme argument * Switching to ComponentMap for submodel scalers * Fixing Gibbs reactor test * fixing another test --------- Co-authored-by: MarcusHolly <96305519+MarcusHolly@users.noreply.github.com> Co-authored-by: agarciadiego <40575271+agarciadiego@users.noreply.github.com> --- docs/explanations/index.rst | 1 + .../applying_scaling_tools.rst | 61 + .../diagnosing_scaling_issues.rst | 20 + docs/explanations/scaling_toolbox/index.rst | 30 + .../scaling_toolbox/scaling_theory.rst | 39 + .../scaling_toolbox/scaling_toolbox.rst | 57 + .../scaling_toolbox/scaling_workflow.rst | 35 + docs/index.rst | 2 +- docs/reference_guides/core/util/scaling.rst | 2 +- docs/reference_guides/index.rst | 2 +- .../generic/unit_models/gibbs_reactor.rst | 6 + docs/reference_guides/scaling.rst | 108 -- docs/reference_guides/scaling/autoscaler.rst | 5 + .../scaling/custom_scaler.rst | 8 + docs/reference_guides/scaling/scaling.rst | 20 + .../scaling/scaling_profiler.rst | 6 + .../scaling/scaling_utils.rst | 6 + idaes/core/base/process_base.py | 3 +- idaes/core/base/tests/test_process_base.py | 1 + idaes/core/scaling/__init__.py | 26 + idaes/core/scaling/autoscaling.py | 232 +++ idaes/core/scaling/custom_scaler_base.py | 620 ++++++++ idaes/core/scaling/scaler_profiling.py | 375 +++++ idaes/core/scaling/scaling_base.py | 231 +++ idaes/core/scaling/tests/__init__.py | 0 idaes/core/scaling/tests/gibbs_solution.json | 725 +++++++++ .../scaling/tests/load_scaling_factors.json | 29 + idaes/core/scaling/tests/test_autoscaling.py | 402 +++++ 
.../scaling/tests/test_custom_scaler_base.py | 710 +++++++++ .../tests/test_custom_scaling_integration.py | 418 +++++ .../tests/test_nominal_value_walker.py | 745 +++++++++ idaes/core/scaling/tests/test_scaling_base.py | 319 ++++ .../scaling/tests/test_scaling_profiler.py | 624 ++++++++ idaes/core/scaling/tests/test_util.py | 1351 +++++++++++++++++ idaes/core/scaling/util.py | 812 ++++++++++ idaes/core/util/scaling.py | 3 +- idaes/models/unit_models/gibbs_reactor.py | 162 +- idaes/models/unit_models/tests/test_gibbs.py | 1262 ++------------- .../unit_models/tests/test_gibbs_scaling.py | 469 ++++++ 39 files changed, 8652 insertions(+), 1275 deletions(-) create mode 100644 docs/explanations/scaling_toolbox/applying_scaling_tools.rst create mode 100644 docs/explanations/scaling_toolbox/diagnosing_scaling_issues.rst create mode 100644 docs/explanations/scaling_toolbox/index.rst create mode 100644 docs/explanations/scaling_toolbox/scaling_theory.rst create mode 100644 docs/explanations/scaling_toolbox/scaling_toolbox.rst create mode 100644 docs/explanations/scaling_toolbox/scaling_workflow.rst delete mode 100644 docs/reference_guides/scaling.rst create mode 100644 docs/reference_guides/scaling/autoscaler.rst create mode 100644 docs/reference_guides/scaling/custom_scaler.rst create mode 100644 docs/reference_guides/scaling/scaling.rst create mode 100644 docs/reference_guides/scaling/scaling_profiler.rst create mode 100644 docs/reference_guides/scaling/scaling_utils.rst create mode 100644 idaes/core/scaling/__init__.py create mode 100644 idaes/core/scaling/autoscaling.py create mode 100644 idaes/core/scaling/custom_scaler_base.py create mode 100644 idaes/core/scaling/scaler_profiling.py create mode 100644 idaes/core/scaling/scaling_base.py create mode 100644 idaes/core/scaling/tests/__init__.py create mode 100644 idaes/core/scaling/tests/gibbs_solution.json create mode 100644 idaes/core/scaling/tests/load_scaling_factors.json create mode 100644 
idaes/core/scaling/tests/test_autoscaling.py create mode 100644 idaes/core/scaling/tests/test_custom_scaler_base.py create mode 100644 idaes/core/scaling/tests/test_custom_scaling_integration.py create mode 100644 idaes/core/scaling/tests/test_nominal_value_walker.py create mode 100644 idaes/core/scaling/tests/test_scaling_base.py create mode 100644 idaes/core/scaling/tests/test_scaling_profiler.py create mode 100644 idaes/core/scaling/tests/test_util.py create mode 100644 idaes/core/scaling/util.py create mode 100644 idaes/models/unit_models/tests/test_gibbs_scaling.py diff --git a/docs/explanations/index.rst b/docs/explanations/index.rst index d243f51371..06b6fac74e 100644 --- a/docs/explanations/index.rst +++ b/docs/explanations/index.rst @@ -9,6 +9,7 @@ Explanations components/index conventions model_diagnostics/index + scaling_toolbox/index modeling_extensions/index related_packages/index faq diff --git a/docs/explanations/scaling_toolbox/applying_scaling_tools.rst b/docs/explanations/scaling_toolbox/applying_scaling_tools.rst new file mode 100644 index 0000000000..f7ed80cf52 --- /dev/null +++ b/docs/explanations/scaling_toolbox/applying_scaling_tools.rst @@ -0,0 +1,61 @@ +Applying Scaling Tools +====================== + +.. contents:: :local: + :depth: 2 + +Basic Usage +----------- + +.. Note:: + + Modelers should endeavor to provide as much scaling information as possible before calling Scalers in order to provide as much information on your particular case as possible. + +All Scalers in the Scaling Toolbox support a ``scale_model`` method that can be called to apply scaling to a model of the appropriate type. The ``scale_model`` method can be called as shown below: + +.. 
code-block:: python + + # Import Scaler class from library + from idaes.core.scaling import AutoScaler, set_scaling_factor + + # Set some scaling factors + set_scaling_factor(my_model.my_var, 1e-3) + + # Create instance of Scaler object + my_scaler = AutoScaler() + + # Apply Scaler to model + my_scaler.scale_model(my_model) + +Scaler Options +'''''''''''''' + +Many Scalers will support additional optional arguments which can be used to provide finer control over the scaling routine. See the documentation for the Scaler you are using for more details. Documentation for the core Scalers can be found here: + +* :ref:`AutoScaler Class` +* :ref:`CustomScalerBase Class` + +Advanced Usage +-------------- + +In most cases, Scaler classes will have individual methods for scaling variables and constraints that can be called separately. Advanced modelers may wish to make use of these to gain even finer control over the scaling process, and may wish to experiment with mixing-and-matching routines from different Scalers (e.g., a modeler may wish to use the AutoScaler for variables but combine it with a more customized routine for constraint scaling from a Custom Scaler). + +The CustomScalerBase class also contains a number of methods for common scaling approaches that can be applied to individual variables and constraints, allowing advanced modelers to construct their own custom scaling routines. + +For example, the simple example above can also be implemented by calling methods to scale the variables and constraints as shown below: + +.. 
code-block:: python + + # Import Scaler class from library + from idaes.core.scaling import AutoScaler, set_scaling_factor + + # Set some scaling factors + set_scaling_factor(my_model.my_var, 1e-3) + + # Create instance of Scaler object + my_scaler = AutoScaler() + + # Apply Scaler to model + my_scaler.scale_variables_by_magnitude(my_model) + my_scaler.scale_constraints_by_jacobian_norm(my_model) + diff --git a/docs/explanations/scaling_toolbox/diagnosing_scaling_issues.rst b/docs/explanations/scaling_toolbox/diagnosing_scaling_issues.rst new file mode 100644 index 0000000000..71b5216309 --- /dev/null +++ b/docs/explanations/scaling_toolbox/diagnosing_scaling_issues.rst @@ -0,0 +1,20 @@ +Diagnosing Scaling Issues +========================= + +As mentioned in the previous section, the :ref:`IDAES Diagnostics Toolbox` contains a number of methods that can be used to help identify potential scaling issues. Scaling issues depend on the numerical state of the model, and thus the `report_numerical_issues` method is the place to start when looking for scaling issues. + +Some common signs of poor scaling which can be seen in the numerical issues report include: + +* large Jacobian Condition Numbers (>1e10), +* variables with very large or very small values, +* variables with values close to zero, +* variables with values close to a bound (conditionally), +* constraints with mismatched terms, +* constraints with potential cancellations, +* variables and constraints with extreme Jacobian norms, +* extreme entries in Jacobian matrix (conditionally). + +If you see any of these warnings in the model diagnostics output, it is a sign that you have potential scaling issues which should be investigated in order to improve the performance, robustness and accuracy of your model. + +.. Note:: + Not all scaling issues can be resolved through the application of scaling factors. 
In some cases, such as constraints with mismatched terms or possible cancellations, the constraint itself may be inherently poorly posed and thus may need to be refactored to resolve the scaling issue. diff --git a/docs/explanations/scaling_toolbox/index.rst b/docs/explanations/scaling_toolbox/index.rst new file mode 100644 index 0000000000..b81024cd81 --- /dev/null +++ b/docs/explanations/scaling_toolbox/index.rst @@ -0,0 +1,30 @@ +Scaling Toolbox +=============== + +Introduction +------------ + +The numerical scaling of a model is critical to its robustness and tractability, and can mean the difference between finding a solution and a solver failure. Scaling of non-linear models is a key step in the model development and application workflow, and often requires a significant amount of time and effort. The IDAES Scaling Toolbox aims to assist users in this task. + +Key Points +---------- + +* Scaling is critical for model performance +* Scaling is not a one-off process, but something that needs to be reassessed before each solver call +* There are three aspects to scaling which are equally important +* Model scaling is more about avoiding poor scaling than finding “good” scaling +* Order-of-magnitude scaling factors are often better than exact values +* A number of tools are available to assist modelers with this task + +Topics +------ + +.. toctree:: + :maxdepth: 1 + + scaling_theory + diagnosing_scaling_issues + scaling_toolbox + applying_scaling_tools + scaling_workflow + diff --git a/docs/explanations/scaling_toolbox/scaling_theory.rst b/docs/explanations/scaling_toolbox/scaling_theory.rst new file mode 100644 index 0000000000..6c8a9a4300 --- /dev/null +++ b/docs/explanations/scaling_toolbox/scaling_theory.rst @@ -0,0 +1,39 @@ +Scaling Theory +============== + +What is Model Scaling? 
+---------------------- + +The primary goal of model scaling is to minimize the effects of numerical round-off errors (due to machine precision) and to ensure that the model equations are well-posed and non-singular (to numerical tolerance). In reality, there are actually three inter-related scaling concepts which contribute to the overall performance and tractability of a model. + +Types of Scaling +'''''''''''''''' + +* Variable scaling determines what sort of change in a variable is “significant” or not. This factors into things like, for example, how an interior point method (such as IPOPT) will behave (i.e., how far into the interior of the feasible region a variable should be) and step size determination. + +* Constraint residual scaling refers to the magnitude of the residual of all constraints in the model. This is important as this is required to determine whether or not a model has converged, thus it is important that a residual equal to the solver tolerance does not significantly alter the solution to the model. + + * E.g. consider a constraint A=B. If the magnitude of A and B are 1e6 and the solver tolerance is 1e-6, this means that A and B need to be solved to 12 significant figures of precision to converge the constraint (which may be unnecessarily onerous). Similarly, if A and B were of order 1e-6, then they will be considered equal if A=1e-6 and B=0; i.e. ±100% error. + +* Jacobian scaling refers to the overall scaling and conditioning of the problem Jacobian. This is important as this determines the ability of the solver to find a path from the initial state to the final solution. It is important to ensure that the Jacobian is well-conditioned both in order to get good numerical behavior from the solver and also to ensure that floating point round-off error does not result in large changes to the variables at the solution. 
+ +These aspects are not always complementary, and there are often cases where improving one aspect of the model scaling can negatively affect another aspect (e.g., focusing too much on Jacobian condition number can often result in poor constraint residual tolerances). Additionally, each aspect affects different parts of the solver routine; Jacobian scaling is important for determining the step direction and size at each iteration, whilst constraint residual scaling is important for determining if and when a solution is found. The relative importance of each of these depends on the solver being used (e.g., for IPOPT constraint residual scaling is more important than Jacobian scaling as IPOPT does its own internal scaling of the Jacobian), however in general all of these will have an impact upon the solver behavior and users should endeavor to have good scaling for all aspects of the model. + +What Determines Good Scaling? +''''''''''''''''''''''''''''' + +Due to the three different aspects of scaling and the fact that they can sometimes compete with each other, it is hard (if not impossible) to define a single metric for what defines “good” scaling. However, common indicators of bad scaling are: + +* Variables with extremely large or small scaled magnitudes (>1e6 or <1e-6) – you should generally aim to have most variables have scaled magnitudes between 1 and 10. + + * An important exception is any variable that can pass through zero, like enthalpy or bidirectional fluxes. These variables should be scaled based on “significant variation” (think something like a standard deviation) of the variable throughout different conditions of the model. + * Zero flux boundary conditions should be scaled like the other fluxes in the model. 
+ +* Constraints with 1 or more very large terms – as each constraint must be solved to solver tolerance in order to meet the convergence criteria, it is important that change on the order of magnitude of the solver tolerance is meaningful for each constraint term. Small perturbations in these large terms can cause large changes in the constraint residual. +* Constraints in which all terms are very small – having a few small terms combined with moderate terms is often acceptable (e.g., some terms might go to zero under certain circumstances), but constraints where all terms are of small magnitude is indicative of poor scaling. +* Constraints with terms that are of a significantly different order of magnitude. This means that a change that is significant in the small terms may be negligible for the larger terms (i.e., you can make significant changes in the small term with no effect on the larger term), whilst a change in the larger terms may result in huge changes in the smaller terms (i.e., a change in the final significant figure of the large term might result in orders of magnitude change in the smaller term, thus causing a cascade of changes through your model). +* Any time a difference is taken between several large numbers to obtain a small number, the model is at risk of catastrophic cancellation and ill-conditioning +* Very large Jacobian Condition Number (>1e10). The condition number represents a worst-case error amplification in the problem. For example, with a condition number of 1e10 then a round-off error due to machine precision (~1e-16) could result in a change in model state of 1e-6 (= 1e10 x 1e-16); note that most solvers use a default tolerance of 1e-6 to 1e-8, thus with a condition number of 1e10 or greater, there is a risk that your solution may be dominated by round-off errors. + +The :ref:`IDAES Diagnostics Toolbox` contains methods to identify and report these issues, and these should be used to help identify potential scaling issues. 
+ diff --git a/docs/explanations/scaling_toolbox/scaling_toolbox.rst b/docs/explanations/scaling_toolbox/scaling_toolbox.rst new file mode 100644 index 0000000000..56b29d23e6 --- /dev/null +++ b/docs/explanations/scaling_toolbox/scaling_toolbox.rst @@ -0,0 +1,57 @@ +Scaling Toolbox +=============== + +.. contents:: :local: + :depth: 1 + +Approaches to Scaling +--------------------- + +Ultimately, the modeler is the one with the most understanding of the system being modeled and the expected solution. In an ideal world, the modeler would have sufficient time and knowledge to develop a set of scaling factors customized for their specific use case. However, process models are generally very large and modelers often lack the time and understanding to develop tailored scaling routines, thus there is a need for tools to support the modeler in this. + +In general, there are two types of scaling routines that can be developed for a model: + +* those that depend on the current state of the model (often referred to as auto-scalers), and thus require an initial solution, and +* those that make use of “best-guess” measures and heuristics for scaling and can thus be used before a model is initialized but require development by an expert modeler. + +In either case, scaling depends heavily on input from the end user. Whilst it might be possible to get a good idea of the scaling for the current state of the model (assuming the model has been initialized), this may not be indicative of the scaling at the final solution (generally, we want to solve or optimize for some unknown solution). Thus, the modeler needs to provide as much information as they can of the expected scaling near the solution point as possible (based on engineering knowledge or intuition, or solutions for similar problems in the past). 
It is important to note that scaling does not need to be exact – order of magnitude approximations are often as good (or better) than precise values – and the aim is less about providing “good” scaling as it is about avoiding bad scaling. + +Auto-Scalers (Current State Scaling) +------------------------------------ + +Pros: + +* fully automated, can scale an entire model with one command +* applicable to any model +* useful for getting initial values for scaling factors, or filling-in missing scaling factors + +Cons: + +* require an initial solution, and thus not useful for pre-initialization scaling +* consider only the current model state, and often overscale the problem + +For models with an initial (ideally feasible) solution, full information on the state of the model and the Jacobian is available. A number of approaches are available that can take this information and generate scaling factors for the entire model in order to meet some overall model characteristic (e.g., minimizing the norm of the Jacobian matrix). These have the advantage of requiring minimal input from the modeler and being able to scale an entire model in one go. However, these approaches require an initial solution (and thus are not useful for pre-initialization scaling) and as they consider only a single characteristic metric calculated at the current model state, they can often over scale the model and may not provide the best performance. + +A suite of autoscaler methods is available as part of the IDAES Scaling Toolbox through the :ref:`AutoScaler Class`. 
+ +Custom Scalers (Best-Guess Scaling) +----------------------------------- + +Pros: + +* independent of model state, and can thus be used on uninitialized models + +Cons: + +* specific to a given model type - Custom Scalers developed for one model are not applicable to other models +* dependent on developer skill and foresight, and may not give good results for all cases + +The alternative to model-state based scaling is to develop customized scaling routines for individual models which take into account the model structure and behavior to estimate scaling factors. These routines are generally written by the original model developer, and thus depend heavily on the skill and foresight of the developer. On the other hand, as these routines depend on knowledge of the model structure rather than the model state, these routines can be applied to uninitialized models (given sufficient estimates of a few key variables). + +A suite of methods to assist with developing custom scaling routines are available as part of the IDAES Scaling Toolbox through the :ref:`CustomScalerBase Class`. Many models in the core IDAES libraries will have custom Scalers available for use - see the documentation of individual models for these. + +Utility Functions +----------------- + +The IDAES Scaling Toolbox also contains a number of utility functions for working with and reporting model scaling. More details on these functions can be :ref:`found here`. + diff --git a/docs/explanations/scaling_toolbox/scaling_workflow.rst b/docs/explanations/scaling_toolbox/scaling_workflow.rst new file mode 100644 index 0000000000..a7da5a8aec --- /dev/null +++ b/docs/explanations/scaling_toolbox/scaling_workflow.rst @@ -0,0 +1,35 @@ +Scaling Workflow +================ + +.. contents:: :local: + :depth: 1 + +General Approach +------------------ + +When scaling models, it is often difficult to know where to start. 
``AutoScalers`` may appear to be attractive for this as they can scale an entire model (of any size) in one go and need minimal user input, but their narrow focus on the current model state is often insufficient for optimization purposes. Rather, it is generally necessary (and strongly encouraged) that modelers try to provide as much information about scaling as they can through their understanding of the system being modeled, and where possible the model structure. + +As a starting point, the following workflow is recommended when scaling any model: + +1. Understand as much about the model as possible, including expected input and output values and where possible the formulation of the constraints. +2. Make use of the Diagnostics Toolbox to ensure there are no structural issues and to identify potential scaling issues that must be resolved. This also provides a reference point for checking to see that your scaling factors are improving the state of the model. Modelers are encouraged to use these tools throughout the process to monitor their progress, however note that a partially scaled model will often have more issues than a completely unscaled model (this is often expected, and not necessarily a sign that you are going the wrong way). Of particular note are the ``display_variables_with_extreme_jacobians`` and ``display_constraints_with_extreme_jacobians`` methods (as well as the ``SVDToolbox`` for advanced users). +3. Start by scaling those variables you have the most information about – these will generally be variables such as process inputs, design and operating conditions, etc. +4. Working from what you already know, try to project expected scaling for other variables, repeating as necessary. +5. Once you have established scaling for all the variables (or as many as you can), start scaling constraints in a similar fashion (start with what you understand best). Make use of the scaling methods provided by the ``CustomScalerBase`` class to assist you in this. 
+ +Scaling Hierarchical Models +--------------------------- + +The hierarchical nature of IDAES models adds an additional challenge to scaling workflows, but also provides opportunities for modularization and the creation of Scalers dedicated to scaling specific sub-sections of a model (e.g., unit and property models). + +Flowsheet Scaling Workflow +'''''''''''''''''''''''''' + +When scaling flowsheets, an approach similar to initialization can be used, where a modeler starts at the inlet (feed) to their process and scales each unit in a sequential fashion, propagating scaling factors along the connecting ``Arcs`` as they go (see ``propagate_state_scaling`` method in the ``CustomScalerBase`` class). Each unit model in the process can then be scaled in isolation applying Scalers suited to that unit model. Recycle loops bring an additional challenge to this, however careful consideration of the expected state of the recycle can help guide this process, or traditional iterative techniques can be applied. + +Scaling Unit and Property Models +'''''''''''''''''''''''''''''''' + +Unit models in turn are hierarchical constructs, which depend on sub-models such as the ``StateBlocks`` used to calculate physical properties. Each of these sub-models can have Scalers of their own, and thus a hierarchical approach to scaling can be applied where the unit model first calls a Scaler for the inlet state, then propagates the scaling to the outlet state and calls a Scaler for that StateBlock, and then finally uses this information to inform the scaling of the unit level variables and constraints (including those in any control volume(s)). The ``CustomScalerBase`` class contains a number of methods and tools to assist in this process, or the experienced modeler may wish to perform these steps manually. 
+ + diff --git a/docs/index.rst b/docs/index.rst index 5750cf83f4..8f8f4f290b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -59,7 +59,7 @@ Contents - Reference Guides | :doc:`IDAES Model Libraries ` | :doc:`IDAES Core ` - | :doc:`Model Scaling Toolbox ` + | :doc:`Model Scaling Toolbox ` | :doc:`Initializing Models ` | :doc:`Command-line tools ` | :doc:`Configuration ` diff --git a/docs/reference_guides/core/util/scaling.rst b/docs/reference_guides/core/util/scaling.rst index 19949977d1..681856d4c5 100644 --- a/docs/reference_guides/core/util/scaling.rst +++ b/docs/reference_guides/core/util/scaling.rst @@ -4,7 +4,7 @@ Scaling Methods .. note:: In v2.0, IDAES is beginning to deploy a new suite of scaling tools. This documentation refers to the older scaling tools. - For documentation of the new scaling toolbox, :ref:`see here`. + For documentation of the new scaling toolbox, :ref:`see here`. .. module:: idaes.core.util.scaling :noindex: diff --git a/docs/reference_guides/index.rst b/docs/reference_guides/index.rst index e8c60d539b..8f0c09a74e 100644 --- a/docs/reference_guides/index.rst +++ b/docs/reference_guides/index.rst @@ -8,7 +8,7 @@ Reference Guides model_libraries/index core/index - scaling + scaling/scaling initialization/index commands/index configuration diff --git a/docs/reference_guides/model_libraries/generic/unit_models/gibbs_reactor.rst b/docs/reference_guides/model_libraries/generic/unit_models/gibbs_reactor.rst index 566a644459..fbe42f200c 100644 --- a/docs/reference_guides/model_libraries/generic/unit_models/gibbs_reactor.rst +++ b/docs/reference_guides/model_libraries/generic/unit_models/gibbs_reactor.rst @@ -69,3 +69,9 @@ GibbsReactorData Class .. autoclass:: GibbsReactorData :members: + +GibbsReactorScaler Class +------------------------ + +.. 
autoclass:: GibbsReactorScaler + :members: diff --git a/docs/reference_guides/scaling.rst b/docs/reference_guides/scaling.rst deleted file mode 100644 index 365232ef02..0000000000 --- a/docs/reference_guides/scaling.rst +++ /dev/null @@ -1,108 +0,0 @@ -Scaling Toolbox -=============== - -.. note:: - - In v2.0, IDAES is beginning to deploy a new suite of scaling tools. This documentation refers to the new tools. - For documentation of the older scaling tools, :ref:`see here`. - -.. module:: idaes.core.util.scaling - -.. contents:: Contents - :depth: 2 - -Importance of Model Scaling ---------------------------- - -Creating well scaled models is important for increasing the efficiency and reliability of solvers. However, depending on units of measure and process scale, variables and constraints for process applications are often badly scaled unless efforts are taken to rescale the problem. - -The IDAES-IP takes the approach of writing the problem in the most natural form, and then applying model transformations to convert the problem to a more tractable form for the solver. This is achieved through leveraging the `Pyomo Scaling Transformation `_ and model ``Suffixes``. In this way, users may assign scaling factors to any variable, constraint or objective function in the problem in order to convert it from its natural form into a better scaled equivalent. The advantage of using ``Suffixes`` and transformations over more traditional approaches is that this allows scaling to be adjusted as required without needing to rework the model code. - -As a general rule of thumb, all variables, component and objective functions in a problem should be scaled to have a magnitude between 1 and 10 in order to ensure maximum robustness of the problem (although this can vary between different solvers). 
As quantities within process models can vary significantly (even within the same process), achieving good scaling requires a lot of input from the user to determine the expected magnitude of each variable and constraint and thus the appropriate scaling factor, To assist users with this, the IDAES-IP provides a number of utility functions for setting scaling factors and calculating initial guesses for scaling factors from available information. These tools can be found in the ``idaes.core.util.scaling`` module. - -Setting Scaling Factors ------------------------ - -Suffixes are used to specify scaling factors for any component in an IDAES model. These suffixes are created when needed by calling the ``set_scaling_factor()`` function. Using the ``set_scaling_factor()``, ``get_scaling_factor()``, and ``unset_scaling_factor()`` eliminates the need for users to deal directly with scaling suffixes, and ensures that scaling factors are stored in the correct location. - -.. autofunction:: set_scaling_factor - -.. autofunction:: get_scaling_factor - -.. autofunction:: unset_scaling_factor - -For variables, the ``set_variable_scaling_from_current_value`` can be used to automatically set the scaling factor for a given variable (or all variables in a given model) based on the current value of the variable. - -.. autofunction:: set_variable_scaling_from_current_value - -.. note :: - - If this function is used on a variable which has a current value of 0 or no current value (i.e., ``var.value == None``) then a warning will be logged and no scaling factor will be set for the variable. - -Default Scaling Factors ------------------------ - -Process models are generally large and contain a large number of components, thus it is often infeasible for a user to manually set scaling factors for all components individually. 
Additionally, getting good initial values in order to use the ``set_variable_scaling_from_current_value`` function requires solving the model which in turn requires initial scaling of the model. In order to provide a starting point for initialization, all IDAES models contain a ``default_scaling_factors`` dict which allows developers and users to assign default scaling factors for any component in a given model. - -The following methods are available on all IDAES models which can be used to access and manipulate the default scaling factors: - -.. module:: idaes.core.base.process_base - :noindex: - -.. autoclass:: ProcessBlockData - :members: set_default_scaling, get_default_scaling, unset_default_scaling - :noindex: - -These default scaling factors can be applied to a model using the ``set_scaling_from_default`` utility function. - -.. module:: idaes.core.util.scaling - :noindex: - -.. autofunction:: set_scaling_from_default - -.. note:: - - Default scaling factors are NOT automatically applied to a model. If ``set_scaling_from_default`` is not called, then the default scaling factors will not be used. - -Calculating Constraint and Objective Scaling Factors ----------------------------------------------------- - -If all variables in a problem have been assigned scaling factors, it is possible to automatically evaluate the terms in all expression (i.e., constraints and objective functions) using the inverse of the variable scaling factors as a nominal value for each variable. This information can then be used to estimate scaling factors for each expression. The IDAES-IP provides a number of utility functions for automatically calculating scaling factors for constraints and objective functions based on different approaches. - -.. autofunction:: set_constraint_scaling_harmonic_magnitude - -.. autofunction:: set_constraint_scaling_max_magnitude - -.. 
autofunction:: set_constraint_scaling_min_magnitude - -These functions make use of the ``NominalValueExtractionVisitor`` class which automatically walks the entire expression tree and determines the nominal value (expected magnitude and sign) for each additive term in the expression. Given an expression of the form :math:`f(x) = A(x) + B(x) + C(x)`, this class will return a list of the nominal values of :math:`A(x)`, :math:`B(x)` and :math:`C(x)` based on the scaling factors assigned to the variables in each sub-expression. These values can then be used to determine the best scaling factor for the overall expression. - -.. autoclass:: NominalValueExtractionVisitor - :members: - -Identifying Scaling Issues ------------------------------ - -A number of utility functions are available to help identify potential scaling issues in a problem. - -.. autofunction:: report_scaling_issues - -.. autofunction:: unscaled_variables_generator - -.. autofunction:: list_unscaled_variables - -.. autofunction:: badly_scaled_var_generator - -.. autofunction:: list_badly_scaled_variables - -.. autofunction:: unscaled_constraints_generator - -.. autofunction:: list_unscaled_constraints - -.. autofunction:: extreme_jacobian_entries - -.. autofunction:: extreme_jacobian_rows - -.. autofunction:: extreme_jacobian_columns - -.. autofunction:: jacobian_cond diff --git a/docs/reference_guides/scaling/autoscaler.rst b/docs/reference_guides/scaling/autoscaler.rst new file mode 100644 index 0000000000..bfb8a822b6 --- /dev/null +++ b/docs/reference_guides/scaling/autoscaler.rst @@ -0,0 +1,5 @@ +AutoScaler Class +================ + +.. automodule:: idaes.core.scaling.autoscaling + :members: diff --git a/docs/reference_guides/scaling/custom_scaler.rst b/docs/reference_guides/scaling/custom_scaler.rst new file mode 100644 index 0000000000..567d9cf4de --- /dev/null +++ b/docs/reference_guides/scaling/custom_scaler.rst @@ -0,0 +1,8 @@ +CustomScalerBase Class +====================== + +.. 
autoclass:: idaes.core.scaling.custom_scaler_base.ConstraintScalingScheme + :class-doc-from: class + +.. autoclass:: idaes.core.scaling.custom_scaler_base.CustomScalerBase + :members: diff --git a/docs/reference_guides/scaling/scaling.rst b/docs/reference_guides/scaling/scaling.rst new file mode 100644 index 0000000000..c694863331 --- /dev/null +++ b/docs/reference_guides/scaling/scaling.rst @@ -0,0 +1,20 @@ +Scaling Toolbox +=============== + +.. note:: + + In v2.0, IDAES is beginning to deploy a new suite of scaling tools. This documentation refers to the new tools. + For documentation of the older scaling tools, :ref:`see here`. + +The following modules compose the core IDAES Scaling Toolbox. + +.. module:: idaes.core.util.scaling + +.. toctree:: + :maxdepth: 2 + + scaling_utils + custom_scaler + autoscaler + scaling_profiler + diff --git a/docs/reference_guides/scaling/scaling_profiler.rst b/docs/reference_guides/scaling/scaling_profiler.rst new file mode 100644 index 0000000000..0dbbde69c1 --- /dev/null +++ b/docs/reference_guides/scaling/scaling_profiler.rst @@ -0,0 +1,6 @@ +Scaling Profiler Utility +======================== + +.. autoclass:: idaes.core.scaling.scaler_profiling.ScalingProfiler + :members: + diff --git a/docs/reference_guides/scaling/scaling_utils.rst b/docs/reference_guides/scaling/scaling_utils.rst new file mode 100644 index 0000000000..b6a6cb5cf4 --- /dev/null +++ b/docs/reference_guides/scaling/scaling_utils.rst @@ -0,0 +1,6 @@ +Scaling Utility Functions +========================= + +.. 
automodule:: idaes.core.scaling.util + :members: + :exclude-members: NominalValueExtractionVisitor diff --git a/idaes/core/base/process_base.py b/idaes/core/base/process_base.py index 422c7d6900..0276a40bf0 100644 --- a/idaes/core/base/process_base.py +++ b/idaes/core/base/process_base.py @@ -89,8 +89,9 @@ class ProcessBlockData(BlockData): CONFIG = ConfigBlock("ProcessBlockData", implicit=False) - # Set default initializer + # Set default initializer and scaler default_initializer = BlockTriangularizationInitializer + default_scaler = None def __init__(self, component): """ diff --git a/idaes/core/base/tests/test_process_base.py b/idaes/core/base/tests/test_process_base.py index fd43056151..767783b2b6 100644 --- a/idaes/core/base/tests/test_process_base.py +++ b/idaes/core/base/tests/test_process_base.py @@ -41,6 +41,7 @@ def test_base_attributes(): assert m.a.initialization_order == [m.a] assert m.a.default_initializer is BlockTriangularizationInitializer + assert m.a.default_scaler is None @pytest.mark.unit diff --git a/idaes/core/scaling/__init__.py b/idaes/core/scaling/__init__.py new file mode 100644 index 0000000000..0d1b13ee0f --- /dev/null +++ b/idaes/core/scaling/__init__.py @@ -0,0 +1,26 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +from .autoscaling import AutoScaler +from .custom_scaler_base import CustomScalerBase +from .scaler_profiling import ScalingProfiler +from .util import ( + scaling_factors_from_json_file, + scaling_factors_to_json_file, + scaling_factors_from_dict, + scaling_factors_to_dict, + get_scaling_factor, + get_scaling_suffix, + del_scaling_factor, + set_scaling_factor, + report_scaling_factors, +) diff --git a/idaes/core/scaling/autoscaling.py b/idaes/core/scaling/autoscaling.py new file mode 100644 index 0000000000..f2543c55a4 --- /dev/null +++ b/idaes/core/scaling/autoscaling.py @@ -0,0 +1,232 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Tools for automatically scaling models based on current state. 
+ +Author: Andrew Lee +""" + +import scipy as sp + +from pyomo.environ import ( + Block, + Constraint, + value, + Var, +) +from pyomo.core.base.block import BlockData +from pyomo.core.base.constraint import ConstraintData +from pyomo.core.base.var import VarData + +from idaes.core.scaling.scaling_base import ScalerBase +from idaes.core.scaling.util import get_scaling_factor +from idaes.core.util.scaling import get_jacobian + + +class AutoScaler(ScalerBase): + """ + IDAES Autoscaling Toolbox + + Contains a number of methods useful for automatically scaling models based + on the current model state (i.e., variable values). Users should be aware + of the limitations of autoscaling however, which only consider the current variable + values and are thus heavily dependent on model initialization. + + """ + + CONFIG = ScalerBase.CONFIG() + + def scale_variables_by_magnitude(self, blk_or_var, descend_into: bool = True): + """ + Calculate scaling factors for all variables in a model based on their + current magnitude. Variables with no value are assigned a scaling factor of 1. + + Args: + blk_or_var: block or variable object to calculate scaling factors for + descend_into: if blk_or_var is a Block, whether to descend into any sub-Blocks + (default=True) + + Returns: + None + + Raises: + TypeError is blk_or_var is not a Block or Var. 
+ """ + if isinstance(blk_or_var, BlockData): + # Scalar Block or element of Indexed Block + # As scaling factors live with the components parent, do not descend into + # sub-blocks here + for v in blk_or_var.component_data_objects(Var, descend_into=False): + self._vardata_by_magnitude(v) + + # Next, get all child blocks and call scale_variables_by_magnitude recursively + if descend_into: + for b in blk_or_var.component_data_objects(Block, descend_into=False): + self.scale_variables_by_magnitude(b, descend_into=descend_into) + + elif isinstance(blk_or_var, Block): + # Indexed Block + for b in blk_or_var.values(): + for v in b.component_data_objects(Var, descend_into=descend_into): + self._vardata_by_magnitude(v) + + elif isinstance(blk_or_var, VarData): + # Scalar Var or element of Indexed Var + self._vardata_by_magnitude(blk_or_var) + + elif isinstance(blk_or_var, Var): + # Indexed Var + for v in blk_or_var.values(): + self._vardata_by_magnitude(v) + else: + raise TypeError(f"{blk_or_var.name} is not a block or variable.") + + def scale_constraints_by_jacobian_norm( + self, blk_or_cons, norm: int = 2, descend_into: bool = True + ): + """ + Calculate scaling factors for all constraints in a model based on the norm of + the Jacobian matrix, accounting for any variable scaling factors. + + Args: + blk_or_cons: block or constraint to calculate scaling factors for + norm: type of norm to use for scaling. Must be a positive integer. + descend_into: if blk_or_cons is a Block, whether to descend into any sub-Blocks + (default=True) + + Returns: + None + + Raises: + TypeError is blk_or_cons is not a Block or Constraint + ValueError if norm is not a positive integer + """ + # Validate norm + # First cast to int + norm = int(norm) + if norm < 1: + raise ValueError( + f"Invalid value for norm in scale_constraints_by_jacobian_norm ({norm}). " + "Value must be a positive integer." 
+ ) + + # We want to avoid generating the Jacobian and NLP more than once, so first we + # will identify the top-level block and collect all the constraints of interest + # as a list or iterator + + if isinstance(blk_or_cons, BlockData): + # Scalar Block or element of Indexed Block + if descend_into: + # Scale all constraints, so pass con_list=None + con_list = None + else: + # Otherwise, get an iterator of constraints only on block + con_list = blk_or_cons.component_data_objects( + Constraint, descend_into=False + ) + jblock = blk_or_cons + + elif isinstance(blk_or_cons, Block): + # Indexed Block + # Underlying tools do not work for Indexed blocks, so + # use parent block instead and collect constraints of interest + con_list = [] + for b in blk_or_cons.values(): + for c in b.component_data_objects( + Constraint, descend_into=descend_into + ): + con_list.append(c) + jblock = blk_or_cons.parent_block() + + elif isinstance(blk_or_cons, ConstraintData): + # Scalar Constraint or element of Indexed Constraint + con_list = [blk_or_cons] + jblock = blk_or_cons.parent_block() + + elif isinstance(blk_or_cons, Constraint): + # Indexed Constraint + con_list = blk_or_cons.values() + jblock = blk_or_cons.parent_block() + + else: + raise TypeError(f"{blk_or_cons.name} is not a block or constraint.") + + # Once we have a single target block and list of constraints, call the scaler method + # once for all the constraints + self._con_by_norm(jblock, con_list=con_list, norm=norm) + + def scale_model(self, model, norm: int = 2, descend_into: bool = True): + """ + Apply auto-scaling routine to model. + + Args: + model: model to be scaled + norm: type of norm to use for scaling. Must be a positive integer. 
+ descend_into: if sub-Blocks are present, whether to descend into any sub-Blocks + + Returns: + None + """ + self.scale_variables_by_magnitude(blk_or_var=model, descend_into=descend_into) + self.scale_constraints_by_jacobian_norm( + blk_or_cons=model, norm=norm, descend_into=descend_into + ) + + def _vardata_by_magnitude(self, vardata): + if vardata.value is None: + sf = 1.0 + else: + val = abs(value(vardata)) + if val <= self.config.zero_tolerance: + sf = 1.0 + else: + sf = 1.0 / val + + self._set_scaling_factor(vardata, "variable", sf) + + def _con_by_norm(self, blk, con_list=None, norm=2): + # Get scaled Jacobian - we want to consider any existing scaling + # We will account for existing scaling factors later and update them + jac, nlp = get_jacobian(blk, scaled=True) + + if con_list is None: + con_list = nlp.get_pyomo_equality_constraints() + + # Use scipy to get all the norms + # Should be more efficient that iterating in Python + axis = ( + 1 # Could make this an argument to also support variable-based norm scaling + ) + if jac.format == "csr": + jac_norms = sp.sparse.linalg.norm(jac, ord=norm, axis=axis) + else: + jac_norms = sp.linalg.norm(jac, ord=norm, axis=axis) + + # Iterate over constraints of interest and apply scaling factors + for c in con_list: + c_idx = nlp.get_pyomo_equality_constraints().index(c) + + # Get any existing scaling factor for this constraint + sf_old = get_scaling_factor(c) + if sf_old is None: + sf_old = 1.0 + + # Get norm for this constraint + n = jac_norms[c_idx] + + if n <= self.config.zero_tolerance: + sf = sf_old + else: + sf = sf_old / n + + self._set_scaling_factor(c, "constraint", sf) diff --git a/idaes/core/scaling/custom_scaler_base.py b/idaes/core/scaling/custom_scaler_base.py new file mode 100644 index 0000000000..4da3cdb20b --- /dev/null +++ b/idaes/core/scaling/custom_scaler_base.py @@ -0,0 +1,620 @@ +################################################################################# +# The Institute for the Design of 
Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Base class for custom scaling routines. + +Author: Andrew Lee +""" +from copy import copy +from enum import Enum + +from pyomo.environ import ComponentMap, units, value +from pyomo.core.base.units_container import UnitsError +from pyomo.core.expr import identify_variables +from pyomo.core.expr.calculus.derivatives import Modes, differentiate + +from idaes.core.scaling.scaling_base import CONFIG, ScalerBase +from idaes.core.scaling.util import get_scaling_factor, NominalValueExtractionVisitor +import idaes.logger as idaeslog + +# Set up logger +_log = idaeslog.getLogger(__name__) + +CSCONFIG = CONFIG() + +DEFAULT_UNIT_SCALING = { + # "QuantityName: (reference units, scaling factor) + # Model developers should be careful when using these, especially when + # dealing with differential measurements (e.g. pressure and temperature differences) + "Temperature": (units.K, 1e-2), + "Pressure": (units.Pa, 1e-5), +} + + +class ConstraintScalingScheme(str, Enum): + """ + Schemes available for calculating constraint scaling factors. 
+ + * harmonicMean ('harmonic_mean'): sf = sum(1/abs(nominal value)) + * inverseSum ('inverse_sum'): sf = 1 / sum(abs(nominal value)) + * inverseRSS ('inverse_root_sum_squared'): sf = 1 / sqrt(sum(abs(nominal value)**2)) + * inverseMaximum ('inverse_maximum'): sf = 1 / max(abs(nominal value) + * inverseMinimum ('inverse_minimum'): sf = 1 / min(abs(nominal value) + """ + + harmonicMean = "harmonic_mean" + inverseSum = "inverse_sum" + inverseRSS = "inverse_root_sum_squared" + inverseMaximum = "inverse_maximum" + inverseMinimum = "inverse_minimum" + + +class CustomScalerBase(ScalerBase): + """ + Base class for custom scaling routines. + + """ + + CONFIG = ScalerBase.CONFIG() + + # Common data structures for default scaling + # DEFAULT_SCALING_FACTORS = {"component_local_name": DEFAULT_SCALING} + DEFAULT_SCALING_FACTORS = None + + # UNIT_SCALING_FACTORS = {"units": UNIT_BASED_SCALING} + UNIT_SCALING_FACTORS = copy(DEFAULT_UNIT_SCALING) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + if self.DEFAULT_SCALING_FACTORS is not None: + self.default_scaling_factors = copy(self.DEFAULT_SCALING_FACTORS) + else: + self.default_scaling_factors = {} + + if self.UNIT_SCALING_FACTORS is not None: + self.unit_scaling_factors = copy(self.UNIT_SCALING_FACTORS) + else: + self.unit_scaling_factors = {} + + def scale_model( + self, + model, + first_stage_fill_in: list = None, + second_stage_fill_in: list = None, + submodel_scalers: ComponentMap = None, + ): + """ + Default model scaling routine. + + This method performs a four-step scaling routine: + + 1. Scale variables using variable_scaling_routine + 2. Perform first-stage scaling factor fill in using user provided method(s), called in order declared + 3. Scale constraints using constraint_scaling_routine + 4. 
Perform second-stage scaling factor fill in using user provided method(s), called in order declared + + Args: + model: model to be scaled + first_stage_fill_in: list of methods to use for first-stage scaling factor fill in + second_stage_fill_in: list of methods to use for second-stage scaling factor fill in + submodel_scalers: ComponentMap of Scalers to use for sub-models + + Returns: + None + """ + # Step 1: Call variable scaling routine + self.variable_scaling_routine( + model, overwrite=self.config.overwrite, submodel_scalers=submodel_scalers + ) + # Step 2: Call variable fill in + if first_stage_fill_in is not None: + for i in first_stage_fill_in: + i(model) + + # Step 3: Call constraint scaling routine + self.constraint_scaling_routine( + model, overwrite=self.config.overwrite, submodel_scalers=submodel_scalers + ) + + # Step 4: Call constraint fill in + if second_stage_fill_in is not None: + for i in second_stage_fill_in: + i(model) + + def variable_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: ComponentMap = None + ): + """ + Routine to apply scaling factors to variables in model. + + Derived classes must overload this method. + + Args: + model: model to be scaled + overwrite: whether to overwrite existing scaling factors + submodel_scalers: ComponentMap of Scalers to use for sub-models + + Returns: + None + """ + raise NotImplementedError( + "Custom Scaler has not implemented a variable_scaling_routine method." + ) + + def constraint_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: ComponentMap = None + ): + """ + Routine to apply scaling factors to constraints in model. + + Derived classes must overload this method. 
+ + Args: + model: model to be scaled + overwrite: whether to overwrite existing scaling factors + submodel_scalers: ComponentMap of Scalers to use for sub-models + + Returns: + None + """ + raise NotImplementedError( + "Custom Scaler has not implemented a constraint_scaling_routine method." + ) + + def get_default_scaling_factor(self, component): + """ + Get scaling factor for component from dict of default values. + + Args: + component: component to get default scaling factor for + + Returns: + default scaling factor if it exists, else None + """ + try: + return self.default_scaling_factors[component.local_name] + except KeyError: + # Might be indexed, see if parent component has default scaling + parent = component.parent_component() + try: + return self.default_scaling_factors[parent.local_name] + except KeyError: + # No default scaling found, give up + pass + + # Log a message and return nothing + _log.debug(f"No default scaling factor found for {component.name}") + return None + + # Common methods for variable scaling + def scale_variable_by_component( + self, target_variable, scaling_component, overwrite: bool = False + ): + """ + Set scaling factor for target_variable equal to that of scaling_component. + + Args: + target_variable: variable to set scaling factor for + scaling_component: component to use for scaling factor + overwrite: whether to overwrite existing scaling factors + + Returns: + None + """ + sf = get_scaling_factor(scaling_component) + + if sf is not None: + self.set_variable_scaling_factor( + variable=target_variable, scaling_factor=sf, overwrite=overwrite + ) + else: + _log.debug( + f"Could not set scaling factor for {target_variable.name}, " + f"no scaling factor set for {scaling_component.name}" + ) + + def scale_variable_by_bounds(self, variable, overwrite: bool = False): + """ + Set scaling factor for variable based on bounds. + + If variable has both upper and lower bounds, scaling factor will be based on the + mean of the bounds. 
If variable has only one bound, scaling factor will be based + on that bound. If variable has no bounds, scaling factor will not be set. + + Args: + variable: variable to set scaling factor for + overwrite: whether to overwrite existing scaling factors + + Returns: + None + """ + if variable.lb is not None: + if variable.ub is not None: + # Both bounds, use mean + xmag = 0.5 * (variable.ub + variable.lb) + else: + # Only lower bound + xmag = variable.lb + elif variable.ub is not None: + # Only upper bound + xmag = variable.ub + else: + # No bounds + _log.debug( + f"No scaling factor set for {variable.name}; variable has no bounds." + ) + return + + if xmag == 0: + sf = 1 + else: + sf = 1 / abs(xmag) + + self.set_variable_scaling_factor( + variable=variable, scaling_factor=sf, overwrite=overwrite + ) + + def scale_variable_by_default(self, variable, overwrite: bool = False): + """ + Set scaling factor for variable based on default scaling factor. + + Args: + variable: variable to set scaling factor for + overwrite: whether to overwrite existing scaling factors + + Returns: + None + """ + sf = self.get_default_scaling_factor(variable) + if sf is not None: + self.set_variable_scaling_factor( + variable=variable, scaling_factor=sf, overwrite=overwrite + ) + else: + _log.debug( + f"Could not set scaling factor for {variable.name}, " + f"no default scaling factor set." + ) + + def scale_variable_by_units(self, variable, overwrite: bool = False): + """ + Set scaling factor for variable based on units of measurement. + + Units of measurement for variable are compared to those stored in + self.unit_scaling_factors, and if a match is found the scaling factor set + using the associated value. 
+ + Args: + variable: variable to set scaling factor for + overwrite: whether to overwrite existing scaling factors + + Returns: + None + """ + uom = units.get_units(variable) + + sf = None + # Keys in self.unit_scaling_factors are not used - only required because Pyomo + # units are non-hashable and thus cannot be keys + for refunits, unit_scale in self.unit_scaling_factors.values(): + try: + # Try convert the reference scaling factor to variable units + # TODO: Have not found a more efficient way to do this, as Pyomo basically + # TODO: involves a try/except anyway + # Need to invert reference scaling factor, and then invert result + sf = 1 / units.convert_value( + 1 / unit_scale, from_units=refunits, to_units=uom + ) + # Break once we have a match - no need to continue + break + except UnitsError: + pass + + if sf is not None: + self.set_variable_scaling_factor( + variable=variable, + scaling_factor=sf, + overwrite=overwrite, + ) + else: + _log.debug( + f"No scaling factor set for {variable.name}; no match for units {uom} found " + "in self.unit_scaling_factors" + ) + + # Common methods for constraint scaling + def scale_constraint_by_component( + self, target_constraint, scaling_component, overwrite: bool = False + ): + """ + Set scaling factor for target_constraint equal to that of scaling_component. 
+ + Args: + target_constraint: constraint to set scaling factor for + scaling_component: component to use for scaling factor + overwrite: whether to overwrite existing scaling factors + + Returns: + None + """ + sf = get_scaling_factor(scaling_component) + if sf is not None: + self.set_constraint_scaling_factor( + constraint=target_constraint, scaling_factor=sf, overwrite=overwrite + ) + else: + _log.debug( + f"Could not set scaling factor for {target_constraint.name}, " + f"no scaling factor set for {scaling_component.name}" + ) + + def scale_constraint_by_default(self, constraint, overwrite: bool = False): + """ + Set scaling factor for constraint based on default scaling factor. + + Args: + constraint: constraint to set scaling factor for + overwrite: whether to overwrite existing scaling factors + + Returns: + None + """ + sf = self.get_default_scaling_factor(constraint) + if sf is not None: + self.set_constraint_scaling_factor( + constraint=constraint, scaling_factor=sf, overwrite=overwrite + ) + else: + _log.debug( + f"Could not set scaling factor for {constraint.name}, " + f"no default scaling factor set." + ) + + def get_expression_nominal_values(self, expression): + """ + Calculate nominal values for each additive term in a Pyomo expression. + + The nominal value of any Var is defined as the inverse of its scaling factor + (if assigned, else 1). 
+ + Args: + expression: Pyomo expression to collect nominal values for + + Returns: + list of nominal values for each additive term + """ + # For convenience, if expression is a Pyomo component with an expr attribute, + # redirect to the expr attribute + if hasattr(expression, "expr"): + expression = expression.expr + + return NominalValueExtractionVisitor().walk_expression(expression) + + def scale_constraint_by_nominal_value( + self, + constraint, + scheme: ConstraintScalingScheme = ConstraintScalingScheme.inverseMaximum, + overwrite: bool = False, + ): + """ + Set scaling factor for constraint based on the nominal value(s). + + Terms with expected magnitudes of 0 will be ignored. + + Args: + constraint: constraint to set scaling factor for + scheme: ConstraintScalingScheme Enum indicating method to apply + for determining constraint scaling. + overwrite: whether to overwrite existing scaling factors + + Returns: + None + """ + nominal = self.get_expression_nominal_values(constraint.expr) + + # Remove any 0 terms + nominal = [j for j in nominal if j != 0] + + if len(nominal) == 0: + # No non-zero terms... + sf = 1 + elif scheme == ConstraintScalingScheme.harmonicMean: + sf = sum(1 / abs(i) for i in nominal) + elif scheme == ConstraintScalingScheme.inverseSum: + sf = 1 / sum(abs(i) for i in nominal) + elif scheme == ConstraintScalingScheme.inverseRSS: + sf = 1 / sum(abs(i) ** 2 for i in nominal) ** 0.5 + elif scheme == ConstraintScalingScheme.inverseMaximum: + sf = 1 / max(abs(i) for i in nominal) + elif scheme == ConstraintScalingScheme.inverseMinimum: + sf = 1 / min(abs(i) for i in nominal) + else: + raise ValueError( + f"Invalid value for 'scheme' argument ({scheme}) in " + "scale_constraint_by_nominal_value." 
        )

        self.set_constraint_scaling_factor(
            constraint=constraint, scaling_factor=sf, overwrite=overwrite
        )

    def scale_constraint_by_nominal_derivative_norm(
        self, constraint, norm: int = 2, overwrite: bool = False
    ):
        """
        Scale constraint by norm of partial derivatives.

        Calculates partial derivatives of constraint at nominal variable values,
        and then scales the constraint by the user-selected norm of these derivatives.
        Given perfect variable scaling, this should provide a similar result to
        applying scaling based on the Jacobian norm, however this approach does not
        require an initial solution for the problem (relying on nominal values instead).

        Args:
            constraint: constraint to be scaled.
            norm: type of norm to use for scaling. Must be a positive integer.
            overwrite: whether to overwrite existing scaling factors.

        Returns:
            None
        """
        # Cast norm to int to make sure it is valid
        norm = int(norm)
        if norm < 1:
            raise ValueError(f"norm must be a positive integer (received {norm})")

        # var_data collects (variable, original value, scaling factor) triples so
        # that all variable values can be restored in the finally block below.
        var_data = []
        try:
            # Iterate over all variables in constraint
            for v in identify_variables(constraint.body):
                # Store current value for restoration
                ov = v.value  # original value
                sf = self.get_scaling_factor(v)  # scaling factor
                if sf is None:
                    # If no scaling factor set, use nominal value of 1
                    sf = 1

                var_data.append((v, ov, sf))

            # Get partial derivatives
            pjac = []
            for v in var_data:
                # Iterate over all variable and set values
                for w in var_data:
                    if w is not v:
                        # Set all other variables to their nominal magnitude
                        # nominal_value = 1/ scaling_factor
                        w[0].value = 1 / w[2]
                    else:
                        # Set derivative var to scaled value of 1
                        # With perfect scaling, scaling_factor * value = 1
                        # NOTE(review): this evaluates the derivative variable at
                        # value 1 rather than its nominal magnitude 1/sf; for
                        # nonlinear constraints these differ - confirm intended.
                        w[0].value = 1

                pjac.append(
                    value(
                        differentiate(
                            expr=constraint.body, wrt=v[0], mode=Modes.reverse_symbolic
                        )
                        * (1 / v[2])  # Need to divide by scaling_factor
                    )
                )

        finally:
            # Restore all values for clean up
            for v in var_data:
                v[0].value = v[1]

        # Calculate norm
        cnorm = sum(abs(j) ** norm for j in pjac) ** (1 / norm)
        if cnorm != 0:
            sf = 1 / cnorm
        else:
            # All derivatives are zero at the nominal point - fall back to 1
            sf = 1
        self.set_constraint_scaling_factor(constraint, sf, overwrite=overwrite)

    # Other methods
    def propagate_state_scaling(
        self, target_state, source_state, overwrite: bool = False
    ):
        """
        Propagate scaling of state variables from one StateBlock to another.

        Indexing of target and source StateBlocks must match.

        Args:
            target_state: StateBlock to set scaling factors on
            source_state: StateBlock to use as source for scaling factors
            overwrite: whether to overwrite existing scaling factors

        Returns:
            None
        """
        for bidx, target_data in target_state.items():
            target_vars = target_data.define_state_vars()
            source_vars = source_state[bidx].define_state_vars()

            for state, var in target_vars.items():
                for vidx, vardata in var.items():
                    self.scale_variable_by_component(
                        target_variable=vardata,
                        scaling_component=source_vars[state][vidx],
                        overwrite=overwrite,
                    )

    def call_submodel_scaler_method(
        self,
        model,
        submodel: str,
        method: str,
        submodel_scalers: ComponentMap = None,
        overwrite: bool = False,
    ):
        """
        Call scaling method for submodel.

        Scaler for submodel is taken from submodel_scalers if present, otherwise the
        default scaler for the submodel is used.

        Args:
            model: parent model of submodel
            submodel: local name of submodel to be scaled as str
            method: name of method to call from submodel (as string)
            submodel_scalers: user provided ComponentMap of Scalers to use for submodels
            overwrite: whether to overwrite existing scaling factors

        Returns:
            None
        """
        # Get actual submodel object from name
        # For this method, we have to use the component name as the Scaler is written
        # before the model is constructed.
        sm_obj = model.find_component(submodel)

        if submodel_scalers is None:
            submodel_scalers = {}

        # Iterate over indices of submodel
        for smdata in sm_obj.values():
            # Get Scaler for submodel
            if sm_obj in submodel_scalers:
                scaler = submodel_scalers[sm_obj]
                if callable(scaler):
                    # Check to see if Scaler is callable - this implies it is a class and not an instance
                    # Call the class to create an instance
                    scaler = scaler()
                _log.debug(f"Using user-defined Scaler for {model}.{submodel}.")
            else:
                try:
                    scaler = smdata.default_scaler
                    _log.debug(f"Using default Scaler for {model}.{submodel}.")
                except AttributeError:
                    # Submodel does not define a default_scaler attribute at all
                    _log.debug(
                        f"No default Scaler set for {model}.{submodel}. Cannot call {method}."
                    )
                    return
                if scaler is not None:
                    # default_scaler is a class - instantiate it
                    scaler = scaler()
                else:
                    _log.debug(
                        f"No Scaler found for {model}.{submodel}. Cannot call {method}."
                    )

            # If a Scaler is found, call desired method
            if scaler is not None:
                try:
                    smeth = getattr(scaler, method)
                except AttributeError:
                    raise AttributeError(
                        f"Scaler for {model}.{submodel} does not have a method named {method}."
                    )
                smeth(smdata, overwrite=overwrite)
# ===== new file: idaes/core/scaling/scaler_profiling.py =====
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved.
# Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Tools for profiling scaling alternatives.
"""
import sys

from pyomo.environ import check_optimal_termination, Constraint
from pyomo.common.tempfiles import TempfileManager

from idaes.core.util.scaling import jacobian_cond
from idaes.core.scaling import AutoScaler, CustomScalerBase
from idaes.core.solvers import get_solver


class ScalingProfiler:
    """
    Class for running a set of constraint scaling methods on a model and reporting the
    effect on model condition number and solver behavior.

    Users should call the profile_scaling_methods method to generate a dict of results or
    the report_scaling_profiles method for a stream-based output.

    Users are expected to provide callback functions to 1) construct an initialized model
    that will be used for profiling, 2) apply user-defined variable scaling (used for the
    imperfect information case) and 3) perturb the model from the initialized state to
    test how well the model solves (optional).

    Users may also provide a dict of scaling methods they wish to apply using the scaling_methods
    argument. If this is not provided, the tool will default to applying all the scaling methods
    defined by the AutoScaler and CustomScalerBase classes.

    **NOTE** methods from the AutoScaler class are applied to Pyomo Blocks, whilst those from
    CustomScalerBase are applied to individual ConstraintDatas. The profiling tool assumes that
    methods will be applied to ConstraintDatas unless the `block_based` keyword argument is set to True
    for the scaling method.

    Args:
        build_model: callback to use to construct initialized model for testing
        user_scaling: callback to use to apply user-defined scaling to initialized model
        perturb_state: (optional) callback to use to perturb model state for re-solve tests
        scaling_methods: (optional) dict of constraint scaling methods to profile. {"Name": (method, kwargs)}
        solver: (optional) Pyomo solver object to use for re-solve tests
    """

    def __init__(
        self,
        build_model,
        user_scaling,
        perturb_state=None,
        scaling_methods: dict = None,
        solver=None,
    ):
        """
        Sets up a framework for applying different scaling methods to a model and compiling a
        report of their effects on the Jacobian condition number and how easily the scaled
        model can be solved for a perturbed state.
        """
        self._build_model = build_model
        self._user_scaling = user_scaling
        self._perturb_state = perturb_state
        self._scaling_methods = scaling_methods
        self._solver = solver

        if self._solver is None:
            # Default to IPOPT with model scaling applied by the writer
            self._solver = get_solver("ipopt_v2", writer_config={"scale_model": True})

        if self._scaling_methods is None:
            ascaler = AutoScaler()
            cscaler = CustomScalerBase()

            # Default suite: all nominal-value schemes, nominal derivative norms,
            # and the Jacobian-based (block level) norms from the AutoScaler.
            self._scaling_methods = {
                "Vars Only": (None, {}),
                "Harmonic": (
                    cscaler.scale_constraint_by_nominal_value,
                    {"scheme": "harmonic_mean"},
                ),
                "Inverse Sum": (
                    cscaler.scale_constraint_by_nominal_value,
                    {"scheme": "inverse_sum"},
                ),
                "Inverse Root Sum Squares": (
                    cscaler.scale_constraint_by_nominal_value,
                    {"scheme": "inverse_root_sum_squared"},
                ),
                "Inverse Maximum": (
                    cscaler.scale_constraint_by_nominal_value,
                    {"scheme": "inverse_maximum"},
                ),
                "Inverse Minimum": (
                    cscaler.scale_constraint_by_nominal_value,
                    {"scheme": "inverse_minimum"},
                ),
                "Nominal L1 Norm": (
                    cscaler.scale_constraint_by_nominal_derivative_norm,
                    {"norm": 1},
                ),
                "Nominal L2 Norm": (
                    cscaler.scale_constraint_by_nominal_derivative_norm,
                    {"norm": 2},
                ),
                "Actual L1 Norm": (
                    ascaler.scale_constraints_by_jacobian_norm,
                    {"norm": 1, "block_based": True},
                ),
                "Actual L2 Norm": (
                    ascaler.scale_constraints_by_jacobian_norm,
                    {"norm": 2, "block_based": True},
                ),
            }

    def profile_scaling_methods(self):
        """
        Generate results for all provided scaling methods.

        For each scaling method, calculate the Jacobian condition number and re-solve
        the model with both user-provided variable scaling and perfect variable scaling
        (scaling by inverse magnitude). A base case with no scaling applied is also
        run for reference.

        Args:
            None

        Returns:
            dict with results for all scaling methods
        """
        # Generate data for unscaled model
        m = self._build_model()
        unscaled = jacobian_cond(m, scaled=False)
        stats = self._solved_perturbed_state(m)

        results = {
            "Unscaled": {"Manual": {"condition_number": unscaled, **stats}},
        }

        # Run other cases
        for case, (meth, margs) in self._scaling_methods.items():
            results[case] = self.run_case(meth, **margs)

        return results

    def report_scaling_profiles(self, stream=None):
        """
        Run scaling profile workflow and report results to a stream.

        Args:
            stream: StringIO object to write result to (default=stdout)

        Returns:
            None
        """
        results = self.profile_scaling_methods()

        self.write_profile_report(results, stream)

    def write_profile_report(self, results: dict, stream=None):
        """
        Write a report on the comparison of scaling methods to a stream based on
        existing results dict.

        Args:
            results: dict containing results from a scaler profiling run
            stream: StringIO object to write result to (default=stdout)

        Returns:
            None
        """
        # If stream is None, default to stdout
        if stream is None:
            stream = sys.stdout

        # Get length of longest method name for padding
        max_str = max(len(i) for i in results.keys())
        if max_str < len("Scaling Method"):
            max_str = len("Scaling Method")
        # Length of each stats field is 22 characters, plus 4 for column dividers
        # Max line length is thus longest string name + 2 columns of 22+4 characters
        max_line = max_str + 26 * 2

        # Write header rows
        stream.write(f"\n{'='*max_line}\n")
        stream.write("Scaling Profile Report\n")
        stream.write(f"{'-' * max_line}\n")
        # Pad User Scaling columns to full column width (22)
        stream.write(
            f"{'Scaling Method': <{max_str}} || {'User Scaling': <{22}} || Perfect Scaling\n"
        )

        # Iterate over keys in results and write summary for each scaling method
        for k, v in results.items():
            manual = v["Manual"]
            # Solver stats are only present when a perturb_state callback was
            # provided; previously this raised KeyError for the Unscaled row
            # when no callback was given.
            if "solved" in manual:
                msolved = "Solved" if manual["solved"] else "Failed"
                # Pad iterations to 3 characters - hopefully we don't see more than 999 iterations
                mstats = f"{msolved} {manual['iterations']: <{3}}"
            else:
                mstats = ""
            stream.write(
                f"{k: <{max_str}} || {manual['condition_number']:.3E} | {mstats} "
            )

            if "Auto" in v.keys():
                auto = v["Auto"]
                if "solved" in auto:
                    asolved = "Solved" if auto["solved"] else "Failed"
                    # Pad iterations again
                    astats = f"{asolved} {auto['iterations']: <{3}}"
                else:
                    astats = ""
                stream.write(f"|| {auto['condition_number']:.3E} | {astats}\n")
            else:
                # Add trailing column divider but no auto column
                stream.write("||\n")

        # Write footer row
        stream.write(f"{'=' * max_line}\n")

    def run_case(self, scaling_method, **kwargs):
        """
        Run case for a given scaling method with both perfect and imperfect scaling information.

        Args:
            scaling_method: constraint scaling method to be tested
            kwargs: keyword arguments to be passed to scaling method

        Returns:
            dict summarising results of scaling case
        """
        block_based = kwargs.pop("block_based", False)

        # Imperfect information
        manual = self._run_scenario(
            scaling_method, perfect=False, block_based=block_based, **kwargs
        )

        # Perfect information
        perfect = self._run_scenario(
            scaling_method, perfect=True, block_based=block_based, **kwargs
        )

        return {"Manual": manual, "Auto": perfect}

    def _scale_vars(self, model, perfect=False):
        """
        Apply variable scaling to model.

        perfect=True uses AutoScaler magnitude-based scaling; otherwise the
        user-provided scaling callback is applied.
        """
        if perfect:
            scaler = AutoScaler()
            scaler.scale_variables_by_magnitude(model)
            return model

        self._user_scaling(model)

        return model

    def _apply_scaling(self, model, scaling_method, block_based, **kwargs):
        """
        Apply the given constraint scaling method to the model.

        Block-based methods receive the whole model; otherwise the method is
        called once per ConstraintData.
        """
        if scaling_method is not None:
            if block_based:
                scaling_method(model, **kwargs)
            else:
                for c in model.component_data_objects(
                    ctype=Constraint, descend_into=True
                ):
                    scaling_method(c, **kwargs)

    def _run_scenario(self, scaling_method, block_based, perfect, **kwargs):
        """
        Run a single scenario: build, scale, and collect condition number and solve stats.
        """
        m = self._build_model()
        self._scale_vars(m, perfect=perfect)
        self._apply_scaling(m, scaling_method, block_based=block_based, **kwargs)

        cond = jacobian_cond(m, scaled=True)
        stats = self._solved_perturbed_state(m)

        return {"condition_number": cond, **stats}

    def _solved_perturbed_state(self, model):
        """
        Run re-solve tests if perturb_state callback provided.

        Returns an empty dict when no callback was provided.
        """
        if self._perturb_state is None:
            return {}

        self._perturb_state(model)

        # Use a managed temporary file to capture the IPOPT log for parsing
        TempfileManager.push()
        try:
            tempfile = TempfileManager.create_tempfile(suffix="ipopt_out", text=True)
            opts = {"output_file": tempfile}

            try:
                status_obj = self._solver.solve(model, options=opts, tee=True)
                solved = check_optimal_termination(status_obj)

                iters, iters_in_restoration, iters_w_regularization = (
                    self._parse_ipopt_output(tempfile)
                )

                return {
                    "solved": solved,
                    "termination_message": status_obj.solver.termination_message,
                    "iterations": iters,
                    "iters_in_restoration": iters_in_restoration,
                    "iters_w_regularization": iters_w_regularization,
                }
            except RuntimeError as err:
                # Likely a critical solver failure
                return {
                    "solved": False,
                    "termination_message": str(err),
                    "iterations": -1,
                    "iters_in_restoration": -1,
                    "iters_w_regularization": -1,
                }
        finally:
            # Release the tempfile context so the solver log is deleted
            # (previously leaked: push() had no matching pop()).
            TempfileManager.pop()

    def _parse_ipopt_output(self, ipopt_file):
        """
        Parse an IPOPT output file and return iteration statistics.

        Args:
            ipopt_file: path to IPOPT log file to parse

        Returns
        -------
        tuple of (total iterations, iterations in restoration,
        iterations with regularization)
        """
        # ToDO: Check for final iteration with regularization or restoration

        iters = 0
        iters_in_restoration = 0
        iters_w_regularization = 0
        # parse the output file to get the iteration count, solver times, etc.
        with open(ipopt_file, "r") as f:
            parseline = False
            for line in f:
                if line.startswith("iter"):
                    # This marks the start of the iteration logging, set parseline True
                    parseline = True
                elif line.startswith("Number of Iterations....:"):
                    # Marks end of iteration logging, set parseline False
                    parseline = False
                    tokens = line.split()
                    iters = int(tokens[3])
                elif parseline:
                    # Line contains details of an iteration, look for restoration or regularization
                    tokens = line.split()
                    try:
                        if not tokens[6] == "-":
                            # Iteration with regularization
                            iters_w_regularization += 1
                        if tokens[0].endswith("r"):
                            # Iteration in restoration
                            iters_in_restoration += 1
                    except IndexError:
                        # Blank line at end of iteration list, so assume we hit this
                        pass

        return iters, iters_in_restoration, iters_w_regularization
# ===== new file: idaes/core/scaling/scaling_base.py =====
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Base class for Scalers

Author: Andrew Lee
"""

from pyomo.common.config import (
    Bool,
    ConfigDict,
    ConfigValue,
    String_ConfigFormatter,
)
from pyomo.core.base.constraint import ConstraintData
from pyomo.core.base.var import VarData

from idaes.core.scaling.util import get_scaling_factor, set_scaling_factor
import idaes.logger as idaeslog

# Set up logger
_log = idaeslog.getLogger(__name__)


# Common base ConfigBlock for all Scalers
CONFIG = ConfigDict()
CONFIG.declare(
    "zero_tolerance",
    ConfigValue(
        default=1e-12,
        domain=float,
        description="Value at which a variable will be considered equal to zero for scaling.",
    ),
)
CONFIG.declare(
    "max_variable_scaling_factor",
    ConfigValue(
        default=1e10,
        domain=float,
        description="Maximum value for variable scaling factors.",
    ),
)
CONFIG.declare(
    "min_variable_scaling_factor",
    ConfigValue(
        default=1e-10,
        domain=float,
        description="Minimum value for variable scaling factors.",
    ),
)
CONFIG.declare(
    "max_constraint_scaling_factor",
    ConfigValue(
        default=1e10,
        domain=float,
        description="Maximum value for constraint scaling factors.",
    ),
)
CONFIG.declare(
    "min_constraint_scaling_factor",
    ConfigValue(
        default=1e-10,
        domain=float,
        description="Minimum value for constraint scaling factors.",
    ),
)
CONFIG.declare(
    "overwrite",
    ConfigValue(
        default=False,
        domain=Bool,
        description="Whether to overwrite existing scaling factors.",
    ),
)


class ScalerBase:
    """
    Base class for IDAES Scaler objects

    Contains a number of methods useful for scaling models.

    """

    # Class-level copy of the common config; each instance gets its own
    # populated copy in __init__.
    CONFIG = CONFIG()

    def __init__(self, **kwargs):
        self.config = self.CONFIG(kwargs)

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)

        # Handle cases where a class doc string was not set
        if cls.__doc__ is None:
            cls.__doc__ = ""

        # Append auto-generated documentation of the CONFIG options to the
        # subclass docstring so config settings appear in rendered docs.
        cls.__doc__ = cls.__doc__ + cls.CONFIG.generate_documentation(
            format=String_ConfigFormatter(
                block_start="%s\n",
                block_end="",
                item_start="%s\n",
                item_body="%s",
                item_end="\n",
            ),
            indent_spacing=4,
            width=66,
        )

    def get_scaling_factor(self, component):
        """
        Get scaling factor for component.

        Alias for get_scaling_factor utility function.

        Args:
            component: component to get scaling factor for

        Returns:
            float - scaling factor

        Raises:
            TypeError if component is a Block
        """
        return get_scaling_factor(component)

    def set_variable_scaling_factor(
        self, variable, scaling_factor: float, overwrite: bool = None
    ):
        """
        Set scaling factor for variable.

        Scaling factor is limited by min_variable_scaling_factor and max_variable_scaling_factor.

        Args:
            variable: VarData component to set scaling factor for.
            scaling_factor: nominal scaling factor to apply. May be limited by max and min values.
            overwrite: whether to overwrite existing scaling factor (if present).
                Defaults to Scaler config setting.

        Returns:
            None

        Raises:
            TypeError if variable is not an instance of VarData
        """
        if not isinstance(variable, VarData):
            raise TypeError(f"{variable} is not a variable (or is indexed).")
        self._set_scaling_factor(
            component=variable,
            component_type="variable",
            scaling_factor=scaling_factor,
            overwrite=overwrite,
        )

    def set_constraint_scaling_factor(
        self, constraint, scaling_factor: float, overwrite: bool = None
    ):
        """
        Set scaling factor for constraint.

        Scaling factor is limited by min_constraint_scaling_factor and max_constraint_scaling_factor.

        Args:
            constraint: ConstraintData component to set scaling factor for.
            scaling_factor: nominal scaling factor to apply. May be limited by max and min values.
            overwrite: whether to overwrite existing scaling factor (if present).
                Defaults to Scaler config setting.

        Returns:
            None

        Raises:
            TypeError if constraint is not an instance of ConstraintData
        """
        if not isinstance(constraint, ConstraintData):
            raise TypeError(f"{constraint} is not a constraint (or is indexed).")
        self._set_scaling_factor(
            component=constraint,
            component_type="constraint",
            scaling_factor=scaling_factor,
            overwrite=overwrite,
        )

    def _set_scaling_factor(
        self, component, component_type, scaling_factor, overwrite=None
    ):
        """
        PRIVATE METHOD

        The purpose of this method is to apply the correct max and min limits to scaling factors
        before setting them. Which set of limits to apply is determined solely by the component_type
        argument, and no type checking is performed.
        """
        # If overwrite not provided, use scaler config
        # This allows developers to have some more control over when to overwrite or not
        if overwrite is None:
            overwrite = self.config.overwrite

        # Select the clamp limits for this component type
        if component_type == "variable":
            maxsf = self.config.max_variable_scaling_factor
            minsf = self.config.min_variable_scaling_factor
        elif component_type == "constraint":
            maxsf = self.config.max_constraint_scaling_factor
            minsf = self.config.min_constraint_scaling_factor
        else:
            raise ValueError("Invalid value for component_type.")

        # Clamp the requested factor into [minsf, maxsf], logging when limited
        if scaling_factor > maxsf:
            _log.debug(
                f"Scaling factor for {component.name} limited by maximum value "
                f"(max_sf: {maxsf} < sf: {scaling_factor})"
            )
            scaling_factor = maxsf
        elif scaling_factor < minsf:
            _log.debug(
                f"Scaling factor for {component.name} limited by minimum value "
                f"(min_sf: {minsf} > sf: {scaling_factor})"
            )
            scaling_factor = minsf

        set_scaling_factor(component, scaling_factor, overwrite=overwrite)
a/idaes/core/scaling/tests/__init__.py b/idaes/core/scaling/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/idaes/core/scaling/tests/gibbs_solution.json b/idaes/core/scaling/tests/gibbs_solution.json new file mode 100644 index 0000000000..412825b305 --- /dev/null +++ b/idaes/core/scaling/tests/gibbs_solution.json @@ -0,0 +1,725 @@ +{ + "__metadata__": { + "format_version": 4, + "date": "2024-06-28", + "time": "11:24:52.903115", + "other": {}, + "__performance__": { + "n_components": 167, + "etime_make_dict": 0.0009469985961914062 + } + }, + "unknown": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "__pyomo_components__": { + "fs": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "__pyomo_components__": { + "properties": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "__pyomo_components__": { + "Vap": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + }, + "H2": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + }, + "N2": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + }, + "O2": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + }, + "CH4": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + }, + "CO": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + }, + "CO2": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + }, + "H2O": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + }, + "NH3": { + "__type__": "", + "data": { + "None": { + "__type__": "" + } + } + } + } + } + } + }, + "unit": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "__pyomo_components__": { + "control_volume": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "__pyomo_components__": { + "properties_in": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "__pyomo_components__": { + "flow_mol": { + "__type__": "", + 
"data": { + "None": { + "__type__": "", + "value": 230.0 + } + } + }, + "mole_frac_comp": { + "__type__": "", + "data": { + "'H2'": { + "__type__": "", + "value": 0.0435 + }, + "'N2'": { + "__type__": "", + "value": 0.6522 + }, + "'O2'": { + "__type__": "", + "value": 0.1739 + }, + "'CH4'": { + "__type__": "", + "value": 0.1304 + }, + "'CO'": { + "__type__": "", + "value": 1e-05 + }, + "'CO2'": { + "__type__": "", + "value": 1e-05 + }, + "'H2O'": { + "__type__": "", + "value": 1e-05 + }, + "'NH3'": { + "__type__": "", + "value": 1e-05 + } + } + }, + "pressure": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "value": 101325.0 + } + } + }, + "temperature": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "value": 1500.0 + } + } + }, + "flow_mol_phase": { + "__type__": "", + "data": { + "'Vap'": { + "__type__": "", + "value": 230.0 + } + } + }, + "mole_frac_phase_comp": { + "__type__": "", + "data": { + "('Vap', 'H2')": { + "__type__": "", + "value": 0.0435 + }, + "('Vap', 'N2')": { + "__type__": "", + "value": 0.6522 + }, + "('Vap', 'O2')": { + "__type__": "", + "value": 0.1739 + }, + "('Vap', 'CH4')": { + "__type__": "", + "value": 0.1304 + }, + "('Vap', 'CO')": { + "__type__": "", + "value": 1e-05 + }, + "('Vap', 'CO2')": { + "__type__": "", + "value": 1e-05 + }, + "('Vap', 'H2O')": { + "__type__": "", + "value": 1e-05 + }, + "('Vap', 'NH3')": { + "__type__": "", + "value": 1e-05 + } + } + }, + "enth_mol_phase": { + "__type__": "", + "data": { + "'Vap'": { + "__type__": "", + "value": -9735.757799999998 + } + } + }, + "enth_mol_phase_comp": { + "__type__": "", + "data": { + "('Vap', 'H2')": { + "__type__": "", + "value": 0.0 + }, + "('Vap', 'N2')": { + "__type__": "", + "value": 0.0 + }, + "('Vap', 'O2')": { + "__type__": "", + "value": 0.0 + }, + "('Vap', 'CH4')": { + "__type__": "", + "value": -74600.0 + }, + "('Vap', 'CO')": { + "__type__": "", + "value": -110530.0 + }, + "('Vap', 'CO2')": { + "__type__": "", + "value": -393520.0 
+ }, + "('Vap', 'H2O')": { + "__type__": "", + "value": -241830.0 + }, + "('Vap', 'NH3')": { + "__type__": "", + "value": -45900.0 + } + } + } + } + } + } + }, + "properties_out": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "__pyomo_components__": { + "flow_mol": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "value": 250.0586841391117 + } + } + }, + "mole_frac_comp": { + "__type__": "", + "data": { + "'H2'": { + "__type__": "", + "value": 0.10300593038593893 + }, + "'N2'": { + "__type__": "", + "value": 0.5998874968259134 + }, + "'O2'": { + "__type__": "", + "value": 0.00024125524310241033 + }, + "'CH4'": { + "__type__": "", + "value": 1.0211647371506291e-11 + }, + "'CO'": { + "__type__": "", + "value": 0.09736779533740232 + }, + "'CO2'": { + "__type__": "", + "value": 0.02259044606787874 + }, + "'H2O'": { + "__type__": "", + "value": 0.17690650096691196 + }, + "'NH3'": { + "__type__": "", + "value": 5.75162640685755e-07 + } + } + }, + "pressure": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "value": 101325.0 + } + } + }, + "temperature": { + "__type__": "", + "data": { + "None": { + "__type__": "", + "value": 2844.38 + } + } + }, + "flow_mol_phase": { + "__type__": "", + "data": { + "'Vap'": { + "__type__": "", + "value": 250.0586841391117 + } + } + }, + "mole_frac_phase_comp": { + "__type__": "", + "data": { + "('Vap', 'H2')": { + "__type__": "", + "value": 0.10300593038593893 + }, + "('Vap', 'N2')": { + "__type__": "", + "value": 0.5998874968259134 + }, + "('Vap', 'O2')": { + "__type__": "", + "value": 0.00024125524310241033 + }, + "('Vap', 'CH4')": { + "__type__": "", + "value": 1.0211647371506291e-11 + }, + "('Vap', 'CO')": { + "__type__": "", + "value": 0.09736779533740232 + }, + "('Vap', 'CO2')": { + "__type__": "", + "value": 0.02259044606787874 + }, + "('Vap', 'H2O')": { + "__type__": "", + "value": 0.17690650096691196 + }, + "('Vap', 'NH3')": { + "__type__": "", + "value": 5.75162640685755e-07 + } + 
} + }, + "enth_mol_phase": { + "__type__": "", + "data": { + "'Vap'": { + "__type__": "", + "value": -38763.85833692704 + } + } + }, + "enth_mol_phase_comp": { + "__type__": "", + "data": { + "('Vap', 'H2')": { + "__type__": "", + "value": 88598.50033149906 + }, + "('Vap', 'N2')": { + "__type__": "", + "value": 1328.0818641205042 + }, + "('Vap', 'O2')": { + "__type__": "", + "value": -8728.364736274816 + }, + "('Vap', 'CH4')": { + "__type__": "", + "value": 10951.275326851828 + }, + "('Vap', 'CO')": { + "__type__": "", + "value": -116043.17471597753 + }, + "('Vap', 'CO2')": { + "__type__": "", + "value": -260197.0660256414 + }, + "('Vap', 'H2O')": { + "__type__": "", + "value": -178104.1583772846 + }, + "('Vap', 'NH3')": { + "__type__": "", + "value": -6774.557289033196 + } + } + }, + "gibbs_mol_phase_comp": { + "__type__": "", + "data": { + "('Vap', 'H2')": { + "__type__": "", + "value": -449349.89708332205 + }, + "('Vap', 'N2')": { + "__type__": "", + "value": -565714.3063137117 + }, + "('Vap', 'O2')": { + "__type__": "", + "value": -787130.7356067412 + }, + "('Vap', 'CH4')": { + "__type__": "", + "value": -1240363.0290605212 + }, + "('Vap', 'CO')": { + "__type__": "", + "value": -735228.6026972476 + }, + "('Vap', 'CO2')": { + "__type__": "", + "value": -1128793.9705006182 + }, + "('Vap', 'H2O')": { + "__type__": "", + "value": -842915.2648866929 + }, + "('Vap', 'NH3')": { + "__type__": "", + "value": -956881.9987818388 + } + } + }, + "entr_mol_phase_comp": { + "__type__": "", + "data": { + "('Vap', 'H2')": { + "__type__": "", + "value": 189.1267683694939 + }, + "('Vap', 'N2')": { + "__type__": "", + "value": 199.35535623855893 + }, + "('Vap', 'O2')": { + "__type__": "", + "value": 273.6632836929195 + }, + "('Vap', 'CH4')": { + "__type__": "", + "value": 439.9251521904151 + }, + "('Vap', 'CO')": { + "__type__": "", + "value": 217.68730900275983 + }, + "('Vap', 'CO2')": { + "__type__": "", + "value": 305.3730178369193 + }, + "('Vap', 'H2O')": { + "__type__": "", + 
"value": 233.7279500310817 + }, + "('Vap', 'NH3')": { + "__type__": "", + "value": 334.02971526055074 + } + } + } + } + } + } + }, + "heat": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": -7454015.1138869105 + } + } + }, + "deltaP": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": 0 + } + } + } + } + } + } + }, + "_flow_mol_inlet_ref": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": 230.0 + } + } + }, + "_mole_frac_comp_inlet_ref": { + "__type__": "", + "data": { + "(0.0, 'H2')": { + "__type__": "", + "value": 0.0435 + }, + "(0.0, 'N2')": { + "__type__": "", + "value": 0.6522 + }, + "(0.0, 'O2')": { + "__type__": "", + "value": 0.1739 + }, + "(0.0, 'CH4')": { + "__type__": "", + "value": 0.1304 + }, + "(0.0, 'CO')": { + "__type__": "", + "value": 1e-05 + }, + "(0.0, 'CO2')": { + "__type__": "", + "value": 1e-05 + }, + "(0.0, 'H2O')": { + "__type__": "", + "value": 1e-05 + }, + "(0.0, 'NH3')": { + "__type__": "", + "value": 1e-05 + } + } + }, + "_temperature_inlet_ref": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": 1500.0 + } + } + }, + "_pressure_inlet_ref": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": 101325.0 + } + } + }, + "_flow_mol_outlet_ref": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": 250.0586841391117 + } + } + }, + "_mole_frac_comp_outlet_ref": { + "__type__": "", + "data": { + "(0.0, 'H2')": { + "__type__": "", + "value": 0.10300593038593893 + }, + "(0.0, 'N2')": { + "__type__": "", + "value": 0.5998874968259134 + }, + "(0.0, 'O2')": { + "__type__": "", + "value": 0.00024125524310241033 + }, + "(0.0, 'CH4')": { + "__type__": "", + "value": 1.0211647371506291e-11 + }, + "(0.0, 'CO')": { + "__type__": "", + "value": 0.09736779533740232 + }, + "(0.0, 'CO2')": { + "__type__": "", + "value": 0.02259044606787874 + }, + "(0.0, 'H2O')": { + "__type__": "", + "value": 0.17690650096691196 + }, + "(0.0, 'NH3')": { 
+ "__type__": "", + "value": 5.75162640685755e-07 + } + } + }, + "_temperature_outlet_ref": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": 2844.38 + } + } + }, + "_pressure_outlet_ref": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": 101325.0 + } + } + }, + "lagrange_mult": { + "__type__": "", + "data": { + "(0.0, 'H')": { + "__type__": "", + "value": 224674.94854166103 + }, + "(0.0, 'N')": { + "__type__": "", + "value": 282857.15315685584 + }, + "(0.0, 'O')": { + "__type__": "", + "value": 393565.3678033706 + }, + "(0.0, 'C')": { + "__type__": "", + "value": 341663.234893877 + } + } + }, + "heat_duty": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": -7454015.1138869105 + } + } + }, + "deltaP": { + "__type__": "", + "data": { + "0.0": { + "__type__": "", + "value": 0 + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/idaes/core/scaling/tests/load_scaling_factors.json b/idaes/core/scaling/tests/load_scaling_factors.json new file mode 100644 index 0000000000..0a6e463687 --- /dev/null +++ b/idaes/core/scaling/tests/load_scaling_factors.json @@ -0,0 +1,29 @@ +{ + "v[1]": 50, + "c[1]": 50, + "v[2]": 100, + "c[2]": 250, + "v[3]": 150, + "c[3]": 1250, + "v[4]": 200, + "c[4]": 6250, + "subblock_suffixes": { + "b[1]": { + "v2": 100, + "subblock_suffixes": {} + }, + "b[2]": { + "v2": 100, + "subblock_suffixes": {} + }, + "b[3]": { + "v2": 100, + "subblock_suffixes": {} + }, + "b[4]": { + "v2": 100, + "subblock_suffixes": {} + } + }, + "block_name": "unknown" +} \ No newline at end of file diff --git a/idaes/core/scaling/tests/test_autoscaling.py b/idaes/core/scaling/tests/test_autoscaling.py new file mode 100644 index 0000000000..d782b480bf --- /dev/null +++ b/idaes/core/scaling/tests/test_autoscaling.py @@ -0,0 +1,402 @@ +################################################################################# +# The Institute for the Design of Advanced 
Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Tests for autoscalers. + +Author: Andrew Lee +""" +from math import sqrt +import pytest + +from pyomo.environ import Block, ConcreteModel, Constraint, Objective, Set, Suffix, Var + +from idaes.core.scaling import AutoScaler + + +@pytest.fixture +def model(): + m = ConcreteModel() + m.s = Set(initialize=[1, 2, 3, 4]) + + m.b = Block(m.s) + + m.v1 = Var(initialize=2) + m.v2 = Var(m.s, initialize=10) + + for bd in m.b.values(): + bd.v3 = Var(initialize=10) + bd.v4 = Var(initialize=100) + + @m.Constraint(m.s) + def c1(blk, i): + return blk.v1**i == blk.v2[i] + + @m.Constraint(m.s) + def c2(blk, i): + return blk.v2[i] == blk.b[i].v3 + + for bd in m.b.values(): + + @bd.Constraint() + def c3(blk): + return blk.v3**2 == blk.v4 + + m.o = Objective(expr=m.v1) + + return m + + +class TestAutoscaleVarMagnitude: + @pytest.mark.unit + def test_var_data(self, model): + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(model.v1) + scaler.scale_variables_by_magnitude(model.v2[1]) + + assert model.scaling_factor[model.v1] == pytest.approx(1 / 2, rel=1e-8) + assert model.scaling_factor[model.v2[1]] == pytest.approx(1 / 10, rel=1e-8) + assert len(model.scaling_factor) == 2 + + @pytest.mark.unit + def test_var_data_no_value(self): + model = ConcreteModel() + model.v1 = Var() + + scaler = AutoScaler() + 
scaler.scale_variables_by_magnitude(model.v1) + + assert model.scaling_factor[model.v1] == 1 + + @pytest.mark.unit + def test_indexed_var(self, model): + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(model.v2) + + for i in model.s: + assert model.scaling_factor[model.v2[i]] == pytest.approx(1 / 10, rel=1e-8) + assert len(model.scaling_factor) == 4 + + @pytest.mark.unit + def test_block_data(self, model): + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(model.b[1]) + + assert model.b[1].scaling_factor[model.b[1].v3] == pytest.approx( + 1 / 10, rel=1e-8 + ) + assert model.b[1].scaling_factor[model.b[1].v4] == pytest.approx( + 1 / 100, rel=1e-8 + ) + assert len(model.b[1].scaling_factor) == 2 + + @pytest.mark.unit + def test_indexed_block(self, model): + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(model.b) + + for bd in model.b.values(): + sfx = bd.scaling_factor + assert sfx[bd.v3] == pytest.approx(1 / 10, rel=1e-8) + assert sfx[bd.v4] == pytest.approx(1 / 100, rel=1e-8) + assert len(sfx) == 2 + + @pytest.mark.unit + def test_nested_blocks_descend(self, model): + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(model) + + assert model.scaling_factor[model.v1] == pytest.approx(1 / 2, rel=1e-8) + for i in model.s: + assert model.scaling_factor[model.v2[i]] == pytest.approx(1 / 10, rel=1e-8) + assert len(model.scaling_factor) == 5 + + for bd in model.b.values(): + sfx = bd.scaling_factor + assert sfx[bd.v3] == pytest.approx(1 / 10, rel=1e-8) + assert sfx[bd.v4] == pytest.approx(1 / 100, rel=1e-8) + assert len(sfx) == 2 + + @pytest.mark.unit + def test_nested_blocks_no_descend(self, model): + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(model, descend_into=False) + + assert model.scaling_factor[model.v1] == pytest.approx(1 / 2, rel=1e-8) + for i in model.s: + assert model.scaling_factor[model.v2[i]] == pytest.approx(1 / 10, rel=1e-8) + assert len(model.scaling_factor) == 5 + + for bd in 
model.b.values(): + assert not hasattr(bd, "scaling_factor") + + @pytest.mark.unit + def test_no_overwrite(self, model): + # Add some scaling factors to ensure they are not overwritten + model.scaling_factor = Suffix(direction=Suffix.EXPORT) + model.scaling_factor[model.v1] = 20 + + model.b[1].scaling_factor = Suffix(direction=Suffix.EXPORT) + model.b[1].scaling_factor[model.b[1].v3] = 20 + + scaler = AutoScaler(overwrite=False) + scaler.scale_variables_by_magnitude(model) + + assert model.scaling_factor[model.v1] == 20 + for i in model.s: + assert model.scaling_factor[model.v2[i]] == pytest.approx(1 / 10, rel=1e-8) + assert len(model.scaling_factor) == 5 + + for bd in model.b.values(): + sfx = bd.scaling_factor + if bd is model.b[1]: + assert sfx[bd.v3] == 20 + else: + assert sfx[bd.v3] == pytest.approx(1 / 10, rel=1e-8) + assert sfx[bd.v4] == pytest.approx(1 / 100, rel=1e-8) + assert len(sfx) == 2 + + @pytest.mark.unit + def test_max_and_min(self, model): + scaler = AutoScaler( + max_variable_scaling_factor=1 / 10, min_variable_scaling_factor=1 / 50 + ) + scaler.scale_variables_by_magnitude(model) + + assert model.scaling_factor[model.v1] == pytest.approx(1 / 10, rel=1e-8) + for i in model.s: + assert model.scaling_factor[model.v2[i]] == pytest.approx(1 / 10, rel=1e-8) + assert len(model.scaling_factor) == 5 + + for bd in model.b.values(): + sfx = bd.scaling_factor + assert sfx[bd.v3] == pytest.approx(1 / 10, rel=1e-8) + assert sfx[bd.v4] == pytest.approx(1 / 50, rel=1e-8) + assert len(sfx) == 2 + + @pytest.mark.unit + def test_var_fixed(self, model): + model.v1.fix() + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(model.v1) + scaler.scale_variables_by_magnitude(model.v2[1]) + + assert model.scaling_factor[model.v1] == pytest.approx(1 / 2, rel=1e-8) + assert model.scaling_factor[model.v2[1]] == pytest.approx(1 / 10, rel=1e-8) + assert len(model.scaling_factor) == 2 + + @pytest.mark.unit + def test_not_block_or_var(self, model): + scaler = 
AutoScaler() + + with pytest.raises(TypeError, match="c1 is not a block or variable."): + scaler.scale_variables_by_magnitude(model.c1) + + +class TestConstraintsByNorm: + @pytest.mark.unit + def test_not_block_or_var(self, model): + scaler = AutoScaler() + + with pytest.raises(TypeError, match="v1 is not a block or constraint."): + scaler.scale_constraints_by_jacobian_norm(model.v1) + + @pytest.mark.unit + def test_block_data_L2(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model.b[1]) + + assert model.b[1].scaling_factor[model.b[1].c3] == pytest.approx( + 1 / sqrt(20**2 + 1**2), rel=1e-8 + ) + assert len(model.b[1].scaling_factor) == 1 + + @pytest.mark.unit + def test_block_data_L2_block_data(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model.b) + + for bd in model.b.values(): + assert bd.scaling_factor[bd.c3] == pytest.approx( + 1 / sqrt(20**2 + 1**2), rel=1e-8 + ) + assert len(bd.scaling_factor) == 1 + + @pytest.mark.unit + def test_nested_blocks_L2(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model) + + for i in model.s: + assert model.scaling_factor[model.c1[i]] == pytest.approx( + 1 / sqrt((i * 2 ** (i - 1)) ** 2 + 1**2), rel=1e-8 + ) + assert model.scaling_factor[model.c2[i]] == pytest.approx( + 1 / sqrt(2), rel=1e-8 + ) + assert len(model.scaling_factor) == 8 + + for bd in model.b.values(): + assert bd.scaling_factor[bd.c3] == pytest.approx( + 1 / sqrt(20**2 + 1**2), rel=1e-8 + ) + assert len(bd.scaling_factor) == 1 + + @pytest.mark.unit + def test_nested_blocks_L2_no_descent(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model, descend_into=False) + + for i in model.s: + assert model.scaling_factor[model.c1[i]] == pytest.approx( + 1 / sqrt((i * 2 ** (i - 1)) ** 2 + 1**2), rel=1e-8 + ) + assert model.scaling_factor[model.c2[i]] == pytest.approx( + 1 / sqrt(2), rel=1e-8 + ) + assert 
len(model.scaling_factor) == 8 + + for bd in model.b.values(): + assert not hasattr(bd, "scaling_factor") + + @pytest.mark.unit + def test_constraint_data_L2(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model.c1[1]) + + assert model.scaling_factor[model.c1[1]] == pytest.approx( + 1 / sqrt((1 * 2 ** (1 - 1)) ** 2 + 1**2), rel=1e-8 + ) + assert len(model.scaling_factor) == 1 + + @pytest.mark.unit + def test_indexed_constraint_L2(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model.c1) + + for i in model.s: + assert model.scaling_factor[model.c1[i]] == pytest.approx( + 1 / sqrt((i * 2 ** (i - 1)) ** 2 + 1**2), rel=1e-8 + ) + assert len(model.scaling_factor) == 4 + + @pytest.mark.unit + def test_block_data_L1(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model.b[1], norm=1) + + assert model.b[1].scaling_factor[model.b[1].c3] == pytest.approx( + 1 / 21, rel=1e-8 + ) + assert len(model.b[1].scaling_factor) == 1 + + @pytest.mark.unit + def test_nested_blocks_L1(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model, norm=1) + + for i in model.s: + assert model.scaling_factor[model.c1[i]] == pytest.approx( + 1 / ((i * 2 ** (i - 1)) + 1), rel=1e-8 + ) + assert model.scaling_factor[model.c2[i]] == pytest.approx(1 / 2, rel=1e-8) + assert len(model.scaling_factor) == 8 + + for bd in model.b.values(): + assert bd.scaling_factor[bd.c3] == pytest.approx(1 / 21, rel=1e-8) + assert len(bd.scaling_factor) == 1 + + @pytest.mark.unit + def test_constraint_data_L1(self, model): + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model.c1[1], norm=1) + + assert model.scaling_factor[model.c1[1]] == pytest.approx( + 1 / ((1 * 2 ** (1 - 1)) + 1), rel=1e-8 + ) + assert len(model.scaling_factor) == 1 + + @pytest.mark.unit + def test_indexed_constraint_L1(self, model): + scaler = AutoScaler() + + 
scaler.scale_constraints_by_jacobian_norm(model.c1, norm=1) + + for i in model.s: + assert model.scaling_factor[model.c1[i]] == pytest.approx( + 1 / ((i * 2 ** (i - 1)) + 1), rel=1e-8 + ) + assert len(model.scaling_factor) == 4 + + @pytest.mark.unit + def test_nested_blocks_L1_no_overwrite(self, model): + # Add some scaling factors to ensure they are not overwritten + model.scaling_factor = Suffix(direction=Suffix.EXPORT) + model.scaling_factor[model.c1[1]] = 20 + + model.b[1].scaling_factor = Suffix(direction=Suffix.EXPORT) + model.b[1].scaling_factor[model.b[1].c3] = 20 + + scaler = AutoScaler() + + scaler.scale_constraints_by_jacobian_norm(model, norm=1) + + for i in model.s: + if i == 1: + assert model.scaling_factor[model.c1[i]] == 20 + else: + assert model.scaling_factor[model.c1[i]] == pytest.approx( + 1 / ((i * 2 ** (i - 1)) + 1), rel=1e-8 + ) + assert model.scaling_factor[model.c2[i]] == pytest.approx(1 / 2, rel=1e-8) + assert len(model.scaling_factor) == 8 + + for k, bd in model.b.items(): + if k == 1: + assert bd.scaling_factor[bd.c3] == 20 + else: + assert bd.scaling_factor[bd.c3] == pytest.approx(1 / 21, rel=1e-8) + assert len(bd.scaling_factor) == 1 + + +class TestAutoScaleModel: + @pytest.mark.unit + def test_scale_model_default(self, model): + scaler = AutoScaler() + + scaler.scale_model(model) + + c1_sf = { + "c1[1]": 0.0980580676, + "c1[2]": 0.0780868809, + "c1[3]": 0.0384615385, + "c1[4]": 0.0154376880, + } + + for k, v in model.scaling_factor.items(): + if str(k).startswith("v2"): + assert v == pytest.approx(0.1, rel=1e-8) + elif str(k).startswith("v1"): + assert v == pytest.approx(0.5, rel=1e-8) + elif str(k).startswith("c2"): + assert v == pytest.approx(0.5**0.5 * 0.1, rel=1e-8) + elif str(k).startswith("c1"): + assert v == pytest.approx(c1_sf[str(k)], rel=1e-8) diff --git a/idaes/core/scaling/tests/test_custom_scaler_base.py b/idaes/core/scaling/tests/test_custom_scaler_base.py new file mode 100644 index 0000000000..e3e9637807 --- /dev/null 
+++ b/idaes/core/scaling/tests/test_custom_scaler_base.py @@ -0,0 +1,710 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Tests for CustomScalerBase. + +Author: Andrew Lee +""" +import pytest +import re + +from pyomo.environ import ( + Block, + ComponentMap, + ConcreteModel, + Constraint, + Set, + Suffix, + units, + value, + Var, +) +from pyomo.common.config import ConfigDict + +from idaes.core.scaling.custom_scaler_base import ( + CustomScalerBase, + ConstraintScalingScheme, +) +from idaes.core.util.constants import Constants +from idaes.core.util.testing import PhysicalParameterTestBlock +import idaes.logger as idaeslog + + +class DummyScaler(CustomScalerBase): + """ + Create some dummy methods that will record that they ran in lists + """ + + def variable_scaling_routine(self, model, overwrite, submodel_scalers): + # Create some lists to show that each method was run + model._verification = [] + model._subscalers = {} + model.overwrite = [] + + model._verification.append("variables") + model._subscalers["variables"] = submodel_scalers + model.overwrite.append(overwrite) + + def constraint_scaling_routine(self, model, overwrite, submodel_scalers): + model._verification.append("constraints") + model._subscalers["constraints"] = 
submodel_scalers + model.overwrite.append(overwrite) + + def fill_in_1(self, model): + model._verification.append("fill_in_1") + + def fill_in_2(self, model): + model._verification.append("fill_in_2") + + def dummy_method(self, model, overwrite): + model._dummy_scaler_test = overwrite + + +@pytest.fixture +def model(): + m = ConcreteModel() + + m.pressure = Var(units=units.Pa, bounds=(1e4, None)) + m.temperature = Var(units=units.K, bounds=(250, 350)) + m.volume_mol = Var(units=units.m**3 / units.mol, bounds=(None, 10)) + m.enth_mol = Var(units=units.J / units.mol) + + m.ideal_gas = Constraint( + expr=m.pressure * m.volume_mol == Constants.gas_constant * m.temperature + ) + m.enthalpy_eq = Constraint( + expr=4.81 * units.J / units.mol / units.K * m.temperature == m.enth_mol + ) + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + + return m + + +class TestCustomScalerBase: + @pytest.mark.unit + def test_init(self): + sb = CustomScalerBase() + + assert isinstance(sb.config, ConfigDict) + assert sb.config.zero_tolerance == 1e-12 + assert sb.config.max_variable_scaling_factor == 1e10 + assert sb.config.min_variable_scaling_factor == 1e-10 + assert sb.config.max_constraint_scaling_factor == 1e10 + assert sb.config.min_constraint_scaling_factor == 1e-10 + assert not sb.config.overwrite + + assert sb.default_scaling_factors == {} + assert sb.unit_scaling_factors == { + "Temperature": (units.K, 1e-2), + "Pressure": (units.Pa, 1e-5), + } + + @pytest.mark.unit + def test_variable_scaling_routine(self): + sb = CustomScalerBase() + + with pytest.raises( + NotImplementedError, + match="Custom Scaler has not implemented a variable_scaling_routine method.", + ): + sb.variable_scaling_routine("foo") + + @pytest.mark.unit + def test_constraint_scaling_routine(self): + sb = CustomScalerBase() + + with pytest.raises( + NotImplementedError, + match="Custom Scaler has not implemented a constraint_scaling_routine method.", + ): + sb.constraint_scaling_routine("foo") + + 
@pytest.mark.unit + def test_scale_model(self): + # Dummy object to hold testing data + model = ConcreteModel() + + sb = DummyScaler() + sb.scale_model( + model, + first_stage_fill_in=[sb.fill_in_1], + second_stage_fill_in=[sb.fill_in_2], + submodel_scalers={"foo", "bar"}, + ) + + # Check order methods were run in + assert model._verification == [ + "variables", + "fill_in_1", + "constraints", + "fill_in_2", + ] + + # Check that overwrite was passed on + model.overwrite == [False, False] + + # Check that submodel scalers were passed on to methods + assert model._subscalers == { + "variables": {"foo", "bar"}, + "constraints": {"foo", "bar"}, + } + + @pytest.mark.unit + def test_get_default_scaling_factor(self, model, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + sb = CustomScalerBase() + + # No defaults defined yet + assert sb.get_default_scaling_factor(model.pressure) is None + assert "No default scaling factor found for pressure" in caplog.text + + # Set a default + sb.default_scaling_factors["pressure"] = 1e-4 + assert sb.get_default_scaling_factor(model.pressure) == 1e-4 + + @pytest.mark.unit + def test_get_default_scaling_factor_indexed(self, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + m = ConcreteModel() + m.v = Var([1, 2, 3, 4]) + + sb = CustomScalerBase() + + # No defaults defined yet + assert sb.get_default_scaling_factor(m.v[1]) is None + assert "No default scaling factor found for v[1]" in caplog.text + + # Set a default for the indexed var + sb.default_scaling_factors["v"] = 1e-4 + assert sb.get_default_scaling_factor(m.v[1]) == 1e-4 + + # Set a default for the specific element + sb.default_scaling_factors["v[1]"] = 1e-8 + assert sb.get_default_scaling_factor(m.v[1]) == 1e-8 + + @pytest.mark.unit + def test_scale_variable_by_component(self, model, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + sb = CustomScalerBase() + + # No scaling factors set + sb.scale_variable_by_component(model.pressure, 
model.temperature) + assert sb.get_scaling_factor(model.pressure) is None + assert ( + "Could not set scaling factor for pressure, no scaling factor set for temperature" + in caplog.text + ) + + # Set a scaling factor for temperature + model.scaling_factor = Suffix(direction=Suffix.EXPORT) + model.scaling_factor[model.temperature] = 1e-2 + + sb.scale_variable_by_component(model.pressure, model.temperature) + assert model.scaling_factor[model.pressure] == 1e-2 + + # Change temperature scaling and check for overwrite + model.scaling_factor[model.temperature] = 1e-3 + + sb.scale_variable_by_component( + model.pressure, model.temperature, overwrite=False + ) + assert model.scaling_factor[model.pressure] == 1e-2 + + sb.scale_variable_by_component( + model.pressure, model.temperature, overwrite=True + ) + assert model.scaling_factor[model.pressure] == 1e-3 + + @pytest.mark.unit + def test_scale_variable_by_bounds(self, model, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + sb = CustomScalerBase() + + # Set a scaling factor for temperature to test overwrite + model.scaling_factor[model.temperature] = 1e-2 + + sb.scale_variable_by_bounds(model.temperature, overwrite=False) + assert model.scaling_factor[model.temperature] == 1e-2 + + # Both bounds + sb.scale_variable_by_bounds(model.temperature, overwrite=True) + assert model.scaling_factor[model.temperature] == 1 / 300 + + # Lower bound only + sb.scale_variable_by_bounds(model.pressure) + assert model.scaling_factor[model.pressure] == 1e-4 + + # Upper bound only + sb.scale_variable_by_bounds(model.volume_mol) + assert model.scaling_factor[model.volume_mol] == 0.1 + + # No bounds + sb.scale_variable_by_bounds(model.enth_mol) + assert ( + "No scaling factor set for enth_mol; variable has no bounds." 
in caplog.text + ) + assert model.enth_mol not in model.scaling_factor + + # Test case where bounds give a magnitude of 0 + model.pressure.setlb(-1e4) + model.pressure.setub(1e4) + sb.scale_variable_by_bounds(model.pressure, overwrite=True) + assert model.scaling_factor[model.pressure] == 1 + + @pytest.mark.unit + def test_scale_variable_by_default(self, model, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + sb = CustomScalerBase() + + # No defaults defined yet + sb.scale_variable_by_default(model.pressure) + assert model.pressure not in model.scaling_factor + assert ( + "Could not set scaling factor for pressure, no default scaling factor set." + in caplog.text + ) + + # Set a default + sb.default_scaling_factors["pressure"] = 1e-4 + sb.scale_variable_by_default(model.pressure) + assert model.scaling_factor[model.pressure] == 1e-4 + + # Change default to check overwrite + sb.default_scaling_factors["pressure"] = 1e-5 + sb.scale_variable_by_default(model.pressure, overwrite=False) + assert model.scaling_factor[model.pressure] == 1e-4 + sb.scale_variable_by_default(model.pressure, overwrite=True) + assert model.scaling_factor[model.pressure] == 1e-5 + + @pytest.mark.unit + def test_scale_variable_by_units(self, model, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + sb = CustomScalerBase() + + # Units in dict, no conversion required + sb.scale_variable_by_units(model.pressure) + assert model.scaling_factor[model.pressure] == 1e-5 + + # Equivalent units in dict, need conversion + model.p2 = Var(units=units.bar) + sb.scale_variable_by_units(model.p2) + assert model.scaling_factor[model.p2] == pytest.approx(1, rel=1e-8) + + # No match - no sf assigned + sb.scale_variable_by_units(model.enth_mol) + assert model.enth_mol not in model.scaling_factor + assert ( + "No scaling factor set for enth_mol; no match for units J/mol found " + "in self.unit_scaling_factors" in caplog.text + ) + + # Test for overwrite + 
model.scaling_factor[model.temperature] = 42 + sb.scale_variable_by_units(model.temperature, overwrite=False) + assert model.scaling_factor[model.temperature] == 42 + sb.scale_variable_by_units(model.temperature, overwrite=True) + assert model.scaling_factor[model.temperature] == 1e-2 + + @pytest.mark.unit + def test_scale_constraint_by_default(self, model, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + sb = CustomScalerBase() + + # No defaults defined yet + sb.scale_constraint_by_default(model.ideal_gas) + assert model.ideal_gas not in model.scaling_factor + assert ( + "Could not set scaling factor for ideal_gas, no default scaling factor set." + in caplog.text + ) + + # Set a default + sb.default_scaling_factors["ideal_gas"] = 1e-3 + sb.scale_constraint_by_default(model.ideal_gas) + assert model.scaling_factor[model.ideal_gas] == 1e-3 + + # Change default to check overwrite + sb.default_scaling_factors["ideal_gas"] = 1e-5 + sb.scale_constraint_by_default(model.ideal_gas, overwrite=False) + assert model.scaling_factor[model.ideal_gas] == 1e-3 + sb.scale_constraint_by_default(model.ideal_gas, overwrite=True) + assert model.scaling_factor[model.ideal_gas] == 1e-5 + + @pytest.mark.unit + def test_get_expression_nominal_values(self, model): + sb = CustomScalerBase() + + # Set variable scaling factors for testing + model.scaling_factor[model.pressure] = 1e-5 + model.scaling_factor[model.temperature] = 1e-2 + model.scaling_factor[model.volume_mol] = 1e-1 + + nominal_values = sb.get_expression_nominal_values(model.ideal_gas.expr) + + # Nominal values will be (R*T, P*V) + assert nominal_values == [ + pytest.approx(831.446, rel=1e-5), + pytest.approx(1e6, rel=1e-5), + ] + + # Check redirection for ConstraintData objects + nominal_values = sb.get_expression_nominal_values(model.ideal_gas) + assert nominal_values == [ + pytest.approx(831.446, rel=1e-5), + pytest.approx(1e6, rel=1e-5), + ] + + @pytest.mark.unit + def 
test_scale_constraint_by_nominal_value_harmonic(self, model): + sb = CustomScalerBase() + + # Set variable scaling factors for testing + model.scaling_factor[model.pressure] = 1e-5 + model.scaling_factor[model.temperature] = 1e-2 + model.scaling_factor[model.volume_mol] = 1e-1 + model.scaling_factor[model.ideal_gas] = 1 + + # overwrite = False, no change + sb.scale_constraint_by_nominal_value( + model.ideal_gas, + scheme=ConstraintScalingScheme.harmonicMean, + overwrite=False, + ) + assert model.scaling_factor[model.ideal_gas] == 1 + # overwrite = True + sb.scale_constraint_by_nominal_value( + model.ideal_gas, scheme=ConstraintScalingScheme.harmonicMean, overwrite=True + ) + assert model.scaling_factor[model.ideal_gas] == pytest.approx( + (1 / 831.446 + 1e-6), rel=1e-5 + ) + + @pytest.mark.unit + def test_scale_constraint_by_nominal_value_max(self, model): + sb = CustomScalerBase() + + # Set variable scaling factors for testing + model.scaling_factor[model.pressure] = 1e-5 + model.scaling_factor[model.temperature] = 1e-2 + model.scaling_factor[model.volume_mol] = 1e-1 + model.scaling_factor[model.ideal_gas] = 1 + + # overwrite = False, no change + sb.scale_constraint_by_nominal_value(model.ideal_gas, overwrite=False) + assert model.scaling_factor[model.ideal_gas] == 1 + # overwrite = True + sb.scale_constraint_by_nominal_value(model.ideal_gas, overwrite=True) + assert model.scaling_factor[model.ideal_gas] == pytest.approx(1e-6, rel=1e-5) + + @pytest.mark.unit + def test_scale_constraint_by_nominal_value_min(self, model): + sb = CustomScalerBase() + + # Set variable scaling factors for testing + model.scaling_factor[model.pressure] = 1e-5 + model.scaling_factor[model.temperature] = 1e-2 + model.scaling_factor[model.volume_mol] = 1e-1 + model.scaling_factor[model.ideal_gas] = 1 + + # overwrite = False, no change + sb.scale_constraint_by_nominal_value( + model.ideal_gas, + scheme=ConstraintScalingScheme.inverseMinimum, + overwrite=False, + ) + assert 
model.scaling_factor[model.ideal_gas] == 1 + # overwrite = True + sb.scale_constraint_by_nominal_value( + model.ideal_gas, + scheme=ConstraintScalingScheme.inverseMinimum, + overwrite=True, + ) + assert model.scaling_factor[model.ideal_gas] == pytest.approx( + 1 / 831.446, rel=1e-5 + ) + + @pytest.mark.unit + def test_scale_constraint_by_nominal_value_inv_sum(self, model): + sb = CustomScalerBase() + + # Set variable scaling factors for testing + model.scaling_factor[model.pressure] = 1e-5 + model.scaling_factor[model.temperature] = 1e-2 + model.scaling_factor[model.volume_mol] = 1e-1 + model.scaling_factor[model.ideal_gas] = 1 + + # overwrite = False, no change + sb.scale_constraint_by_nominal_value( + model.ideal_gas, scheme=ConstraintScalingScheme.inverseSum, overwrite=False + ) + assert model.scaling_factor[model.ideal_gas] == 1 + # overwrite = True + sb.scale_constraint_by_nominal_value( + model.ideal_gas, scheme=ConstraintScalingScheme.inverseSum, overwrite=True + ) + assert model.scaling_factor[model.ideal_gas] == pytest.approx( + 1 / (831.446 + 1e6), rel=1e-5 + ) + + @pytest.mark.unit + def test_scale_constraint_by_nominal_value_rss(self, model): + sb = CustomScalerBase() + + # Set variable scaling factors for testing + model.scaling_factor[model.pressure] = 1e-5 + model.scaling_factor[model.temperature] = 1e-2 + model.scaling_factor[model.volume_mol] = 1e-1 + model.scaling_factor[model.ideal_gas] = 1 + + # overwrite = False, no change + sb.scale_constraint_by_nominal_value( + model.ideal_gas, scheme=ConstraintScalingScheme.inverseRSS, overwrite=False + ) + assert model.scaling_factor[model.ideal_gas] == 1 + # overwrite = True + sb.scale_constraint_by_nominal_value( + model.ideal_gas, scheme=ConstraintScalingScheme.inverseRSS, overwrite=True + ) + assert model.scaling_factor[model.ideal_gas] == pytest.approx( + 1 / (831.446**2 + 1e6**2) ** 0.5, rel=1e-5 + ) + + @pytest.mark.unit + def test_scale_constraint_by_nominal_value_invalid_scheme(self, model): + sb 
= CustomScalerBase() + + with pytest.raises( + ValueError, + match=re.escape( + "Invalid value for 'scheme' argument (foo) in " + "scale_constraint_by_nominal_value." + ), + ): + sb.scale_constraint_by_nominal_value(model.ideal_gas, scheme="foo") + + @pytest.mark.unit + def test_scale_constraint_by_nominal_derivative_1norm(self, model): + sb = CustomScalerBase() + + # Set variable scaling factors for testing + model.scaling_factor[model.pressure] = 1e-5 + model.scaling_factor[model.temperature] = 1e-2 + model.scaling_factor[model.volume_mol] = 1e-1 + model.scaling_factor[model.ideal_gas] = 1 + + # overwrite = False, no change + sb.scale_constraint_by_nominal_derivative_norm( + model.ideal_gas, norm=1, overwrite=False + ) + assert model.scaling_factor[model.ideal_gas] == 1 + # overwrite = True + sb.scale_constraint_by_nominal_derivative_norm( + model.ideal_gas, norm=1, overwrite=True + ) + assert model.scaling_factor[model.ideal_gas] == pytest.approx( + 4.99792e-7, rel=1e-5 # (1/(8.314+1e6+1e6) + ) + + # Check for clean up + assert model.pressure.value is None + assert model.temperature.value is None + assert model.pressure.value is None + + @pytest.mark.unit + def test_scale_constraint_by_nominal_derivative_2norm(self, model): + sb = CustomScalerBase() + + # Set variable scaling factors for testing + model.scaling_factor[model.pressure] = 1e-5 + model.scaling_factor[model.temperature] = 1e-2 + model.scaling_factor[model.volume_mol] = 1e-1 + model.scaling_factor[model.ideal_gas] = 1 + + # overwrite = False, no change + sb.scale_constraint_by_nominal_derivative_norm(model.ideal_gas, overwrite=False) + assert model.scaling_factor[model.ideal_gas] == 1 + # overwrite = True + sb.scale_constraint_by_nominal_derivative_norm(model.ideal_gas, overwrite=True) + assert model.scaling_factor[model.ideal_gas] == pytest.approx( + 1 / (8.314**2 + 1e6**2 + 1e6**2) ** 0.5, rel=1e-5 + ) + + # Check for clean up + assert model.pressure.value is None + assert model.temperature.value is 
None + assert model.pressure.value is None + + @pytest.mark.unit + def test_propagate_state_scaling(self): + # Dummy up two state blocks + m = ConcreteModel() + + m.properties = PhysicalParameterTestBlock() + + m.state1 = m.properties.build_state_block([1, 2, 3]) + m.state2 = m.properties.build_state_block([1, 2, 3]) + + # Set scaling factors on state1 + for t, sd in m.state1.items(): + sd.scaling_factor = Suffix(direction=Suffix.EXPORT) + sd.scaling_factor[sd.temperature] = 100 * t + sd.scaling_factor[sd.pressure] = 1e5 * t + + count = 1 + for j in sd.flow_mol_phase_comp.values(): + sd.scaling_factor[j] = 10 * t * count + count += 1 + + sb = CustomScalerBase() + sb.propagate_state_scaling(m.state2, m.state1) + + for t, sd in m.state2.items(): + assert sd.scaling_factor[sd.temperature] == 100 * t + assert sd.scaling_factor[sd.pressure] == 1e5 * t + + count = 1 + for j in sd.flow_mol_phase_comp.values(): + assert sd.scaling_factor[j] == 10 * t * count + count += 1 + + # Test for overwrite + for t, sd in m.state1.items(): + sd.scaling_factor[sd.temperature] = 200 * t + sd.scaling_factor[sd.pressure] = 2e5 * t + + count = 1 + for j in sd.flow_mol_phase_comp.values(): + sd.scaling_factor[j] = 20 * t * count + count += 1 + + sb.propagate_state_scaling(m.state2, m.state1, overwrite=False) + + for t, sd in m.state2.items(): + assert sd.scaling_factor[sd.temperature] == 100 * t + assert sd.scaling_factor[sd.pressure] == 1e5 * t + + count = 1 + for j in sd.flow_mol_phase_comp.values(): + assert sd.scaling_factor[j] == 10 * t * count + count += 1 + + @pytest.mark.unit + def test_call_submodel_scaler_method_no_scaler(self, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + + # Dummy up a nested model + m = ConcreteModel() + m.b = Block([1, 2, 3]) + + sb = CustomScalerBase() + sb.call_submodel_scaler_method(m, "b", method="dummy_method", overwrite=True) + + for bd in m.b.values(): + assert not hasattr(bd, "_dummy_scaler_test") + + assert ( + "No default Scaler set 
for unknown.b. Cannot call dummy_method." + in caplog.text + ) + + @pytest.mark.unit + def test_call_submodel_scaler_method_default_scaler(self, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + + # Dummy up a nested model + m = ConcreteModel() + m.b = Block([1, 2, 3]) + for bd in m.b.values(): + bd.default_scaler = DummyScaler + + sb = CustomScalerBase() + sb.call_submodel_scaler_method(m, "b", method="dummy_method", overwrite=True) + + for bd in m.b.values(): + assert bd._dummy_scaler_test + + assert "Using default Scaler for unknown.b." in caplog.text + + @pytest.mark.unit + def test_call_submodel_scaler_method_user_scaler(self, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + + # Dummy up a nested model + m = ConcreteModel() + m.b = Block([1, 2, 3]) + + scaler_map = ComponentMap() + scaler_map[m.b] = DummyScaler() + + sb = CustomScalerBase() + sb.call_submodel_scaler_method( + m, + "b", + method="dummy_method", + submodel_scalers=scaler_map, + overwrite=False, + ) + + for bd in m.b.values(): + assert not bd._dummy_scaler_test + + assert "Using user-defined Scaler for unknown.b." in caplog.text + + @pytest.mark.unit + def test_call_submodel_scaler_method_user_scaler_class(self, caplog): + caplog.set_level(idaeslog.DEBUG, logger="idaes") + + # Dummy up a nested model + m = ConcreteModel() + m.b = Block([1, 2, 3]) + + scaler_map = ComponentMap() + scaler_map[m.b] = DummyScaler() + + sb = CustomScalerBase() + sb.call_submodel_scaler_method( + m, + "b", + method="dummy_method", + submodel_scalers=scaler_map, + overwrite=False, + ) + + for bd in m.b.values(): + assert not bd._dummy_scaler_test + + assert "Using user-defined Scaler for unknown.b." 
in caplog.text + + @pytest.mark.unit + def test_call_submodel_scaler_method_invalid_method(self): + # Dummy up a nested model + m = ConcreteModel() + m.b = Block([1, 2, 3]) + + scaler_map = ComponentMap() + scaler_map[m.b] = DummyScaler() + + sb = CustomScalerBase() + + with pytest.raises( + AttributeError, + match="Scaler for unknown.b does not have a method named foo.", + ): + sb.call_submodel_scaler_method( + m, + "b", + method="foo", + submodel_scalers=scaler_map, + overwrite=False, + ) diff --git a/idaes/core/scaling/tests/test_custom_scaling_integration.py b/idaes/core/scaling/tests/test_custom_scaling_integration.py new file mode 100644 index 0000000000..548846f786 --- /dev/null +++ b/idaes/core/scaling/tests/test_custom_scaling_integration.py @@ -0,0 +1,418 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Integration tests for constraint scaling by expected magnitude in Custom Scaling. + +Tests will use the Gibbs Reactor example as the test case as it is very +poorly scaled in native form. 
+ +Author: Andrew Lee +""" + +import os +import pytest + +from pyomo.environ import ( + assert_optimal_termination, + ConcreteModel, + Constraint, + Suffix, + TransformationFactory, + value, + Var, + units, +) + +from idaes.core import FlowsheetBlock +from idaes.models.unit_models.gibbs_reactor import GibbsReactor +from idaes.models.properties.activity_coeff_models.methane_combustion_ideal import ( + MethaneParameterBlock as MethaneCombustionParameterBlock, +) +from idaes.core.util.testing import PhysicalParameterTestBlock, initialization_tester +from idaes.core.solvers import get_solver +from idaes.core.util import to_json, from_json, StoreSpec +from idaes.core.util.scaling import jacobian_cond +from idaes.core.scaling import AutoScaler, CustomScalerBase, set_scaling_factor + + +FILENAME = "gibbs_solution.json" +local_path = os.path.dirname(os.path.realpath(__file__)) +fname = os.path.join(local_path, FILENAME) + + +def build_model(): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + + m.fs.properties = MethaneCombustionParameterBlock() + + m.fs.unit = GibbsReactor( + property_package=m.fs.properties, + has_heat_transfer=True, + has_pressure_change=True, + ) + + m.fs.unit.inlet.flow_mol[0].fix(230.0) + m.fs.unit.inlet.mole_frac_comp[0, "H2"].fix(0.0435) + m.fs.unit.inlet.mole_frac_comp[0, "N2"].fix(0.6522) + m.fs.unit.inlet.mole_frac_comp[0, "O2"].fix(0.1739) + m.fs.unit.inlet.mole_frac_comp[0, "CO2"].fix(1e-5) + m.fs.unit.inlet.mole_frac_comp[0, "CH4"].fix(0.1304) + m.fs.unit.inlet.mole_frac_comp[0, "CO"].fix(1e-5) + m.fs.unit.inlet.mole_frac_comp[0, "H2O"].fix(1e-5) + m.fs.unit.inlet.mole_frac_comp[0, "NH3"].fix(1e-5) + m.fs.unit.inlet.temperature[0].fix(1500.0) + m.fs.unit.inlet.pressure[0].fix(101325.0) + + m.fs.unit.outlet.temperature[0].fix(2844.38) + m.fs.unit.deltaP.fix(0) + + return m + + +@pytest.fixture +def gibbs(): + m = build_model() + + # Load solution + from_json(m, fname=fname, wts=StoreSpec().value()) + # Make sure we have no 
suffixes loaded + assert not hasattr(m.fs.unit, "scaling_factor") + + # Autoscale variables by magnitude + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(m) + assert hasattr(m.fs.unit, "scaling_factor") + + return m + + +@pytest.mark.integration +def test_verify_model_load(gibbs): + for v in gibbs.component_data_objects(ctype=Var, descend_into=True): + assert v.value is not None + + +@pytest.mark.integration +def test_autoscale_L2_norm(gibbs): + scaler = AutoScaler() + scaler.scale_constraints_by_jacobian_norm(gibbs, norm=2) + + scaled = jacobian_cond(gibbs, scaled=True) + + assert scaled == pytest.approx(2510.945, rel=1e-5) + + +@pytest.mark.integration +def test_autoscale_L1_norm(gibbs): + scaler = AutoScaler() + scaler.scale_constraints_by_jacobian_norm(gibbs, norm=1) + + scaled = jacobian_cond(gibbs, scaled=True) + + assert scaled == pytest.approx(2986.994, rel=1e-5) + + +@pytest.mark.integration +def test_nominal_magnitude_harmonic(gibbs): + scaler = CustomScalerBase() + + for c in gibbs.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_value(c, scheme="harmonic_mean") + + assert jacobian_cond(gibbs, scaled=True) == pytest.approx(2.83944e12, rel=1e-5) + + +@pytest.mark.integration +def test_nominal_magnitude_inv_max(gibbs): + scaler = CustomScalerBase() + + for c in gibbs.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_value(c, scheme="inverse_maximum") + + assert jacobian_cond(gibbs, scaled=True) == pytest.approx(784576, rel=1e-5) + + +@pytest.mark.integration +def test_nominal_magnitude_inv_min(gibbs): + scaler = CustomScalerBase() + + for c in gibbs.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_value(c, scheme="inverse_minimum") + + assert jacobian_cond(gibbs, scaled=True) == pytest.approx(5.601e12, rel=1e-5) + + +@pytest.mark.integration +def test_nominal_magnitude_inv_sum(gibbs): + scaler = 
CustomScalerBase() + + for c in gibbs.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_value(c, scheme="inverse_sum") + + assert jacobian_cond(gibbs, scaled=True) == pytest.approx(1501632, rel=1e-5) + + +@pytest.mark.integration +def test_nominal_magnitude_inv_rss(gibbs): + scaler = CustomScalerBase() + + for c in gibbs.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_value(c, scheme="inverse_root_sum_squared") + + assert jacobian_cond(gibbs, scaled=True) == pytest.approx(959994, rel=1e-5) + + +@pytest.mark.integration +def test_scale_constraint_by_nominal_derivative_2norm_perfect_information(gibbs): + scaler = CustomScalerBase() + + for c in gibbs.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_derivative_norm(c) + + scaled = jacobian_cond(gibbs, scaled=True) + assert scaled == pytest.approx(3.07419e06, rel=1e-5) + + +@pytest.mark.integration +def test_scale_constraint_by_nominal_derivative_2norm_imperfect_information(): + # Build a fresh model with no scaling factors + model = build_model() + + # Set imperfect scaling factors for all variables, representing an initial "best-guess" + # Feed states are known exactly - set scaling based on these + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].flow_mol, 1 / 230 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].flow_mol_phase, 1 / 230 + ) # Only 1 phase, so we "know" this + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["H2"], 1 / 0.0435 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["N2"], 1 / 0.6522 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["O2"], 1 / 0.1739 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CO2"], 1e5 + ) + set_scaling_factor( + 
model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CH4"], + 1 / 0.1304, + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CO"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["H2O"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["NH3"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].temperature, 1 / 1500 + ) + set_scaling_factor(model.fs.unit.control_volume.properties_in[0.0].pressure, 1e-5) + # Assume user does not know anything about enthalpy + + # Best guesses for unit model and outlet state conditions + set_scaling_factor(model.fs.unit.control_volume.heat[0.0], 1e-6) + + set_scaling_factor(model.fs.unit.control_volume.properties_out[0.0].flow_mol, 1e-2) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].flow_mol_phase, 1e-2 + ) # Only 1 phase, so we "know" this + # N2 is inert, so will be order 0.1, assume CH4 and H2 are near-totally consumed, assume most O2 consumed + # Assume moderate amounts of CO2 and H2O, small amounts of CO, trace NH3 NH3 + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["N2"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["O2"], 1e2 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO2"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CH4"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO"], 1e3 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2O"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["NH3"], 1e4 + ) + 
set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].temperature, 1e-3 + ) + set_scaling_factor(model.fs.unit.control_volume.properties_out[0.0].pressure, 1e-5) + + scaler = CustomScalerBase() + for c in model.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_derivative_norm(c) + + scaled = jacobian_cond(model, scaled=True) + assert scaled == pytest.approx(1.06128e11, rel=1e-5) + + +@pytest.mark.integration +def test_scale_constraint_by_nominal_derivative_1norm_perfect_information(gibbs): + scaler = CustomScalerBase() + + for c in gibbs.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_derivative_norm(c, norm=1) + + scaled = jacobian_cond(gibbs, scaled=True) + assert scaled == pytest.approx(2.060153e06, rel=1e-5) + + +@pytest.mark.integration +def test_scale_constraint_by_nominal_derivative_clean_up(gibbs): + # Confirm that the scaler did not change any values in the model + # Build a fresh model with no scaling factors or initial values + model = build_model() + # Clone the model to use as a reference case + refmodel = model.clone() + + # Set imperfect scaling factors for all variables, representing an initial "best-guess" + # Feed states are known exactly - set scaling based on these + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].flow_mol, 1 / 230 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].flow_mol_phase, 1 / 230 + ) # Only 1 phase, so we "know" this + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["H2"], 1 / 0.0435 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["N2"], 1 / 0.6522 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["O2"], 1 / 0.1739 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CO2"], 1e5 + ) + set_scaling_factor( 
+ model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CH4"], + 1 / 0.1304, + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CO"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["H2O"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["NH3"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].temperature, 1 / 1500 + ) + set_scaling_factor(model.fs.unit.control_volume.properties_in[0.0].pressure, 1e-5) + # Assume user does not know anything about enthalpy + + # Best guesses for unit model and outlet state conditions + set_scaling_factor(model.fs.unit.control_volume.heat[0.0], 1e-6) + + set_scaling_factor(model.fs.unit.control_volume.properties_out[0.0].flow_mol, 1e-2) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].flow_mol_phase, 1e-2 + ) # Only 1 phase, so we "know" this + # N2 is inert, so will be order 0.1, assume CH4 and H2 are near-totally consumed, assume most O2 consumed + # Assume moderate amounts of CO2 and H2O, small amounts of CO, trace NH3 + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["N2"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["O2"], 1e2 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO2"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CH4"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO"], 1e3 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2O"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["NH3"], 1e4 + ) + 
set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].temperature, 1e-3 + ) + set_scaling_factor(model.fs.unit.control_volume.properties_out[0.0].pressure, 1e-5) + + scaler = CustomScalerBase() + for c in model.component_data_objects(ctype=Constraint, descend_into=True): + scaler.scale_constraint_by_nominal_derivative_norm(c) + + # Iterate overall all variables in model and compare value to reference model + for v in model.component_data_objects(ctype=Var, descend_into=True): + refv = refmodel.find_component(v.name) + + assert v.value == refv.value + + +if __name__ == "__main__": + # Run file to regenerate solution json + m = build_model() + + initialization_tester( + m, + optarg={"tol": 1e-6}, + state_args={ + "temperature": 2844.38, + "pressure": 101325.0, + "flow_mol": 251.05, + "mole_frac_comp": { + "CH4": 1e-5, + "CO": 0.0916, + "CO2": 0.0281, + "H2": 0.1155, + "H2O": 0.1633, + "N2": 0.5975, + "NH3": 1e-5, + "O2": 0.0067, + }, + }, + ) + + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(m) + scaler.scale_constraints_by_jacobian_norm(m) + + solver = get_solver("ipopt_v2", writer_config={"scale_model": True}) + results = solver.solve(m, tee=True) + + # Check for optimal solution + assert_optimal_termination(results) + + to_json(m, fname=fname, human_read=True, wts=StoreSpec().value()) diff --git a/idaes/core/scaling/tests/test_nominal_value_walker.py b/idaes/core/scaling/tests/test_nominal_value_walker.py new file mode 100644 index 0000000000..905c05682e --- /dev/null +++ b/idaes/core/scaling/tests/test_nominal_value_walker.py @@ -0,0 +1,745 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). 
+# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module contains tests for the NominalValueExtractionVisitor. +""" +import math +import logging +import pytest + +import pyomo.environ as pyo +from pyomo.contrib.pynumero.asl import AmplInterface + +from idaes.core.scaling.util import NominalValueExtractionVisitor, set_scaling_factor +from idaes.models.properties.modular_properties.eos.ceos_common import ( + cubic_roots_available, + CubicThermoExpressions, + CubicType as CubicEoS, +) + +__author__ = "Andrew Lee" + + +class TestNominalValueExtractionVisitor: + @pytest.fixture(scope="class") + def m(self): + m = pyo.ConcreteModel() + m.set = pyo.Set(initialize=["a", "b", "c"]) + + return m + + @pytest.mark.unit + def test_int(self, m): + assert NominalValueExtractionVisitor().walk_expression(expr=7) == [7] + + @pytest.mark.unit + def test_float(self, m): + assert NominalValueExtractionVisitor().walk_expression(expr=7.7) == [7.7] + + @pytest.mark.unit + def test_negative_float(self, m): + assert NominalValueExtractionVisitor().walk_expression(expr=-7.7) == [-7.7] + + @pytest.mark.unit + def test_zero(self, m): + assert NominalValueExtractionVisitor().walk_expression(expr=0) == [0] + + @pytest.mark.unit + def test_true(self, m): + assert NominalValueExtractionVisitor().walk_expression(expr=True) == [1] + + @pytest.mark.unit + def test_false(self, m): + assert NominalValueExtractionVisitor().walk_expression(expr=False) == [0] + + @pytest.mark.unit + def test_scalar_param_no_scale(self, m): + m.scalar_param = 
pyo.Param(initialize=1, mutable=True) + assert NominalValueExtractionVisitor().walk_expression(expr=m.scalar_param) == [ + 1 + ] + + @pytest.mark.unit + def test_scalar_param_w_scale(self, m): + m.scalar_param = pyo.Param(default=12, mutable=True) + set_scaling_factor(m.scalar_param, 1 / 10) + assert NominalValueExtractionVisitor().walk_expression(expr=m.scalar_param) == [ + 12 + ] + + @pytest.mark.unit + def test_indexed_param_w_scale(self, m): + m.indexed_param = pyo.Param(m.set, initialize=1, mutable=True) + set_scaling_factor(m.indexed_param["a"], 1 / 13) + set_scaling_factor(m.indexed_param["b"], 1 / 14) + set_scaling_factor(m.indexed_param["c"], 1 / 15) + + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_param["a"] + ) == [1] + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_param["b"] + ) == [1] + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_param["c"] + ) == [1] + + @pytest.mark.unit + def test_scalar_var_no_scale(self, m): + m.scalar_var = pyo.Var(initialize=10) + # Should use current value + assert NominalValueExtractionVisitor().walk_expression(expr=m.scalar_var) == [ + 10 + ] + + @pytest.mark.unit + def test_scalar_var_w_scale(self, m): + set_scaling_factor(m.scalar_var, 1 / 21) + assert NominalValueExtractionVisitor().walk_expression(expr=m.scalar_var) == [ + 21 + ] + + @pytest.mark.unit + def test_var_neg_bounds(self): + m = pyo.ConcreteModel() + m.var = pyo.Var(bounds=(-1000, 0)) + set_scaling_factor(m.var, 1 / 4) + + # Expect nominal value to be negative + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [-4] + + @pytest.mark.unit + def test_var_neg_upper_bound(self): + m = pyo.ConcreteModel() + m.var = pyo.Var(bounds=(None, -2000)) + set_scaling_factor(m.var, 1 / 4) + + # Expect nominal value to be negative + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [-4] + + @pytest.mark.unit + def test_var_neg_domain(self): + m = 
pyo.ConcreteModel() + m.var = pyo.Var(domain=pyo.NegativeReals) + + set_scaling_factor(m.var, 1 / 4) + # Expect nominal value to be negative + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [-4] + + @pytest.mark.unit + def test_var_neg_value(self): + m = pyo.ConcreteModel() + m.var = pyo.Var(initialize=-1) + set_scaling_factor(m.var, 1 / 4) + + # Expect nominal value to be negative + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [-4] + + @pytest.mark.unit + def test_var_fixed_value(self): + m = pyo.ConcreteModel() + m.var = pyo.Var(initialize=-1) + m.var.fix() + set_scaling_factor(m.var, 1 / 4) + + # Nominal value should be value + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [-1] + + @pytest.mark.unit + def test_var_pos_bounds(self): + m = pyo.ConcreteModel() + m.var = pyo.Var(bounds=(0, 1000)) + set_scaling_factor(m.var, 1 / 4) + + # Expect nominal value to be positive + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [4] + + @pytest.mark.unit + def test_var_pos_lower_bound(self): + m = pyo.ConcreteModel() + m.var = pyo.Var(bounds=(1000, None)) + set_scaling_factor(m.var, 1 / 4) + + # Expect nominal value to be positive + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [4] + + @pytest.mark.unit + def test_var_pos_domain(self): + m = pyo.ConcreteModel() + m.var = pyo.Var(domain=pyo.PositiveReals) + set_scaling_factor(m.var, 1 / 4) + + # Expect nominal value to be positive + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [4] + + @pytest.mark.unit + def test_var_pos_value(self): + m = pyo.ConcreteModel() + m.var = pyo.Var(initialize=1) + set_scaling_factor(m.var, 1 / 4) + + # Expect nominal value to be positive + assert NominalValueExtractionVisitor().walk_expression(expr=m.var) == [4] + + @pytest.mark.unit + def test_indexed_var_no_scale(self, m): + m.indexed_var = pyo.Var(m.set, initialize=1) + assert 
NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["a"] + ) == [1] + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["b"] + ) == [1] + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["c"] + ) == [1] + + @pytest.mark.unit + def test_indexed_var_w_scale(self, m): + set_scaling_factor(m.indexed_var["a"], 1 / 22) + set_scaling_factor(m.indexed_var["b"], 1 / 23) + set_scaling_factor(m.indexed_var["c"], 1 / 24) + + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["a"] + ) == [22] + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["b"] + ) == [23] + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["c"] + ) == [24] + + @pytest.mark.unit + def test_indexed_var_w_scale_partial_fixed(self, m): + m.indexed_var["a"].fix(20) + set_scaling_factor(m.indexed_var["a"], 1 / 22) + set_scaling_factor(m.indexed_var["b"], 1 / 23) + set_scaling_factor(m.indexed_var["c"], 1 / 24) + + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["a"] + ) == [20] + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["b"] + ) == [23] + assert NominalValueExtractionVisitor().walk_expression( + expr=m.indexed_var["c"] + ) == [24] + + # Clean up for future tests + m.indexed_var["a"].unfix() + + @pytest.mark.unit + def test_equality_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=m.scalar_var == m.indexed_var["a"] + ) == [21, 22] + + @pytest.mark.unit + def test_inequality_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=m.scalar_var <= m.indexed_var["a"] + ) == [21, 22] + + @pytest.mark.unit + def test_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=sum(m.indexed_var[i] for i in m.set) + ) == [22, 23, 24] + + @pytest.mark.unit + def test_additive_expr(self, m): + assert 
NominalValueExtractionVisitor().walk_expression( + expr=m.scalar_var + m.indexed_var["a"] + m.scalar_param + ) == [21, 22, 12] + + @pytest.mark.unit + def test_additive_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=m.scalar_var + m.indexed_var["a"] - m.scalar_param + ) == [21, 22, -12] + + @pytest.mark.unit + def test_product_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=m.scalar_var * m.indexed_var["a"] * m.scalar_param + ) == [21 * 22 * 12] + + @pytest.mark.unit + def test_product_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=(m.scalar_var + m.indexed_var["a"]) + * (m.scalar_param + m.indexed_var["b"]) + ) == [21 * 12, 21 * 23, 22 * 12, 22 * 23] + + @pytest.mark.unit + def test_product_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=(m.scalar_var - m.indexed_var["a"]) + * (m.scalar_param - m.indexed_var["b"]) + ) == [21 * 12, -21 * 23, -22 * 12, 22 * 23] + + @pytest.mark.unit + def test_division_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=m.scalar_var / m.indexed_var["a"] / m.scalar_param + ) == [21 / 22 / 12] + + @pytest.mark.unit + def test_division_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=(m.scalar_var + m.indexed_var["a"]) + / (m.scalar_param + m.indexed_var["b"]) + ) == [(21 + 22) / (12 + 23)] + + @pytest.mark.unit + def test_division_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=(m.scalar_var - m.indexed_var["a"]) + / (m.scalar_param - m.indexed_var["b"]) + ) == [(21 - 22) / (12 - 23)] + + @pytest.mark.unit + def test_division_expr_error(self, m, caplog): + caplog.set_level(logging.DEBUG, logger="idaes.core.scaling") + assert NominalValueExtractionVisitor().walk_expression( + expr=1 / (m.scalar_var - 21) + ) == [1] + + expected = "Nominal value of 0 found in denominator 
of division expression. " + "Assigning a value of 1. You should check you scaling factors and models to " + "ensure there are no values of 0 that can appear in these functions." + + assert expected in caplog.text + + @pytest.mark.unit + def test_pow_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=m.scalar_var ** m.indexed_var["a"] + ) == pytest.approx([21**22], rel=1e-12) + + @pytest.mark.unit + def test_pow_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=(m.scalar_var + m.indexed_var["a"]) + ** (m.scalar_param + m.indexed_var["b"]) + ) == [ + pytest.approx((21 + 22) ** (12 + 23), rel=1e-12), + ] + + @pytest.mark.unit + def test_pow_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=(m.scalar_var - m.indexed_var["a"]) + ** (m.scalar_param - m.indexed_var["b"]) + ) == [ + pytest.approx(abs(21 - 22) ** (12 - 23), rel=1e-12), + ] + + @pytest.mark.unit + def test_negation_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression(expr=-m.scalar_var) == [ + -21 + ] + + @pytest.mark.unit + def test_negation_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=-(m.scalar_var + m.indexed_var["a"]) + ) == [-21, -22] + + @pytest.mark.unit + def test_log_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.log(m.scalar_var) + ) == [pytest.approx(math.log(21), rel=1e-12)] + + @pytest.mark.unit + def test_log_expr_error(self, m): + with pytest.raises( + ValueError, + match="Evaluation error occurred when getting nominal value in log expression " + "with input 0.0. 
You should check you scaling factors and model to " + "address any numerical issues or scale this constraint manually.", + ): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.log(m.scalar_var - 21) + ) + + @pytest.mark.unit + def test_log_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.log(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.log(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_log_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.log(-m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.log(-21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_log10_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.log10(m.scalar_var) + ) == [pytest.approx(math.log10(21), rel=1e-12)] + + @pytest.mark.unit + def test_log10_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.log10(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.log10(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_log10_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.log10(-m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.log10(-21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_log10_expr_error(self, m): + with pytest.raises( + ValueError, + match="Evaluation error occurred when getting nominal value in log10 expression " + "with input 0.0. 
You should check you scaling factors and model to " + "address any numerical issues or scale this constraint manually.", + ): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.log10(m.scalar_var - 21) + ) + + @pytest.mark.unit + def test_sqrt_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sqrt(m.scalar_var) + ) == [pytest.approx(21**0.5, rel=1e-12)] + + @pytest.mark.unit + def test_sqrt_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sqrt(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx((21 + 22) ** 0.5, rel=1e-12)] + + @pytest.mark.unit + def test_sqrt_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sqrt(-m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx((-21 + 22) ** 0.5, rel=1e-12)] + + @pytest.mark.unit + def test_sqrt_expr_error(self, m): + with pytest.raises( + ValueError, + match="Evaluation error occurred when getting nominal value in sqrt expression " + "with input -21.0. 
You should check you scaling factors and model to " + "address any numerical issues or scale this constraint manually.", + ): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sqrt(-m.scalar_var) + ) + + @pytest.mark.unit + def test_sin_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sin(m.scalar_var) + ) == [pytest.approx(math.sin(21), rel=1e-12)] + + @pytest.mark.unit + def test_sin_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sin(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.sin(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_sin_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sin(m.scalar_var - m.indexed_var["a"]) + ) == [pytest.approx(math.sin(21 - 22), rel=1e-12)] + + @pytest.mark.unit + def test_cos_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.cos(m.scalar_var) + ) == [pytest.approx(math.cos(21), rel=1e-12)] + + @pytest.mark.unit + def test_cos_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.cos(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.cos(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_cos_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.cos(m.scalar_var - m.indexed_var["a"]) + ) == [pytest.approx(math.cos(21 - 22), rel=1e-12)] + + @pytest.mark.unit + def test_tan_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.tan(m.scalar_var) + ) == [pytest.approx(math.tan(21), rel=1e-12)] + + @pytest.mark.unit + def test_tan_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.tan(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.tan(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_tan_sum_expr_w_negation(self, m): + assert 
NominalValueExtractionVisitor().walk_expression( + expr=pyo.tan(m.scalar_var - m.indexed_var["a"]) + ) == [pytest.approx(math.tan(21 - 22), rel=1e-12)] + + @pytest.mark.unit + def test_sinh_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sinh(m.scalar_var) + ) == [pytest.approx(math.sinh(21), rel=1e-12)] + + @pytest.mark.unit + def test_sinh_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sinh(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.sinh(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_sinh_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.sinh(m.scalar_var - m.indexed_var["a"]) + ) == [pytest.approx(math.sinh(21 - 22), rel=1e-12)] + + @pytest.mark.unit + def test_cosh_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.cosh(m.scalar_var) + ) == [pytest.approx(math.cosh(21), rel=1e-12)] + + @pytest.mark.unit + def test_cosh_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.cosh(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.cosh(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_cosh_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.cosh(m.scalar_var - m.indexed_var["a"]) + ) == [pytest.approx(math.cosh(21 - 22), rel=1e-12)] + + @pytest.mark.unit + def test_tanh_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.tanh(m.scalar_var) + ) == [pytest.approx(math.tanh(21), rel=1e-12)] + + @pytest.mark.unit + def test_tanh_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.tanh(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.tanh(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_tanh_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.tanh(m.scalar_var 
- m.indexed_var["a"]) + ) == [pytest.approx(math.tanh(21 - 22), rel=1e-12)] + + @pytest.mark.unit + def test_asin_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression(expr=pyo.asin(1)) == [ + pytest.approx(math.asin(1), rel=1e-12) + ] + + @pytest.mark.unit + def test_asin_sum_expr(self, m): + m.scalar_param.set_value(0.5) + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.asin(0.5 + m.scalar_param) + ) == [pytest.approx(math.asin(1), rel=1e-12)] + + @pytest.mark.unit + def test_asin_sum_expr_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.asin(0.5 - m.scalar_param) + ) == [pytest.approx(math.asin(0), rel=1e-12)] + + @pytest.mark.unit + def test_acos_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.acos(m.scalar_param) + ) == [pytest.approx(math.acos(0.5), rel=1e-12)] + + @pytest.mark.unit + def test_acos_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.acos(0.5 + m.scalar_param) + ) == [pytest.approx(math.acos(1), rel=1e-12)] + + @pytest.mark.unit + def test_acos_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.acos(0.5 - m.scalar_param) + ) == [pytest.approx(math.acos(0), rel=1e-12)] + + @pytest.mark.unit + def test_asinh_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.asinh(m.scalar_var) + ) == [pytest.approx(math.asinh(21), rel=1e-12)] + + @pytest.mark.unit + def test_asinh_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.asinh(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.asinh(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_asinh_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.asinh(m.scalar_var - m.indexed_var["a"]) + ) == [pytest.approx(math.asinh(21 - 22), rel=1e-12)] + + @pytest.mark.unit + def 
test_acosh_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.acosh(m.scalar_var) + ) == [pytest.approx(math.acosh(21), rel=1e-12)] + + @pytest.mark.unit + def test_acosh_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.acosh(m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.acosh(21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_acosh_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.acosh(-m.scalar_var + m.indexed_var["a"]) + ) == [pytest.approx(math.acosh(-21 + 22), rel=1e-12)] + + @pytest.mark.unit + def test_atanh_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.atanh(m.scalar_param) + ) == [pytest.approx(math.atanh(0.5), rel=1e-12)] + + @pytest.mark.unit + def test_atanh_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.atanh(0.4 + m.scalar_param) + ) == [pytest.approx(math.atanh(0.9), rel=1e-12)] + + @pytest.mark.unit + def test_atanh_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.atanh(0.4 - m.scalar_param) + ) == [pytest.approx(math.atanh(-0.1), rel=1e-12)] + + @pytest.mark.unit + def test_exp_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.exp(m.scalar_param) + ) == [pytest.approx(math.exp(0.5), rel=1e-12)] + + @pytest.mark.unit + def test_exp_sum_expr(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.exp(0.4 + m.scalar_param) + ) == [pytest.approx(math.exp(0.9), rel=1e-12)] + + @pytest.mark.unit + def test_exp_sum_expr_w_negation(self, m): + assert NominalValueExtractionVisitor().walk_expression( + expr=pyo.exp(-0.4 + m.scalar_param) + ) == [pytest.approx(math.exp(0.1), rel=1e-12)] + + @pytest.mark.unit + def test_expr_if(self, m): + m.exprif = pyo.Expr_if( + IF=m.scalar_param, + THEN=m.indexed_var["a"], + ELSE=m.indexed_var["b"] + 
m.indexed_var["c"], + ) + + assert NominalValueExtractionVisitor().walk_expression(expr=m.exprif) == [ + 22, + 23, + 24, + ] + + @pytest.mark.unit + def test_expr_if_w_negation(self, m): + m.exprif = pyo.Expr_if( + IF=m.scalar_param, + THEN=m.indexed_var["a"], + ELSE=m.indexed_var["b"] - m.indexed_var["c"], + ) + + assert NominalValueExtractionVisitor().walk_expression(expr=m.exprif) == [ + 22, + 23, + -24, + ] + + @pytest.mark.unit + @pytest.mark.skipif( + not AmplInterface.available(), reason="pynumero_ASL is not available" + ) + @pytest.mark.skipif(not cubic_roots_available, reason="Cubic roots not available") + def test_ext_func(self): + # Use the cubic root external function to test + m = pyo.ConcreteModel() + m.a = pyo.Var(initialize=1) + m.b = pyo.Var(initialize=1) + + set_scaling_factor(m.a, 1 / 2) + set_scaling_factor(m.b, 1 / 4) + + m.expr_write = CubicThermoExpressions(m) + Z = m.expr_write.z_liq(eos=CubicEoS.PR, A=m.a, B=m.b) + + expected_mag = -9.489811292072448 + assert NominalValueExtractionVisitor().walk_expression(expr=Z) == [ + pytest.approx(expected_mag, rel=1e-8) + ] + + # Check that model state did not change + assert pyo.value(m.a) == 1 + assert pyo.value(m.b) == 1 + assert pyo.value(Z) == pytest.approx(-2.1149075414767577, rel=1e-8) + + # Now, change the actual state to the expected magnitudes and confirm result + m.a.set_value(2) + m.b.set_value(4) + assert pyo.value(Z) == pytest.approx(expected_mag, rel=1e-8) + + @pytest.mark.unit + def test_Expression(self, m): + m.expression = pyo.Expression( + expr=m.scalar_param ** (sum(m.indexed_var[i] for i in m.set)) + ) + + assert NominalValueExtractionVisitor().walk_expression(expr=m.expression) == [ + 0.5 ** (22 + 23 + 24) + ] + + @pytest.mark.unit + def test_constraint(self, m): + m.constraint = pyo.Constraint(expr=m.scalar_var == m.expression) + + assert NominalValueExtractionVisitor().walk_expression( + expr=m.constraint.expr + ) == [21, 0.5 ** (22 + 23 + 24)] diff --git 
a/idaes/core/scaling/tests/test_scaling_base.py b/idaes/core/scaling/tests/test_scaling_base.py new file mode 100644 index 0000000000..d1b5a2425b --- /dev/null +++ b/idaes/core/scaling/tests/test_scaling_base.py @@ -0,0 +1,319 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Tests for ScalerBase. 
+ +Author: Andrew Lee +""" +import pytest +import re + +from pyomo.environ import ConcreteModel, Constraint, Set, Suffix, Var +from pyomo.common.config import ConfigDict + +from idaes.core.scaling.scaling_base import ScalerBase +import idaes.logger as idaeslog + + +@pytest.fixture +def model(): + m = ConcreteModel() + + m.s = Set(initialize=[1, 2, 3, 4]) + + m.v = Var(m.s) + + def c_rule(b, i): + return b.v[i] == i + + m.c = Constraint(m.s, rule=c_rule) + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + + m.scaling_factor[m.v[1]] = 1 + m.scaling_factor[m.v[2]] = 2 + m.scaling_factor[m.v[3]] = 3 + m.scaling_factor[m.v[4]] = 4 + m.scaling_factor[m.c[1]] = 11 + m.scaling_factor[m.c[2]] = 21 + m.scaling_factor[m.c[3]] = 31 + m.scaling_factor[m.c[4]] = 41 + + return m + + +class TestScalerBase: + @pytest.mark.unit + def test_init(self): + sb = ScalerBase() + + assert isinstance(sb.config, ConfigDict) + assert sb.config.zero_tolerance == 1e-12 + assert sb.config.max_variable_scaling_factor == 1e10 + assert sb.config.min_variable_scaling_factor == 1e-10 + assert sb.config.max_constraint_scaling_factor == 1e10 + assert sb.config.min_constraint_scaling_factor == 1e-10 + assert not sb.config.overwrite + + @pytest.mark.unit + def test_get_scaling_factor(self, model): + sb = ScalerBase() + + assert sb.get_scaling_factor(model.v[1]) == 1 + + @pytest.mark.unit + def test_set_scaling_factor(self, model): + sb = ScalerBase() + + sb._set_scaling_factor( + component=model.v[1], + component_type="variable", + scaling_factor=42, + overwrite=False, + ) + sb._set_scaling_factor( + component=model.c[1], + component_type="constraint", + scaling_factor=42, + overwrite=False, + ) + assert model.scaling_factor[model.v[1]] == 1 # Overwrite = False, no change + assert model.scaling_factor[model.c[1]] == 11 # Overwrite = False, no change + + sb._set_scaling_factor( + component=model.v[1], + component_type="variable", + scaling_factor=42, + overwrite=True, + ) + sb._set_scaling_factor( + 
component=model.c[1], + component_type="constraint", + scaling_factor=42, + overwrite=True, + ) + assert model.scaling_factor[model.v[1]] == 42 + assert model.scaling_factor[model.c[1]] == 42 + + @pytest.mark.unit + def test_set_scaling_factor_overwrite_default(self, model): + sb = ScalerBase(overwrite=False) + + sb._set_scaling_factor( + component=model.v[1], component_type="variable", scaling_factor=42 + ) + sb._set_scaling_factor( + component=model.c[1], component_type="constraint", scaling_factor=42 + ) + assert model.scaling_factor[model.v[1]] == 1 # Overwrite = False, no change + assert model.scaling_factor[model.c[1]] == 11 # Overwrite = False, no change + + # Change default overwrite setting + sb.config.overwrite = True + sb._set_scaling_factor( + component=model.v[1], component_type="variable", scaling_factor=42 + ) + sb._set_scaling_factor( + component=model.c[1], component_type="constraint", scaling_factor=42 + ) + assert model.scaling_factor[model.v[1]] == 42 + assert model.scaling_factor[model.c[1]] == 42 + + @pytest.mark.unit + def test_set_scaling_factor_invalid_component_type(self, model): + sb = ScalerBase() + + with pytest.raises( + ValueError, + match="Invalid value for component_type.", + ): + sb._set_scaling_factor( + component=model.v[1], component_type="foo", scaling_factor="bar" + ) + + @pytest.mark.unit + def test_set_scaling_factor_max_limit(self, model, caplog): + sb = ScalerBase( + overwrite=True, + max_variable_scaling_factor=100, + max_constraint_scaling_factor=200, + ) + + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + # Below max var limit - value set as given + sb._set_scaling_factor( + component=model.v[1], component_type="variable", scaling_factor=42 + ) + assert model.scaling_factor[model.v[1]] == 42 + + # Above max var limit - value limited to max + sb._set_scaling_factor( + component=model.v[1], component_type="variable", scaling_factor=150 + ) + assert model.scaling_factor[model.v[1]] == 100 + assert ( + "Scaling 
factor for v[1] limited by maximum value (max_sf: 100.0 < sf: 150)" + in caplog.text + ) + + # Above max var limit but below max con limit - value set as given + sb._set_scaling_factor( + component=model.v[1], component_type="constraint", scaling_factor=150 + ) + assert model.scaling_factor[model.v[1]] == 150 + + # Above max con limit - value limited to max + sb._set_scaling_factor( + component=model.v[1], component_type="constraint", scaling_factor=250 + ) + assert model.scaling_factor[model.v[1]] == 200 + assert ( + "Scaling factor for v[1] limited by maximum value (max_sf: 200.0 < sf: 250)" + in caplog.text + ) + + @pytest.mark.unit + def test_set_scaling_factor_min_limit(self, model, caplog): + sb = ScalerBase( + overwrite=True, + min_variable_scaling_factor=100, + min_constraint_scaling_factor=200, + ) + + caplog.set_level(idaeslog.DEBUG, logger="idaes") + # Above both min limits - value set as given + sb._set_scaling_factor( + component=model.v[1], component_type="constraint", scaling_factor=400 + ) + assert model.scaling_factor[model.v[1]] == 400 + + # Below con min limit - value limited + sb._set_scaling_factor( + component=model.v[1], component_type="constraint", scaling_factor=150 + ) + assert model.scaling_factor[model.v[1]] == 200 + assert ( + "Scaling factor for v[1] limited by minimum value (min_sf: 200.0 > sf: 150)" + in caplog.text + ) + + # Below con min limit but above min var limit - value set as given + sb._set_scaling_factor( + component=model.v[1], component_type="variable", scaling_factor=150 + ) + assert model.scaling_factor[model.v[1]] == 150 + + # Below both limited + sb._set_scaling_factor( + component=model.v[1], component_type="variable", scaling_factor=50 + ) + assert model.scaling_factor[model.v[1]] == 100 + assert ( + "Scaling factor for v[1] limited by minimum value (min_sf: 100.0 > sf: 50)" + in caplog.text + ) + + @pytest.mark.unit + def test_set_variable_scaling_factor(self, model, caplog): + sb = ScalerBase( + 
max_variable_scaling_factor=1e3, + min_variable_scaling_factor=100, + max_constraint_scaling_factor=2e3, + min_constraint_scaling_factor=200, + overwrite=True, + ) + + caplog.set_level(idaeslog.DEBUG, logger="idaes") + + # Scaling factor within limits + sb.set_variable_scaling_factor(model.v[1], 200) + assert model.scaling_factor[model.v[1]] == 200 + + # Too large + sb.set_variable_scaling_factor(model.v[1], 2e3) + assert model.scaling_factor[model.v[1]] == 1e3 + assert ( + "Scaling factor for v[1] limited by maximum value (max_sf: 1000.0 < sf: 2000.0)" + in caplog.text + ) + + # Too small + sb.set_variable_scaling_factor(model.v[1], 1) + assert model.scaling_factor[model.v[1]] == 100 + assert ( + "Scaling factor for v[1] limited by minimum value (min_sf: 100.0 > sf: 1)" + in caplog.text + ) + + @pytest.mark.unit + def test_set_variable_scaling_factor_invalid_type(self, model): + sb = ScalerBase() + + with pytest.raises( + TypeError, match=re.escape("c[1] is not a variable (or is indexed).") + ): + sb.set_variable_scaling_factor(model.c[1], 200) + + with pytest.raises( + TypeError, match=re.escape("v is not a variable (or is indexed).") + ): + sb.set_variable_scaling_factor(model.v, 200) + + @pytest.mark.unit + def test_set_constraint_scaling_factor(self, model, caplog): + sb = ScalerBase( + max_variable_scaling_factor=1e3, + min_variable_scaling_factor=100, + max_constraint_scaling_factor=2e3, + min_constraint_scaling_factor=200, + overwrite=True, + ) + + caplog.set_level(idaeslog.DEBUG, logger="idaes") + + # Scaling factor within limits + sb.set_constraint_scaling_factor(model.c[1], 200) + assert model.scaling_factor[model.c[1]] == 200 + + # Too large + sb.set_constraint_scaling_factor(model.c[1], 3e3) + assert model.scaling_factor[model.c[1]] == 2e3 + assert ( + "Scaling factor for c[1] limited by maximum value (max_sf: 2000.0 < sf: 3000.0)" + in caplog.text + ) + + # Too small + sb.set_constraint_scaling_factor(model.c[1], 1) + assert 
model.scaling_factor[model.c[1]] == 200 + assert ( + "Scaling factor for c[1] limited by minimum value (min_sf: 200.0 > sf: 1)" + in caplog.text + ) + + @pytest.mark.unit + def test_set_constraint_scaling_factor_invalid_type(self, model): + sb = ScalerBase() + + with pytest.raises( + TypeError, match=re.escape("v[1] is not a constraint (or is indexed).") + ): + sb.set_constraint_scaling_factor(model.v[1], 200) + + with pytest.raises( + TypeError, match=re.escape("c is not a constraint (or is indexed).") + ): + sb.set_constraint_scaling_factor(model.c, 200) diff --git a/idaes/core/scaling/tests/test_scaling_profiler.py b/idaes/core/scaling/tests/test_scaling_profiler.py new file mode 100644 index 0000000000..fc3e47afb7 --- /dev/null +++ b/idaes/core/scaling/tests/test_scaling_profiler.py @@ -0,0 +1,624 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +Tests for Scaling Profiler + +Author: Andrew Lee +""" + +from io import StringIO +import os +import pytest + +from pyomo.environ import ConcreteModel, Constraint, value, Var + +from idaes.core import FlowsheetBlock +from idaes.models.unit_models.gibbs_reactor import GibbsReactor +from idaes.models.properties.activity_coeff_models.methane_combustion_ideal import ( + MethaneParameterBlock as MethaneCombustionParameterBlock, +) +from idaes.core.util import from_json, StoreSpec +from idaes.core.scaling import set_scaling_factor +from idaes.core.scaling.scaler_profiling import ScalingProfiler + + +def demo_model(): + m = ConcreteModel() + + m.v1 = Var(initialize=2) + m.v1.fix() + m.v2 = Var(initialize=4) + m.v3 = Var(initialize=-6) + + m.c1 = Constraint(expr=m.v2 == m.v1**2) + m.c2 = Constraint(expr=0 == m.v1 + m.v2 + m.v3) + + return m + + +def demo_scaling(model): + set_scaling_factor(model.v2, 0.5) + + +def demo_pertubration(model): + model.v1.fix(3) + + +class TestScalingProfiler: + @pytest.mark.unit + def test_init_class(self): + sp = ScalingProfiler( + build_model=demo_model, + user_scaling=demo_scaling, + perturb_state=demo_pertubration, + ) + + assert sp._build_model is demo_model + assert sp._user_scaling is demo_scaling + assert sp._perturb_state is demo_pertubration + assert sp._solver is not None + assert sp._scaling_methods is not None + + @pytest.mark.unit + def test_init_class_w_args(self): + sp = ScalingProfiler( + build_model=demo_model, + user_scaling=demo_scaling, + perturb_state=demo_pertubration, + solver="foo", + scaling_methods="bar", + ) + + assert sp._build_model is demo_model + assert sp._user_scaling is demo_scaling + assert sp._perturb_state is demo_pertubration + assert sp._solver == "foo" + assert sp._scaling_methods == "bar" + + @pytest.mark.unit + def test_scale_vars_user(self): + sp = ScalingProfiler( + build_model=demo_model, + 
user_scaling=demo_scaling, + perturb_state=demo_pertubration, + ) + + model = sp._build_model() + sp._scale_vars(model) + + # Should apply user scaling which will only scale v2 + assert len(model.scaling_factor) == 1 + assert model.scaling_factor[model.v2] == 0.5 + + @pytest.mark.unit + def test_scale_vars_auto(self): + sp = ScalingProfiler( + build_model=demo_model, + user_scaling=demo_scaling, + perturb_state=demo_pertubration, + ) + + model = sp._build_model() + sp._scale_vars(model, perfect=True) + + # Should apply auto scaling which will scale all vars + assert len(model.scaling_factor) == 3 + assert model.scaling_factor[model.v1] == 0.5 + assert model.scaling_factor[model.v2] == 0.25 + assert model.scaling_factor[model.v3] == pytest.approx(1 / 6, rel=1e-8) + + @pytest.mark.unit + def test_apply_scaling_no_method(self): + sp = ScalingProfiler( + build_model=demo_model, + user_scaling=demo_scaling, + ) + + model = sp._build_model() + sp._apply_scaling(model, None, block_based="foo") + + # Should not apply any constraint scaling - no scaling suffix should be present + assert not hasattr(model, "scaling_factor") + + @pytest.mark.unit + def test_apply_scaling_constraint_based_method(self): + # Dummy method to apply scaling by constraint + def dummy_scaler(constraint): + set_scaling_factor(constraint, 2) + + sp = ScalingProfiler( + build_model=demo_model, + user_scaling=demo_scaling, + ) + + # Constraint based method, so use block_based=False + model = sp._build_model() + sp._apply_scaling(model, dummy_scaler, block_based=False) + + # Should have scaling factors of 2 for all constraints + assert len(model.scaling_factor) == 2 + assert model.scaling_factor[model.c1] == 2 + assert model.scaling_factor[model.c2] == 2 + + @pytest.mark.unit + def test_apply_scaling_block_based_method(self): + # Dummy method to apply scaling by block + def dummy_scaler(block): + set_scaling_factor(block.c1, 2) + set_scaling_factor(block.c2, 4) + + sp = ScalingProfiler( + 
build_model=demo_model, + user_scaling=demo_scaling, + ) + + # Block based method, so use block_based=True + model = sp._build_model() + sp._apply_scaling(model, dummy_scaler, block_based=True) + + # Should have scaling factors of for all constraints + assert len(model.scaling_factor) == 2 + assert model.scaling_factor[model.c1] == 2 + assert model.scaling_factor[model.c2] == 4 + + @pytest.mark.unit + def test_solve_perturbed_state_no_callback(self): + sp = ScalingProfiler( + build_model=demo_model, + user_scaling=demo_scaling, + ) + + model = sp._build_model() + res = sp._solved_perturbed_state(model) + + assert res == {} + + @pytest.mark.unit + def test_solve_perturbed_state(self): + sp = ScalingProfiler( + build_model=demo_model, + user_scaling=demo_scaling, + perturb_state=demo_pertubration, + ) + + model = sp._build_model() + res = sp._solved_perturbed_state(model) + + assert value(model.v3) == pytest.approx(-12, rel=1e-6) + + # Model is trivially presolvable, so expect convergence in 0 iterations + assert res["solved"] + assert ( + res["termination_message"] + == "TerminationCondition.convergenceCriteriaSatisfied" + ) + assert res["iterations"] == 0 + + +# Case study using Gibbs reactor model +# Get solution json from scaling tests +FILENAME = "gibbs_solution.json" +local_path = os.path.dirname(os.path.realpath(__file__)) +fname = os.path.join(local_path, FILENAME) + + +def build_model(): + model = ConcreteModel() + model.fs = FlowsheetBlock(dynamic=False) + + model.fs.properties = MethaneCombustionParameterBlock() + + model.fs.unit = GibbsReactor( + property_package=model.fs.properties, + has_heat_transfer=True, + has_pressure_change=True, + ) + + model.fs.unit.inlet.flow_mol[0].fix(230.0) + model.fs.unit.inlet.mole_frac_comp[0, "H2"].fix(0.0435) + model.fs.unit.inlet.mole_frac_comp[0, "N2"].fix(0.6522) + model.fs.unit.inlet.mole_frac_comp[0, "O2"].fix(0.1739) + model.fs.unit.inlet.mole_frac_comp[0, "CO2"].fix(1e-5) + model.fs.unit.inlet.mole_frac_comp[0, 
"CH4"].fix(0.1304) + model.fs.unit.inlet.mole_frac_comp[0, "CO"].fix(1e-5) + model.fs.unit.inlet.mole_frac_comp[0, "H2O"].fix(1e-5) + model.fs.unit.inlet.mole_frac_comp[0, "NH3"].fix(1e-5) + model.fs.unit.inlet.temperature[0].fix(1500.0) + model.fs.unit.inlet.pressure[0].fix(101325.0) + + model.fs.unit.outlet.temperature[0].fix(2844.38) + model.fs.unit.deltaP.fix(0) + + from_json(model, fname=fname, wts=StoreSpec.value()) + + return model + + +def scale_vars(model): + # Set imperfect scaling factors for all variables, representing an initial "best-guess" + # Feed states are known exactly - set scaling based on these + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].flow_mol, 1 / 230 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].flow_mol_phase, 1 / 230 + ) # Only 1 phase, so we "know" this + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["H2"], 1 / 0.0435 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["N2"], 1 / 0.6522 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["O2"], 1 / 0.1739 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CO2"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CH4"], + 1 / 0.1304, + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CO"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["H2O"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["NH3"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].temperature, 1 / 1500 + ) + set_scaling_factor(model.fs.unit.control_volume.properties_in[0.0].pressure, 1e-5) + # Assume user does not know anything about enthalpy + + # Best guesses for unit model and outlet state 
conditions + set_scaling_factor(model.fs.unit.control_volume.heat[0.0], 1e-6) + + set_scaling_factor(model.fs.unit.control_volume.properties_out[0.0].flow_mol, 1e-2) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].flow_mol_phase, 1e-2 + ) # Only 1 phase, so we "know" this + # N2 is inert, so will be order 0.1, assume CH4 and H2 are near-totally consumed, assume most O2 consumed + # Assume moderate amounts of CO2 and H2O, small amounts of CO, trace NH3 NH3 + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["N2"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["O2"], 1e2 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO2"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CH4"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO"], 1e3 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2O"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["NH3"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].temperature, 1e-3 + ) + set_scaling_factor(model.fs.unit.control_volume.properties_out[0.0].pressure, 1e-5) + + return model + + +def perturb_solution(model): + # Decrease O2 in feed + model.fs.unit.inlet.mole_frac_comp[0, "N2"].fix(0.7222) + model.fs.unit.inlet.mole_frac_comp[0, "O2"].fix(0.1039) + + +expected_profile = { + "Unscaled": { + "Manual": { + "condition_number": 5.70342e17, + "solved": False, + "termination_message": "TerminationCondition.locallyInfeasible", + "iterations": 57, + "iters_in_restoration": 27, + "iters_w_regularization": 21, + } + }, + "Vars Only": { + "Manual": { + 
"condition_number": 9.24503e16, + "solved": False, + "termination_message": "TerminationCondition.locallyInfeasible", + "iterations": 82, + "iters_in_restoration": 82, + "iters_w_regularization": 39, + }, + "Auto": { + "condition_number": 6.57667e14, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 9, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Harmonic": { + "Manual": { + "condition_number": 3.73643e18, + "solved": False, + "termination_message": "TerminationCondition.locallyInfeasible", + "iterations": 39, + "iters_in_restoration": 39, + "iters_w_regularization": 0, + }, + "Auto": { + "condition_number": 2.83944e12, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 17, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Inverse Sum": { + "Manual": { + "condition_number": 9.31670e15, + "solved": False, + "termination_message": "TerminationCondition.iterationLimit", + "iterations": 200, + "iters_in_restoration": 200, + "iters_w_regularization": 75, + }, + "Auto": { + "condition_number": 1.50163e6, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 6, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Inverse Root Sum Squares": { + "Manual": { + "condition_number": 1.15511e16, + "solved": False, + "termination_message": "TerminationCondition.iterationLimit", + "iterations": 200, + "iters_in_restoration": 201, + "iters_w_regularization": 107, + }, + "Auto": { + "condition_number": 9.59994e5, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 6, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Inverse Maximum": { + "Manual": { + "condition_number": 1.094304e16, + "solved": False, + "termination_message": 
"TerminationCondition.iterationLimit", + "iterations": 200, + "iters_in_restoration": 197, + "iters_w_regularization": 75, + }, + "Auto": { + "condition_number": 7.84576e5, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 6, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Inverse Minimum": { + "Manual": { + "condition_number": 7.34636e18, + "solved": False, + "termination_message": "TerminationCondition.locallyInfeasible", + "iterations": 49, + "iters_in_restoration": 49, + "iters_w_regularization": 1, + }, + "Auto": { + "condition_number": 5.600998e12, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 16, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Nominal L1 Norm": { + "Manual": { + "condition_number": 1.18925e16, + "solved": False, + "termination_message": "TerminationCondition.locallyInfeasible", + "iterations": 61, + "iters_in_restoration": 60, + "iters_w_regularization": 15, + }, + "Auto": { + "condition_number": 2.06015e6, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 4, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Nominal L2 Norm": { + "Manual": { + "condition_number": 1.18824e16, + "solved": False, + "termination_message": "TerminationCondition.locallyInfeasible", + "iterations": 53, + "iters_in_restoration": 50, + "iters_w_regularization": 7, + }, + "Auto": { + "condition_number": 3.07419e6, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 4, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Actual L1 Norm": { + "Manual": { + "condition_number": 1.46059e9, + "solved": False, + "termination_message": "TerminationCondition.locallyInfeasible", + "iterations": 29, + "iters_in_restoration": 29, + 
"iters_w_regularization": 0, + }, + "Auto": { + "condition_number": 2986.99, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 6, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, + "Actual L2 Norm": { + "Manual": { + "condition_number": 6.61297e8, + "solved": False, + "termination_message": "TerminationCondition.locallyInfeasible", + "iterations": 29, + "iters_in_restoration": 29, + "iters_w_regularization": 0, + }, + "Auto": { + "condition_number": 2510.95, + "solved": True, + "termination_message": "TerminationCondition.convergenceCriteriaSatisfied", + "iterations": 6, + "iters_in_restoration": 0, + "iters_w_regularization": 0, + }, + }, +} + + +@pytest.mark.unit +def test_write_profile_report(): + sp = ScalingProfiler( + build_model=build_model, + user_scaling=scale_vars, + perturb_state=perturb_solution, + ) + + stream = StringIO() + + sp.write_profile_report(results=expected_profile, stream=stream) + + expected = """ +============================================================================ +Scaling Profile Report +---------------------------------------------------------------------------- +Scaling Method || User Scaling || Perfect Scaling +Unscaled || 5.703E+17 | Failed 57 || +Vars Only || 9.245E+16 | Failed 82 || 6.577E+14 | Solved 9 +Harmonic || 3.736E+18 | Failed 39 || 2.839E+12 | Solved 17 +Inverse Sum || 9.317E+15 | Failed 200 || 1.502E+06 | Solved 6 +Inverse Root Sum Squares || 1.155E+16 | Failed 200 || 9.600E+05 | Solved 6 +Inverse Maximum || 1.094E+16 | Failed 200 || 7.846E+05 | Solved 6 +Inverse Minimum || 7.346E+18 | Failed 49 || 5.601E+12 | Solved 16 +Nominal L1 Norm || 1.189E+16 | Failed 61 || 2.060E+06 | Solved 4 +Nominal L2 Norm || 1.188E+16 | Failed 53 || 3.074E+06 | Solved 4 +Actual L1 Norm || 1.461E+09 | Failed 29 || 2.987E+03 | Solved 6 +Actual L2 Norm || 6.613E+08 | Failed 29 || 2.511E+03 | Solved 6 
+============================================================================ +""" + + assert stream.getvalue() == expected + + +@pytest.mark.integration +def test_case_study_profiling(): + sp = ScalingProfiler( + build_model=build_model, + user_scaling=scale_vars, + perturb_state=perturb_solution, + ) + + results = sp.profile_scaling_methods() + + for cmeth, stats in results.items(): + for vmeth in ["Manual", "Auto"]: + if cmeth == "Unscaled" and vmeth == "Auto": + # Unscaled does not have data for auto + continue + rstats = stats[vmeth] + xstats = expected_profile[cmeth][vmeth] + assert rstats["condition_number"] == pytest.approx( + xstats["condition_number"], rel=1e-5 + ) + assert rstats["solved"] == xstats["solved"] + assert rstats["termination_message"] == xstats["termination_message"] + for iters in [ + "iterations", + "iters_in_restoration", + "iters_w_regularization", + ]: + # We will allow a variance of 2 iteration in this test to avoid being overly fragile + assert rstats[iters] == pytest.approx(xstats[iters], abs=2) + + +@pytest.mark.integration +def test_report_scaling_profiles(): + sp = ScalingProfiler( + build_model=build_model, + user_scaling=scale_vars, + perturb_state=perturb_solution, + ) + + stream = StringIO() + + sp.report_scaling_profiles(stream=stream) + + expected = """ +============================================================================ +Scaling Profile Report +---------------------------------------------------------------------------- +Scaling Method || User Scaling || Perfect Scaling +Unscaled || 5.703E+17 | Failed 57 || +Vars Only || 9.245E+16 | Failed 82 || 6.577E+14 | Solved 9 +Harmonic || 3.736E+18 | Failed 39 || 2.839E+12 | Solved 17 +Inverse Sum || 9.317E+15 | Failed 200 || 1.502E+06 | Solved 6 +Inverse Root Sum Squares || 1.155E+16 | Failed 200 || 9.600E+05 | Solved 6 +Inverse Maximum || 1.094E+16 | Failed 200 || 7.846E+05 | Solved 6 +Inverse Minimum || 7.346E+18 | Failed 49 || 5.601E+12 | Solved 16 +Nominal L1 Norm || 
1.189E+16 | Failed 61 || 2.060E+06 | Solved 4 +Nominal L2 Norm || 1.188E+16 | Failed 53 || 3.074E+06 | Solved 4 +Actual L1 Norm || 1.461E+09 | Failed 29 || 2.987E+03 | Solved 6 +Actual L2 Norm || 6.613E+08 | Failed 29 || 2.511E+03 | Solved 6 +============================================================================ +""" + + assert stream.getvalue() == expected diff --git a/idaes/core/scaling/tests/test_util.py b/idaes/core/scaling/tests/test_util.py new file mode 100644 index 0000000000..36e2bf3cb5 --- /dev/null +++ b/idaes/core/scaling/tests/test_util.py @@ -0,0 +1,1351 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Tests for scaling utility functions. 
+ +Author: Andrew Lee +""" +from io import StringIO +import os +import pytest +import re + +from pyomo.environ import Block, Constraint, ConcreteModel, Set, Suffix, Var +from pyomo.common.fileutils import this_file_dir +from pyomo.common.tempfiles import TempfileManager + +from idaes.core.scaling.util import ( + get_scaling_suffix, + get_scaling_factor, + set_scaling_factor, + del_scaling_factor, + _suffix_to_dict, + _suffix_from_dict, + _collect_block_suffixes, + _set_block_suffixes_from_dict, + scaling_factors_to_dict, + scaling_factors_from_dict, + scaling_factors_to_json_file, + scaling_factors_from_json_file, + report_scaling_factors, +) +import idaes.logger as idaeslog + +currdir = this_file_dir() + + +class TestGetScalingSuffix: + @pytest.mark.unit + def test_get_scaling_suffix_block_new(self, caplog): + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + + m = ConcreteModel() + sfx = get_scaling_suffix(m) + + assert "Created new scaling suffix for unknown" in caplog.text + + assert isinstance(m.scaling_factor, Suffix) + assert sfx is m.scaling_factor + + @pytest.mark.unit + def test_get_scaling_suffix_indexed_component_new(self, caplog): + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + + m = ConcreteModel() + m.v = Var([1, 2, 3, 4]) + sfx = get_scaling_suffix(m.v[1]) + + assert "Created new scaling suffix for unknown" in caplog.text + + assert isinstance(m.scaling_factor, Suffix) + assert sfx is m.scaling_factor + + @pytest.mark.unit + def test_get_scaling_suffix_indexed_block(self): + m = ConcreteModel() + m.b = Block([1, 2, 3, 4]) + + with pytest.raises( + TypeError, + match="IndexedBlocks cannot have scaling factors attached to them. 
" + "Please assign scaling factors to the elements of the IndexedBlock.", + ): + get_scaling_suffix(m.b) + + @pytest.mark.unit + def test_get_scaling_suffix_component_new(self, caplog): + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + + m = ConcreteModel() + m.v = Var() + sfx = get_scaling_suffix(m.v) + + assert "Created new scaling suffix for unknown" in caplog.text + + assert isinstance(m.scaling_factor, Suffix) + assert sfx is m.scaling_factor + + @pytest.mark.unit + def test_get_scaling_suffix_block_existing(self, caplog): + m = ConcreteModel() + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + sfx = get_scaling_suffix(m) + + assert "Created new scaling suffix for unknown" not in caplog.text + + assert isinstance(m.scaling_factor, Suffix) + assert sfx is m.scaling_factor + + @pytest.mark.unit + def test_get_scaling_suffix_component_existing(self, caplog): + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + + m = ConcreteModel() + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + m.v = Var() + sfx = get_scaling_suffix(m.v) + + assert "Created new scaling suffix for unknown" not in caplog.text + + assert isinstance(m.scaling_factor, Suffix) + assert sfx is m.scaling_factor + + +class TestSuffixToFromDict: + @pytest.fixture + def model(self): + m = ConcreteModel() + m.s = Set(initialize=[1, 2, 3, 4]) + + m.v = Var(m.s) + + @m.Constraint(m.s) + def c(b, i): + return b.v[i] == i + + m.b = Block(m.s) + + for bd in m.b.values(): + bd.v2 = Var() + + bd.scaling_factor = Suffix(direction=Suffix.EXPORT) + bd.scaling_factor[bd.v2] = 10 + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + for i in m.s: + m.scaling_factor[m.v[i]] = 5 * i + m.scaling_factor[m.c[i]] = 5**i + + return m + + @pytest.mark.unit + def test_suffix_to_dict(self, model): + sdict = _suffix_to_dict(model.scaling_factor) + + assert sdict == { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + } + + 
@pytest.mark.unit + def test_suffix_from_dict(self, model): + sdict = { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + } + + _suffix_from_dict(model.scaling_factor, sdict, overwrite=True) + + assert model.scaling_factor[model.v[1]] == 5 + assert model.scaling_factor[model.v[2]] == 10 + assert model.scaling_factor[model.v[3]] == 15 + assert model.scaling_factor[model.v[4]] == 20 + + assert model.scaling_factor[model.c[1]] == 5 + assert model.scaling_factor[model.c[2]] == 25 + assert model.scaling_factor[model.c[3]] == 125 + assert model.scaling_factor[model.c[4]] == 625 + + assert len(model.scaling_factor) == 8 + + @pytest.mark.unit + def test_suffix_from_dict_invalid_component_name(self, model): + sdict = { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "foo": 7, + } + + with pytest.raises( + ValueError, + match=re.escape("Could not find component foo on block unknown."), + ): + _suffix_from_dict(model.scaling_factor, sdict, overwrite=True) + + # If we set verify_name=False, it should proceed + _suffix_from_dict( + model.scaling_factor, sdict, overwrite=True, verify_names=False + ) + + assert model.scaling_factor[model.v[1]] == 5 + assert model.scaling_factor[model.v[2]] == 10 + assert model.scaling_factor[model.v[3]] == 15 + assert model.scaling_factor[model.v[4]] == 20 + + assert model.scaling_factor[model.c[1]] == 5 + assert model.scaling_factor[model.c[2]] == 25 + assert model.scaling_factor[model.c[3]] == 125 + assert model.scaling_factor[model.c[4]] == 625 + + assert len(model.scaling_factor) == 8 + + @pytest.mark.unit + def test_collect_block_suffixes_single(self): + m = ConcreteModel() + m.s = Set(initialize=[1, 2, 3, 4]) + + m.v = Var(m.s) + + @m.Constraint(m.s) + def c(b, i): + return b.v[i] == i + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + for i in m.s: + m.scaling_factor[m.v[i]] = 5 * i + 
m.scaling_factor[m.c[i]] = 5**i + + sdict = _collect_block_suffixes(m) + + assert sdict == { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "subblock_suffixes": {}, + } + + @pytest.mark.unit + def test_collect_block_suffixes_nested(self, model): + sdict = _collect_block_suffixes(model) + + assert sdict == { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "subblock_suffixes": { + "b[1]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[2]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[3]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[4]": { + "v2": 10, + "subblock_suffixes": {}, + }, + }, + } + + @pytest.mark.unit + def test_collect_block_suffixes_nested_descend_false(self, model): + sdict = _collect_block_suffixes(model, descend_into=False) + + assert sdict == { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + } + + @pytest.mark.unit + def test_set_block_suffixes_from_dict(self): + m = ConcreteModel() + m.s = Set(initialize=[1, 2, 3, 4]) + + m.v = Var(m.s) + + @m.Constraint(m.s) + def c(b, i): + return b.v[i] == i + + m.b = Block(m.s) + + for bd in m.b.values(): + bd.v2 = Var() + + # Set suffix values to retrieve + # Only set values for some subblocks to make sure behaviour is correct + sdict = { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "subblock_suffixes": { + "b[1]": { + "v2": 10, + }, + "b[2]": { + "v2": 20, + }, + }, + } + + _set_block_suffixes_from_dict(m, sdict) + + assert m.scaling_factor[m.v[1]] == 5 + assert m.scaling_factor[m.v[2]] == 10 + assert m.scaling_factor[m.v[3]] == 15 + assert m.scaling_factor[m.v[4]] == 20 + + assert m.scaling_factor[m.c[1]] == 5 + assert m.scaling_factor[m.c[2]] == 25 + assert m.scaling_factor[m.c[3]] == 125 + assert m.scaling_factor[m.c[4]] == 625 
+ + assert len(m.scaling_factor) == 8 + + assert m.b[1].scaling_factor[m.b[1].v2] == 10 + assert len(m.b[1].scaling_factor) == 1 + + assert m.b[2].scaling_factor[m.b[2].v2] == 20 + assert len(m.b[2].scaling_factor) == 1 + + assert not hasattr(m.b[3], "scaling_factor") + assert not hasattr(m.b[4], "scaling_factor") + + @pytest.mark.unit + def test_set_block_suffixes_from_dict_overwrite_false(self): + m = ConcreteModel() + m.s = Set(initialize=[1, 2, 3, 4]) + + m.v = Var(m.s) + + @m.Constraint(m.s) + def c(b, i): + return b.v[i] == i + + m.b = Block(m.s) + + for bd in m.b.values(): + bd.v2 = Var() + + # Set some existing scaling factors + bd.scaling_factor = Suffix(direction=Suffix.EXPORT) + bd.scaling_factor[bd.v2] = 100 + + # Set suffix values to retrieve + # Only set values for some subblocks to make sure behaviour is correct + sdict = { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "subblock_suffixes": { + "b[1]": { + "v2": 10, + }, + "b[2]": { + "v2": 20, + }, + }, + } + + _set_block_suffixes_from_dict(m, sdict, overwrite=False) + + assert m.scaling_factor[m.v[1]] == 5 + assert m.scaling_factor[m.v[2]] == 10 + assert m.scaling_factor[m.v[3]] == 15 + assert m.scaling_factor[m.v[4]] == 20 + + assert m.scaling_factor[m.c[1]] == 5 + assert m.scaling_factor[m.c[2]] == 25 + assert m.scaling_factor[m.c[3]] == 125 + assert m.scaling_factor[m.c[4]] == 625 + + assert len(m.scaling_factor) == 8 + + for i in [1, 2, 3, 4]: + assert m.b[i].scaling_factor[m.b[i].v2] == 100 + assert len(m.b[i].scaling_factor) == 1 + + # Check that we did not mutate the original dict + assert sdict == { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "subblock_suffixes": { + "b[1]": { + "v2": 10, + }, + "b[2]": { + "v2": 20, + }, + }, + } + + @pytest.mark.unit + def test_set_block_suffixes_from_dict_verify_names(self): + m = ConcreteModel() + m.s = 
Set(initialize=[1, 2, 3, 4]) + + m.v = Var(m.s) + + @m.Constraint(m.s) + def c(b, i): + return b.v[i] == i + + m.b = Block(m.s) + + for bd in m.b.values(): + bd.v2 = Var() + + # Set suffix values to retrieve + # Only set values for some subblocks to make sure behaviour is correct + sdict = { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "subblock_suffixes": { + "b[1]": { + "v2": 10, + }, + "foo": { + "v2": 20, + }, + }, + } + + with pytest.raises( + AttributeError, + match="Block unknown does not have a subblock named foo.", + ): + _set_block_suffixes_from_dict(m, sdict, verify_names=True) + + @pytest.mark.unit + def test_scaling_factors_to_dict_suffix(self, model): + sdict = scaling_factors_to_dict(model.scaling_factor) + + assert sdict == { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "block_name": "unknown", + } + + @pytest.mark.unit + def test_scaling_factors_to_dict_blockdata_descend_false(self, model): + sdict = scaling_factors_to_dict(model, descend_into=False) + + assert sdict == { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "block_name": "unknown", + } + + @pytest.mark.unit + def test_scaling_factors_to_dict_blockdata_descend_true(self, model): + sdict = scaling_factors_to_dict(model, descend_into=True) + + assert sdict == { + "v[1]": 5, + "v[2]": 10, + "v[3]": 15, + "v[4]": 20, + "c[1]": 5, + "c[2]": 25, + "c[3]": 125, + "c[4]": 625, + "subblock_suffixes": { + "b[1]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[2]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[3]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[4]": { + "v2": 10, + "subblock_suffixes": {}, + }, + }, + "block_name": "unknown", + } + + @pytest.mark.unit + def test_scaling_factors_to_dict_indexed_block(self, model): + sdict = scaling_factors_to_dict(model.b, 
descend_into=True) + + assert sdict == { + "block_datas": { + "b[1]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[2]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[3]": { + "v2": 10, + "subblock_suffixes": {}, + }, + "b[4]": { + "v2": 10, + "subblock_suffixes": {}, + }, + }, + "block_name": "b", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_suffix(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "unknown", + } + + scaling_factors_from_dict( + model.scaling_factor, sdict, overwrite=True, verify_names=True + ) + + assert model.scaling_factor[model.v[1]] == 50 + assert model.scaling_factor[model.v[2]] == 100 + assert model.scaling_factor[model.v[3]] == 15 + assert model.scaling_factor[model.v[4]] == 20 + assert model.scaling_factor[model.c[1]] == 50 + assert model.scaling_factor[model.c[2]] == 250 + assert model.scaling_factor[model.c[3]] == 125 + assert model.scaling_factor[model.c[4]] == 625 + assert len(model.scaling_factor) == 8 + + for bd in model.b.values(): + assert bd.scaling_factor[bd.v2] == 10 + assert len(bd.scaling_factor) == 1 + + # Ensure we have not mutated original dict + assert sdict == { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "unknown", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_suffix_overwrite_false(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "unknown", + } + + scaling_factors_from_dict( + model.scaling_factor, sdict, overwrite=False, verify_names=True + ) + + assert model.scaling_factor[model.v[1]] == 5 + assert model.scaling_factor[model.v[2]] == 10 + assert model.scaling_factor[model.v[3]] == 15 + assert model.scaling_factor[model.v[4]] == 20 + assert model.scaling_factor[model.c[1]] == 5 + assert 
model.scaling_factor[model.c[2]] == 25 + assert model.scaling_factor[model.c[3]] == 125 + assert model.scaling_factor[model.c[4]] == 625 + assert len(model.scaling_factor) == 8 + + for bd in model.b.values(): + assert bd.scaling_factor[bd.v2] == 10 + assert len(bd.scaling_factor) == 1 + + # Ensure we have not mutated original dict + assert sdict == { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "unknown", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_suffix_verify_fail(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "foo", + } + + with pytest.raises( + ValueError, + match=re.escape( + "Name of parent block (unknown) does not match that recorded in json_dict (foo)" + ), + ): + scaling_factors_from_dict( + model.scaling_factor, sdict, overwrite=True, verify_names=True + ) + + # Ensure we have not mutated original dict + assert sdict == { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "foo", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_block_data(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "unknown", + } + + scaling_factors_from_dict(model, sdict, overwrite=True, verify_names=True) + + assert model.scaling_factor[model.v[1]] == 50 + assert model.scaling_factor[model.v[2]] == 100 + assert model.scaling_factor[model.v[3]] == 15 + assert model.scaling_factor[model.v[4]] == 20 + assert model.scaling_factor[model.c[1]] == 50 + assert model.scaling_factor[model.c[2]] == 250 + assert model.scaling_factor[model.c[3]] == 125 + assert model.scaling_factor[model.c[4]] == 625 + assert len(model.scaling_factor) == 8 + + for bd in model.b.values(): + assert bd.scaling_factor[bd.v2] == 10 + assert len(bd.scaling_factor) == 1 + + # 
Ensure we have not mutated original dict + assert sdict == { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "unknown", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_block_data_overwrite_false(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "unknown", + } + + scaling_factors_from_dict(model, sdict, overwrite=False, verify_names=True) + + assert model.scaling_factor[model.v[1]] == 5 + assert model.scaling_factor[model.v[2]] == 10 + assert model.scaling_factor[model.v[3]] == 15 + assert model.scaling_factor[model.v[4]] == 20 + assert model.scaling_factor[model.c[1]] == 5 + assert model.scaling_factor[model.c[2]] == 25 + assert model.scaling_factor[model.c[3]] == 125 + assert model.scaling_factor[model.c[4]] == 625 + assert len(model.scaling_factor) == 8 + + for bd in model.b.values(): + assert bd.scaling_factor[bd.v2] == 10 + assert len(bd.scaling_factor) == 1 + + # Ensure we have not mutated original dict + assert sdict == { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "unknown", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_block_data_verify_fail(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "foo", + } + + with pytest.raises( + ValueError, + match=re.escape( + "Block name (unknown) does not match that recorded in json_dict (foo)" + ), + ): + scaling_factors_from_dict(model, sdict, overwrite=True, verify_names=True) + + # Ensure we have not mutated original dict + assert sdict == { + "v[1]": 50, + "v[2]": 100, + "c[1]": 50, + "c[2]": 250, + "block_name": "foo", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_indexed_block(self, model): + # Partial dict of scaling factors to ensure we only touch things in 
the dict + sdict = { + "block_datas": { + "b[1]": { + "v2": 42, + }, + "b[2]": { + "v2": 42, + }, + }, + "block_name": "b", + } + + scaling_factors_from_dict(model.b, sdict, overwrite=True, verify_names=True) + + assert model.scaling_factor[model.v[1]] == 5 + assert model.scaling_factor[model.v[2]] == 10 + assert model.scaling_factor[model.v[3]] == 15 + assert model.scaling_factor[model.v[4]] == 20 + assert model.scaling_factor[model.c[1]] == 5 + assert model.scaling_factor[model.c[2]] == 25 + assert model.scaling_factor[model.c[3]] == 125 + assert model.scaling_factor[model.c[4]] == 625 + assert len(model.scaling_factor) == 8 + + for k in [1, 2]: + assert model.b[k].scaling_factor[model.b[k].v2] == 42 + assert len(model.b[k].scaling_factor) == 1 + for k in [3, 4]: + assert model.b[k].scaling_factor[model.b[k].v2] == 10 + assert len(model.b[k].scaling_factor) == 1 + + # Ensure we have not mutated original dict + assert sdict == { + "block_datas": { + "b[1]": { + "v2": 42, + }, + "b[2]": { + "v2": 42, + }, + }, + "block_name": "b", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_indexed_block_overwrite_false(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "block_datas": { + "b[1]": { + "v2": 42, + }, + "b[2]": { + "v2": 42, + }, + }, + "block_name": "b", + } + + scaling_factors_from_dict(model.b, sdict, overwrite=False, verify_names=True) + + assert model.scaling_factor[model.v[1]] == 5 + assert model.scaling_factor[model.v[2]] == 10 + assert model.scaling_factor[model.v[3]] == 15 + assert model.scaling_factor[model.v[4]] == 20 + assert model.scaling_factor[model.c[1]] == 5 + assert model.scaling_factor[model.c[2]] == 25 + assert model.scaling_factor[model.c[3]] == 125 + assert model.scaling_factor[model.c[4]] == 625 + assert len(model.scaling_factor) == 8 + + for k in [1, 2, 3, 4]: + assert model.b[k].scaling_factor[model.b[k].v2] == 10 + assert len(model.b[k].scaling_factor) == 1 + + # 
Ensure we have not mutated original dict + assert sdict == { + "block_datas": { + "b[1]": { + "v2": 42, + }, + "b[2]": { + "v2": 42, + }, + }, + "block_name": "b", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_verify_names_failure(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "block_datas": { + "b[1]": { + "v2": 42, + }, + "b[2]": { + "v2": 42, + }, + }, + "block_name": "foo", + } + + with pytest.raises( + ValueError, + match=re.escape( + "Block name (b) does not match that recorded in json_dict (foo)" + ), + ): + scaling_factors_from_dict(model.b, sdict, overwrite=True, verify_names=True) + + # Ensure we have not mutated original dict + assert sdict == { + "block_datas": { + "b[1]": { + "v2": 42, + }, + "b[2]": { + "v2": 42, + }, + }, + "block_name": "foo", + } + + @pytest.mark.unit + def test_scaling_factors_from_dict_invalid_component(self, model): + # Partial dict of scaling factors to ensure we only touch things in the dict + sdict = { + "block_datas": { + "b[1]": { + "v2": 42, + }, + "b[2]": { + "v2": 42, + }, + }, + "block_name": "foo", + } + + with pytest.raises( + TypeError, match=re.escape("v is not an instance of a Block of Suffix.") + ): + scaling_factors_from_dict(model.v, sdict, overwrite=True, verify_names=True) + + @pytest.mark.unit + def test_scaling_factors_to_json_file(self, model): + temp_context = TempfileManager.new_context() + tmpfile = temp_context.create_tempfile(suffix=".json") + + scaling_factors_to_json_file(model, tmpfile) + + with open(tmpfile, "r") as f: + lines = f.read() + f.close() + + print(lines) + + expected = """{ + "v[1]": 5, + "c[1]": 5, + "v[2]": 10, + "c[2]": 25, + "v[3]": 15, + "c[3]": 125, + "v[4]": 20, + "c[4]": 625, + "subblock_suffixes": { + "b[1]": { + "v2": 10, + "subblock_suffixes": {} + }, + "b[2]": { + "v2": 10, + "subblock_suffixes": {} + }, + "b[3]": { + "v2": 10, + "subblock_suffixes": {} + }, + "b[4]": { + "v2": 10, + 
"subblock_suffixes": {} + } + }, + "block_name": "unknown" +}""" + + assert lines == expected + + # Check for clean up + temp_context.release(remove=True) + assert not os.path.exists(tmpfile) + + @pytest.mark.unit + def test_scaling_factors_from_json_file(self, model): + fname = os.path.join(currdir, "load_scaling_factors.json") + + scaling_factors_from_json_file(model, fname, overwrite=True) + + assert model.scaling_factor[model.v[1]] == 50 + assert model.scaling_factor[model.v[2]] == 100 + assert model.scaling_factor[model.v[3]] == 150 + assert model.scaling_factor[model.v[4]] == 200 + assert model.scaling_factor[model.c[1]] == 50 + assert model.scaling_factor[model.c[2]] == 250 + assert model.scaling_factor[model.c[3]] == 1250 + assert model.scaling_factor[model.c[4]] == 6250 + assert len(model.scaling_factor) == 8 + + for k in [1, 2, 3, 4]: + assert model.b[k].scaling_factor[model.b[k].v2] == 100 + assert len(model.b[k].scaling_factor) == 1 + + +class TestGetScalingFactor: + @pytest.mark.unit + def test_get_scaling_factor_block(self): + m = ConcreteModel() + + with pytest.raises(TypeError, match="Blocks cannot have scaling factors."): + get_scaling_factor(m) + + @pytest.mark.unit + def test_get_scaling_factor(self): + m = ConcreteModel() + m.v = Var() + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + m.scaling_factor[m.v] = 10 + + assert get_scaling_factor(m.v) == 10 + + @pytest.mark.unit + def test_get_scaling_factor_none(self): + m = ConcreteModel() + m.v = Var() + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + + assert get_scaling_factor(m.v) is None + + @pytest.mark.unit + def test_get_scaling_factor_no_suffix(self): + m = ConcreteModel() + m.v = Var() + + assert get_scaling_factor(m.v) is None + + +class TestSetScalingFactor: + @pytest.mark.unit + def test_set_scaling_factor(self): + m = ConcreteModel() + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + + m.v = Var() + + set_scaling_factor(m.v, 42) + + assert m.scaling_factor[m.v] == 42 
+ + @pytest.mark.unit + def test_set_scaling_factor_new_suffix(self, caplog): + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + + m = ConcreteModel() + m.v = Var() + + set_scaling_factor(m.v, 42) + + assert m.scaling_factor[m.v] == 42.0 + + assert "Created new scaling suffix for unknown" in caplog.text + + @pytest.mark.unit + def test_set_scaling_factor_not_float(self): + m = ConcreteModel() + m.v = Var() + + with pytest.raises( + ValueError, match="could not convert string to float: 'foo'" + ): + set_scaling_factor(m.v, "foo") + + @pytest.mark.unit + def test_set_scaling_factor_negative(self): + m = ConcreteModel() + m.v = Var() + + with pytest.raises( + ValueError, + match=re.escape( + "scaling factor for v is negative (-42.0). " + "Scaling factors must be strictly positive." + ), + ): + set_scaling_factor(m.v, -42) + + @pytest.mark.unit + def test_set_scaling_factor_zero(self): + m = ConcreteModel() + m.v = Var() + + with pytest.raises( + ValueError, + match=re.escape( + "scaling factor for v is zero. " + "Scaling factors must be strictly positive." + ), + ): + set_scaling_factor(m.v, 0) + + @pytest.mark.unit + def test_set_scaling_factor_overwrite_false(self, caplog): + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + + m = ConcreteModel() + m.v = Var() + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + m.scaling_factor[m.v] = 10 + + set_scaling_factor(m.v, 42, overwrite=False) + + assert ( + "Existing scaling factor for v found and overwrite=False. " + "Scaling factor unchanged." 
in caplog.text + ) + assert m.scaling_factor[m.v] == 10 + + +class TestDelScalingFactor: + @pytest.mark.unit + def test_del_scaling_factor(self): + m = ConcreteModel() + m.v = Var() + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + m.scaling_factor[m.v] = 10 + + del_scaling_factor(m.v) + + assert len(m.scaling_factor) == 0 + + @pytest.mark.unit + def test_del_scaling_factor_not_present(self, caplog): + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + + m = ConcreteModel() + m.v = Var() + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + + del_scaling_factor(m.v) + + assert len(m.scaling_factor) == 0 + + @pytest.mark.unit + def test_del_scaling_factor_delete_empty(self, caplog): + caplog.set_level( + idaeslog.DEBUG, + logger="idaes", + ) + + m = ConcreteModel() + m.v = Var() + + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + m.scaling_factor[m.v] = 10 + + del_scaling_factor(m.v, delete_empty_suffix=True) + + assert not hasattr(m, "scaling_factor") + + assert "Deleting empty scaling suffix from unknown" in caplog.text + + +class TestReportScalingFactors: + @pytest.fixture + def model(self): + m = ConcreteModel() + m.s = Set(initialize=[1, 2, 3, 4]) + + m.v = Var(m.s) + + @m.Constraint(m.s) + def c(b, i): + return b.v[i] == i + + m.b = Block(m.s) + + for i, bd in m.b.items(): + bd.v2 = Var() + + # Need to check all possible behaviours + # Set values for half the variables (indexes 1 and 3) + for i in [1, 3]: + m.v[i].set_value(42) + m.b[i].v2.set_value(42) + + # Set scaling factors for half the components (indexed 1 and 2) + m.scaling_factor = Suffix(direction=Suffix.EXPORT) + for i in [1, 2]: + m.scaling_factor[m.v[i]] = 5 * i + m.scaling_factor[m.c[i]] = 5**i + m.b[i].scaling_factor = Suffix(direction=Suffix.EXPORT) + m.b[i].scaling_factor[m.b[i].v2] = 10 + + return m + + @pytest.mark.unit + def test_report_scaling_factors_all(self, model): + stream = StringIO() + + report_scaling_factors(model, descend_into=True, stream=stream) + + 
expected = """Scaling Factors for unknown + +Variable Scaling Factor Value Scaled Value +v[1] 5.000E+00 4.200E+01 2.100E+02 +v[2] 1.000E+01 None None +v[3] None 4.200E+01 4.200E+01 +v[4] None None None +b[1].v2 1.000E+01 4.200E+01 4.200E+02 +b[2].v2 1.000E+01 None None +b[3].v2 None 4.200E+01 4.200E+01 +b[4].v2 None None None + +Constraint Scaling Factor +c[1] 5.000E+00 +c[2] 2.500E+01 +c[3] None +c[4] None +""" + + print(stream.getvalue()) + assert stream.getvalue() == expected + + @pytest.mark.unit + def test_report_scaling_factors_descend_false(self, model): + stream = StringIO() + + report_scaling_factors(model, descend_into=False, stream=stream) + + expected = """Scaling Factors for unknown + +Variable Scaling Factor Value Scaled Value +v[1] 5.000E+00 4.200E+01 2.100E+02 +v[2] 1.000E+01 None None +v[3] None 4.200E+01 4.200E+01 +v[4] None None None + +Constraint Scaling Factor +c[1] 5.000E+00 +c[2] 2.500E+01 +c[3] None +c[4] None +""" + + assert stream.getvalue() == expected + + @pytest.mark.unit + def test_report_scaling_factors_vars_only(self, model): + stream = StringIO() + + report_scaling_factors(model, descend_into=True, ctype=Var, stream=stream) + + expected = """Scaling Factors for unknown + +Variable Scaling Factor Value Scaled Value +v[1] 5.000E+00 4.200E+01 2.100E+02 +v[2] 1.000E+01 None None +v[3] None 4.200E+01 4.200E+01 +v[4] None None None +b[1].v2 1.000E+01 4.200E+01 4.200E+02 +b[2].v2 1.000E+01 None None +b[3].v2 None 4.200E+01 4.200E+01 +b[4].v2 None None None +""" + + assert stream.getvalue() == expected + + @pytest.mark.unit + def test_report_scaling_factors_constraints_only(self, model): + stream = StringIO() + + report_scaling_factors( + model, descend_into=True, stream=stream, ctype=Constraint + ) + + expected = """Scaling Factors for unknown + +Constraint Scaling Factor +c[1] 5.000E+00 +c[2] 2.500E+01 +c[3] None +c[4] None +""" + + assert stream.getvalue() == expected + + @pytest.mark.unit + def 
test_report_scaling_factors_indexed_block(self, model): + stream = StringIO() + + report_scaling_factors(model.b, descend_into=True, stream=stream) + + expected = """Scaling Factors for b + +Variable Scaling Factor Value Scaled Value +b[1].v2 1.000E+01 4.200E+01 4.200E+02 +b[2].v2 1.000E+01 None None +b[3].v2 None 4.200E+01 4.200E+01 +b[4].v2 None None None +""" + + assert stream.getvalue() == expected + + @pytest.mark.unit + def test_report_scaling_factors_not_block(self, model): + stream = StringIO() + + with pytest.raises( + TypeError, + match="report_scaling_factors: blk must be an instance of a Pyomo Block.", + ): + report_scaling_factors( + model.v, descend_into=True, stream=stream, ctype=Constraint + ) + + @pytest.mark.unit + def test_report_scaling_factors_invalid_ctype(self, model): + stream = StringIO() + + with pytest.raises( + ValueError, + match="report_scaling_factors only supports None, Var or Constraint for argument ctype: " + "received foo.", + ): + report_scaling_factors(model, descend_into=True, stream=stream, ctype="foo") diff --git a/idaes/core/scaling/util.py b/idaes/core/scaling/util.py new file mode 100644 index 0000000000..da3e4d7e48 --- /dev/null +++ b/idaes/core/scaling/util.py @@ -0,0 +1,812 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +Utility functions for scaling. + +Author: Andrew Lee +""" + +from copy import deepcopy +import math +import sys + +import json + +from pyomo.environ import ( + Binary, + Block, + Boolean, + Constraint, + NegativeIntegers, + NegativeReals, + NonNegativeIntegers, + NonNegativeReals, + NonPositiveIntegers, + NonPositiveReals, + PositiveIntegers, + PositiveReals, + Suffix, + value, + Var, +) +from pyomo.core.base.block import BlockData +from pyomo.core.base.var import VarData +from pyomo.core.base.param import ParamData +from pyomo.core import expr as EXPR +from pyomo.common.numeric_types import native_types +from pyomo.core.base.units_container import _PyomoUnit + +from idaes.core.util.exceptions import BurntToast +import idaes.logger as idaeslog + + +_log = idaeslog.getLogger(__name__) + +TAB = " " * 4 + + +def get_scaling_suffix(component): + """ + Get scaling suffix for component. + + If component is not a Block, gets scaling suffix from parent block. + Creates a new suffix if one is not found. + + Args: + component: component to get suffix for + + Returns: + Pyomo scaling Suffix + + Raises: + TypeError is component is an IndexedBlock + """ + if isinstance(component, BlockData): + blk = component + elif isinstance(component, Block): + raise TypeError( + "IndexedBlocks cannot have scaling factors attached to them. " + "Please assign scaling factors to the elements of the IndexedBlock." + ) + else: + blk = component.parent_block() + + try: + sfx = blk.scaling_factor + except AttributeError: + # No existing suffix, create one + _log.debug(f"Created new scaling suffix for {blk.name}") + sfx = blk.scaling_factor = Suffix(direction=Suffix.EXPORT) + + return sfx + + +def scaling_factors_to_dict(blk_or_suffix, descend_into: bool = True): + """ + Write scaling suffixes to a serializable json dict. + + Component objects are replaced by their local names so they can be + serialized. 
+ + Args: + blk_or_suffix: Pyomo Block or Suffix object to covert to dict + descend_into: for Blocks, whether to descend into any child blocks + + Returns + dict containing scaling factors indexed by component names + + Raises: + TypeError if blk_or_suffix is not an instance of Block or Suffix + + """ + # First, determine what type of component we have + if isinstance(blk_or_suffix, Suffix): + # Suffix + sdict = _suffix_to_dict(blk_or_suffix) + blk = blk_or_suffix.parent_block() + elif isinstance(blk_or_suffix, BlockData): + # Scalar block or element of indexed block + sdict = _collect_block_suffixes(blk_or_suffix, descend_into=descend_into) + blk = blk_or_suffix + elif isinstance(blk_or_suffix, Block): + # Indexed block + blk = blk_or_suffix + sdict = {} + sdict["block_datas"] = {} + for bd in blk_or_suffix.values(): + sdict["block_datas"][bd.name] = _collect_block_suffixes( + bd, descend_into=descend_into + ) + else: + # Not a block or suffix + raise TypeError( + f"{blk_or_suffix.name} is not an instance of a Block of Suffix." + ) + + # Attach block name for future verification + sdict["block_name"] = blk.name + + return sdict + + +def scaling_factors_from_dict( + blk_or_suffix, json_dict: dict, overwrite: bool = False, verify_names: bool = True +): + """ + Set scaling factors based on values in a serializable json dict. + + This method expects components to be referenced by their local names. 
+ + Args: + blk_or_suffix: Pyomo Block or Suffix object to set scaling factors on + json_dict: dict of scaling factors to load into model + overwrite: (bool) whether to overwrite existing scaling factors or not + verify_names: (bool) whether to verify that all names in dict exist on block + + Returns + None + + Raises: + TypeError if blk_or_suffix is not an instance of Block or Suffix + + """ + # First, copy json_dict so we do not mutate original + sdict = deepcopy(json_dict) + # Pop block name for verification + block_name = sdict.pop("block_name") + + # Next, determine what type of component we have + if isinstance(blk_or_suffix, Suffix): + # Suffix + if verify_names and block_name != blk_or_suffix.parent_block().name: + raise ValueError( + f"Name of parent block ({blk_or_suffix.parent_block().name}) does " + f"not match that recorded in json_dict ({block_name})" + ) + _suffix_from_dict( + blk_or_suffix, sdict, overwrite=overwrite, verify_names=verify_names + ) + elif isinstance(blk_or_suffix, BlockData): + # Scalar block or element of indexed block + if verify_names and block_name != blk_or_suffix.name: + raise ValueError( + f"Block name ({blk_or_suffix.name}) does " + f"not match that recorded in json_dict ({block_name})" + ) + _set_block_suffixes_from_dict( + blk_or_suffix, sdict, overwrite=overwrite, verify_names=verify_names + ) + elif isinstance(blk_or_suffix, Block): + # Indexed block + if verify_names and block_name != blk_or_suffix.name: + raise ValueError( + f"Block name ({blk_or_suffix.name}) does " + f"not match that recorded in json_dict ({block_name})" + ) + for bd_name, bd_dict in sdict["block_datas"].items(): + bd = blk_or_suffix.parent_block().find_component(bd_name) + _set_block_suffixes_from_dict( + bd, bd_dict, overwrite=overwrite, verify_names=verify_names + ) + else: + # Not a block or suffix + raise TypeError( + f"{blk_or_suffix.name} is not an instance of a Block of Suffix." 
+ ) + + +def scaling_factors_to_json_file(blk_or_suffix, filename: str): + """ + Serialize scaling factors to file in json format. + + Args: + blk_of_suffix: Block or Suffix to save scaling factors for + filename: name of file to write to as string + + Returns: + None + + Raises: + TypeError if blk_or_suffix is not an instance of Block or Suffix + """ + with open(filename, "w") as fd: + json.dump(scaling_factors_to_dict(blk_or_suffix), fd, indent=3) + + +def scaling_factors_from_json_file( + blk_or_suffix, filename: str, overwrite: bool = False, verify_names: bool = True +): + """ + Load scaling factors from json file. + + Args: + blk_of_suffix: Block or Suffix to load scaling factors for + filename: name of file to load as string + overwrite: (bool) whether to overwrite existing scaling factors or not + verify_names: (bool) whether to verify that all names in dict exist on block + + Returns: + None + + Raises: + TypeError if blk_or_suffix is not an instance of Block or Suffix + """ + with open(filename, "r") as f: + scaling_factors_from_dict( + blk_or_suffix, json.load(f), overwrite=overwrite, verify_names=verify_names + ) + f.close() + + +def _collect_block_suffixes(block_data, descend_into=True): + suffix = get_scaling_suffix(block_data) + sdict = _suffix_to_dict(suffix) + + if descend_into: + sdict["subblock_suffixes"] = {} + for sb in block_data.component_data_objects(Block, descend_into=False): + sdict["subblock_suffixes"][sb.local_name] = _collect_block_suffixes( + sb, descend_into + ) + + return sdict + + +def _set_block_suffixes_from_dict( + block_data, json_dict, verify_names=True, overwrite=False +): + # First, copy dict so we can take it apart + sdict = deepcopy(json_dict) + + # Pop any subblock suffixes + sb_dict = sdict.pop("subblock_suffixes", None) + + # Set local suffix values + suffix = get_scaling_suffix(block_data) + _suffix_from_dict(suffix, sdict, verify_names=verify_names, overwrite=overwrite) + + if sb_dict is not None: + # Get each subblock 
and apply function recursively + for sb, sb_dict_value in sb_dict.items(): + subblock = block_data.find_component(sb) + + if subblock is not None: + _set_block_suffixes_from_dict( + subblock, + sb_dict_value, + verify_names=verify_names, + overwrite=overwrite, + ) + elif verify_names: + raise AttributeError( + f"Block {block_data.name} does not have a subblock named {sb}." + ) + + +def _suffix_to_dict(suffix): + sdict = {} + + for k, v in suffix.items(): + # Record components by their local name so we can use + # find_Component to retrieve them later + sdict[k.local_name] = v + + return sdict + + +def _suffix_from_dict(suffix, json_dict, verify_names=True, overwrite=False): + parent_block = suffix.parent_block() + + for k, v in json_dict.items(): + # Safety catch in case this gets left in by other functions + if k == "parent_name": + continue + + comp = parent_block.find_component(k) + if comp is not None: + if overwrite or comp not in suffix: + suffix[comp] = v + elif verify_names: + raise ValueError( + f"Could not find component {k} on block {parent_block.name}." + ) + + +def get_scaling_factor(component): + """ + Get scaling factor for component. + + Args: + component: component to get scaling factor for + + Returns: + float scaling factor + + Raises: + TypeError if component is a Block + """ + if isinstance(component, (Block, BlockData)): + raise TypeError("Blocks cannot have scaling factors.") + + try: + sfx = get_scaling_suffix(component) + return sfx[component] + except (AttributeError, KeyError): + # No scaling factor found, return None + return None + + +def set_scaling_factor(component, scaling_factor: float, overwrite: bool = False): + """ + Set scaling factor for component. + + Scaling factors must be positive, non-zero floats. 
+ + Args: + component: component to set scaling factor for + scaling_factor: scaling factor to assign + overwrite: (bool) whether to overwrite existing scaling factor + + Returns: + None + + Raises: + ValueError is scaling_factor is 0 or negative + """ + # Cast to float to catch any garbage inputs + scaling_factor = float(scaling_factor) + + # Check for negative or 0 scaling factors + if scaling_factor < 0: + raise ValueError( + f"scaling factor for {component.name} is negative ({scaling_factor}). " + "Scaling factors must be strictly positive." + ) + elif scaling_factor == 0: + raise ValueError( + f"scaling factor for {component.name} is zero. " + "Scaling factors must be strictly positive." + ) + + # Get suffix and assign scaling factor + sfx = get_scaling_suffix(component) + + if not overwrite and component in sfx: + _log.debug( + f"Existing scaling factor for {component.name} found and overwrite=False. " + "Scaling factor unchanged." + ) + else: + sfx[component] = scaling_factor + + +def del_scaling_factor(component, delete_empty_suffix: bool = False): + """ + Delete scaling factor for component. + + Args: + component: component to delete scaling factor for + delete_empty_suffix: (bool) whether to delete scaling Suffix if it is + empty after deletion. + """ + # Get suffix + parent = component.parent_block() + sfx = get_scaling_suffix(parent) + + # Delete entry for component if it exists + # Pyomo handles case where value does not exist in suffix with a no-op + sfx.clear_value(component) + + if delete_empty_suffix: + # Check if Suffix is empty (i.e. length 0) + if len(sfx) == 0: + # If so, delete suffix from parent block of component + _log.debug(f"Deleting empty scaling suffix from {parent.name}") + parent.del_component(sfx) + + +def report_scaling_factors( + blk: Block, ctype=None, descend_into: bool = False, stream=None +): + """ + Write the scaling factors for all components in a Block to a stream. + + Args: + blk: Block to get scaling factors from. 
+ ctype: None, Var or Constraint. Type of component to show scaling factors for + (if None, shows both Vars and Constraints). + descend_into: whether to show scaling factors for components in sub-blocks. + stream: StringIO object to write results to. If not provided, writes to stdout. + + Raises: + TypeError if blk is not a Pyomo Block. + ValueError is ctype is not None, Var or Constraint. + """ + if stream is None: + stream = sys.stdout + + if ctype not in [None, Var, Constraint]: + raise ValueError( + f"report_scaling_factors only supports None, Var or Constraint for argument ctype: " + f"received {ctype}." + ) + + if not isinstance(blk, (Block, BlockData)): + raise TypeError( + "report_scaling_factors: blk must be an instance of a Pyomo Block." + ) + + stream.write(f"Scaling Factors for {blk.name}\n") + + # We will report Vars and Constraint is separate sections for clarity - iterate separately + if ctype != Constraint: + # Collect Vars + vdict = {} + for blkdata in blk.values(): + for vardata in blkdata.component_data_objects( + Var, descend_into=descend_into + ): + val = vardata.value + sf = get_scaling_factor(vardata) + + if sf is not None: + sfstr = "{:.3E}".format(sf) + else: + sfstr = "None " + + if val is not None: + vstr = "{:.3E}".format(val) + if sf is not None: + sval = "{:.3E}".format(value(vardata * sf)) + else: + sval = vstr + else: + vstr = "None " + sval = "None" + + vdict[vardata.name] = (sfstr, vstr, sval) + + # Write Var section - skip if no Vars + if len(vdict) > 0: + # Get longest var name + header = "Variable" + maxname = len(max(vdict.keys(), key=len)) + if maxname < len(header): + maxname = len(header) + + stream.write( + f"\n{header}{' '*(maxname-len(header))}{TAB}Scaling Factor{TAB}Value{' '*4}{TAB}Scaled Value\n" + ) + + for n, i in vdict.items(): + # Pad name to length + stream.write( + f"{n + ' '*(maxname-len(n))}{TAB}{i[0]}{' '*5}{TAB}{i[1]}{TAB}{i[2]}\n" + ) + + if ctype != Var: + # Collect Constraints + for blkdata in 
blk.values(): + cdict = {} + for condata in blkdata.component_data_objects( + Constraint, descend_into=descend_into + ): + sf = get_scaling_factor(condata) + + if sf is not None: + sfstr = "{:.3E}".format(sf) + else: + sfstr = "None" + + cdict[condata.name] = sfstr + + # Write Constraint section - skip if no Constraints + if len(cdict) > 0: + # Get longest con name + header = "Constraint" + maxname = len(max(cdict.keys(), key=len)) + if maxname < len(header): + maxname = len(header) + + stream.write( + f"\n{header}{' ' * (maxname - len(header))}{TAB}Scaling Factor\n" + ) + + for n, i in cdict.items(): + # Pad name to length + stream.write(f"{n + ' ' * (maxname - len(n))}{TAB}{i}\n") + + +def get_nominal_value(component): + """ + Get the signed nominal value for a VarData or ParamData component. + + For fixed Vars and Params, the current value of the component will be returned. + + For unfixed Vars, the nominal value is determined using the assigned scaling factor + and the sign determined based on the bounds and domain of the variable (defaulting to + positive). If no scaling factor is set, then the current value will be used if set, + otherwise the absolute nominal value will be equal to 1. 
+ + Args: + component: component to determine nominal value for + + Returns: + signed float with nominal value + + Raises: + TypeError if component is not instance of VarData or ParamData + """ + # Determine if Var or Param + if isinstance(component, VarData): + if component.fixed: + # Nominal value of a fixed Var is its value + return value(component) + + # Get scaling factor for Var + sf = get_scaling_factor(component) + if sf is None: + # No scaling factor - see if Var has a value + if component.value is not None: + # If it has a value, use that as the nominal value + # As we are using the actual value, do not need to determine sign + return value(component) + else: + # Otherwise assign a nominal value of 1 + sf = 1 + + # Try to determine expected sign of node + ub = component.ub + lb = component.lb + domain = component.domain + + # To avoid NoneType errors, assign dummy values in place of None + if ub is None: + # No upper bound, take a positive value + ub = 1000 + if lb is None: + # No lower bound, take a negative value + lb = -1000 + + if lb >= 0 or domain in [ + NonNegativeReals, + PositiveReals, + PositiveIntegers, + NonNegativeIntegers, + Boolean, + Binary, + ]: + # Strictly positive + sign = 1 + elif ub <= 0 or domain in [ + NegativeReals, + NonPositiveReals, + NegativeIntegers, + NonPositiveIntegers, + ]: + # Strictly negative + sign = -1 + else: + # Unbounded, see if there is a current value + # Assume positive until proven otherwise + sign = 1 + if component.value is not None: + val = value(component) + if val < 0: + # Assigned negative value, assume value will remain negative + sign = -1 + + return sign / sf + + elif isinstance(component, ParamData): + # Nominal value of a parameter is always its value + return value(component) + else: + # Not a Var or Param - invalid component type + raise TypeError( + f"get_nominal_value - {component.name} is not an instance of a Var or Param." 
+ ) + + +class NominalValueExtractionVisitor(EXPR.StreamBasedExpressionVisitor): + """ + Expression walker for collecting scaling factors in an expression and determining the + expected value of the expression using the scaling factors as nominal inputs. + + By default, the get_nominal_value method is used to determine the nominal value for + all Vars and Params in the expression, however this can be changed by setting the + nominal_value_callback argument. + + Returns a list of expected values for each additive term in the expression. + + In order to properly assess the expected value of terms within functions, the sign + of each term is maintained throughout thus returned values may be negative. Functions + using this walker should handle these appropriately. + """ + + def __init__(self, nominal_value_callback=get_nominal_value): + """ + Visitor class used to determine nominal values of all terms in an expression based on + scaling factors assigned to the associated variables. Do not use this class directly. + + Args: + nominal_value_callback - method to use to get nominal value of root nodes. + + Notes + ----- + This class inherits from the :class:`StreamBasedExpressionVisitor` to implement + a walker that returns the nominal value corresponding to all additive terms in an + expression. + There are class attributes (dicts) that map the expression node type to the + particular method that should be called to return the nominal value of the node based + on the nominal value of its child arguments. This map is used in exitNode. + """ + super().__init__() + + self._nominal_value_callback = nominal_value_callback + + def _get_magnitude_base_type(self, node): + try: + return [self._nominal_value_callback(node)] + except TypeError: + # Not a Var or Param - something went wrong + raise BurntToast( + "NominalValueExtractionVisitor found root node that was not a Var or Param. " + "This should never happen - please contact the developers with this bug." 
+ ) + + def _get_nominal_value_for_sum_subexpression(self, child_nominal_values): + return sum(i for i in child_nominal_values) + + def _get_nominal_value_for_sum(self, node, child_nominal_values): + # For sums, collect all child values into a list + mag = [] + for i in child_nominal_values: + for j in i: + mag.append(j) + return mag + + def _get_nominal_value_for_product(self, node, child_nominal_values): + mag = [] + for i in child_nominal_values[0]: + for j in child_nominal_values[1]: + mag.append(i * j) + return mag + + def _get_nominal_value_for_division(self, node, child_nominal_values): + numerator = self._get_nominal_value_for_sum_subexpression( + child_nominal_values[0] + ) + denominator = self._get_nominal_value_for_sum_subexpression( + child_nominal_values[1] + ) + if denominator == 0: + # Assign a nominal value of 1 so that we can continue + denominator = 1 + # Log a warning for the user + _log.warning( + "Nominal value of 0 found in denominator of division expression. " + "Assigning a value of 1. You should check you scaling factors and models to " + "ensure there are no values of 0 that can appear in these functions." 
+ ) + return [numerator / denominator] + + def _get_nominal_value_for_power(self, node, child_nominal_values): + # Use the absolute value of the base term to avoid possible complex numbers + base = abs( + self._get_nominal_value_for_sum_subexpression(child_nominal_values[0]) + ) + exponent = self._get_nominal_value_for_sum_subexpression( + child_nominal_values[1] + ) + + return [base**exponent] + + def _get_nominal_value_single_child(self, node, child_nominal_values): + return child_nominal_values[0] + + def _get_nominal_value_abs(self, node, child_nominal_values): + return [abs(i) for i in child_nominal_values[0]] + + def _get_nominal_value_negation(self, node, child_nominal_values): + return [-i for i in child_nominal_values[0]] + + def _get_nominal_value_for_unary_function(self, node, child_nominal_values): + func_name = node.getname() + func_nominal = self._get_nominal_value_for_sum_subexpression( + child_nominal_values[0] + ) + func = getattr(math, func_name) + try: + return [func(func_nominal)] + except ValueError: + raise ValueError( + f"Evaluation error occurred when getting nominal value in {func_name} " + f"expression with input {func_nominal}. You should check you scaling factors " + f"and model to address any numerical issues or scale this constraint manually." 
+ ) + + def _get_nominal_value_expr_if(self, node, child_nominal_values): + return child_nominal_values[1] + child_nominal_values[2] + + def _get_nominal_value_external_function(self, node, child_nominal_values): + # First, need to get expected magnitudes of input terms, which may be sub-expressions + input_mag = [ + self._get_nominal_value_for_sum_subexpression(i) + for i in child_nominal_values + ] + + # Next, create a copy of the external function with expected magnitudes as inputs + newfunc = node.create_node_with_local_data(input_mag) + + # Evaluate new function and return the absolute value + return [value(newfunc)] + + node_type_method_map = { + EXPR.EqualityExpression: _get_nominal_value_for_sum, + EXPR.InequalityExpression: _get_nominal_value_for_sum, + EXPR.RangedExpression: _get_nominal_value_for_sum, + EXPR.SumExpression: _get_nominal_value_for_sum, + EXPR.NPV_SumExpression: _get_nominal_value_for_sum, + EXPR.ProductExpression: _get_nominal_value_for_product, + EXPR.MonomialTermExpression: _get_nominal_value_for_product, + EXPR.NPV_ProductExpression: _get_nominal_value_for_product, + EXPR.DivisionExpression: _get_nominal_value_for_division, + EXPR.NPV_DivisionExpression: _get_nominal_value_for_division, + EXPR.PowExpression: _get_nominal_value_for_power, + EXPR.NPV_PowExpression: _get_nominal_value_for_power, + EXPR.NegationExpression: _get_nominal_value_negation, + EXPR.NPV_NegationExpression: _get_nominal_value_negation, + EXPR.AbsExpression: _get_nominal_value_abs, + EXPR.NPV_AbsExpression: _get_nominal_value_abs, + EXPR.UnaryFunctionExpression: _get_nominal_value_for_unary_function, + EXPR.NPV_UnaryFunctionExpression: _get_nominal_value_for_unary_function, + EXPR.Expr_ifExpression: _get_nominal_value_expr_if, + EXPR.ExternalFunctionExpression: _get_nominal_value_external_function, + EXPR.NPV_ExternalFunctionExpression: _get_nominal_value_external_function, + EXPR.LinearExpression: _get_nominal_value_for_sum, + } + + def exitNode(self, node, data): + 
"""Callback for :class:`pyomo.core.current.StreamBasedExpressionVisitor`. This + method is called when moving back up the tree in a depth first search.""" + + # first check if the node is a leaf + nodetype = type(node) + + if nodetype in native_types: + return [node] + + node_func = self.node_type_method_map.get(nodetype, None) + if node_func is not None: + return node_func(self, node, data) + + elif not node.is_expression_type(): + # this is a leaf, but not a native type + if nodetype is _PyomoUnit: + return [1] + else: + return self._get_magnitude_base_type(node) + # might want to add other common types here + + # not a leaf - check if it is a named expression + if ( + hasattr(node, "is_named_expression_type") + and node.is_named_expression_type() + ): + return self._get_nominal_value_single_child(node, data) + + raise TypeError( + f"An unhandled expression node type: {str(nodetype)} was encountered while " + f"retrieving the nominal value of expression {str(node)}" + ) diff --git a/idaes/core/util/scaling.py b/idaes/core/util/scaling.py index ccb305c041..8d0aa3aef8 100644 --- a/idaes/core/util/scaling.py +++ b/idaes/core/util/scaling.py @@ -1254,6 +1254,7 @@ def set_variable_scaling_from_current_value( ) +# TODO: Deprecate in favor of new walker class NominalValueExtractionVisitor(EXPR.StreamBasedExpressionVisitor): """ Expression walker for collecting scaling factors in an expression and determining the @@ -1372,7 +1373,7 @@ def _get_magnitude_base_type(self, node): # Either a positive value or no value, assume positive sign = 1 else: - # No ideal, assume positive + # No idea, assume positive sign = 1 try: diff --git a/idaes/models/unit_models/gibbs_reactor.py b/idaes/models/unit_models/gibbs_reactor.py index 90ca226b55..26d1c56458 100644 --- a/idaes/models/unit_models/gibbs_reactor.py +++ b/idaes/models/unit_models/gibbs_reactor.py @@ -14,7 +14,7 @@ Standard IDAES Gibbs reactor model. 
""" # Import Pyomo libraries -from pyomo.environ import Constraint, Param, Reals, Reference, Set, Var +from pyomo.environ import Constraint, Param, Reals, Reference, Set, units, value, Var from pyomo.common.config import ConfigBlock, ConfigValue, In, ListOf, Bool # Import IDAES cores @@ -28,10 +28,168 @@ ) from idaes.core.util.config import is_physical_parameter_block from idaes.core.util.exceptions import ConfigurationError +from idaes.core.scaling import CustomScalerBase +from idaes.core.util.constants import Constants __author__ = "Jinliang Ma, Andrew Lee" +class GibbsReactorScaler(CustomScalerBase): + """ + Scaler for Gibbs Reactor units. + + Due to the nature of Gibbs Reactors, scaling is highly dependent on the outlet + concentrations which cannot be predicted a priori, thus we rely on users to + provide the best initial guesses they can for the outlet concentrations. + + """ + + UNIT_SCALING_FACTORS = { + # "QuantityName: (reference units, scaling factor) + "Delta Pressure": (units.Pa, 1e-3), + "Heat": (units.J / units.s, 1e-6), + } + + def variable_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: dict = None + ): + """ + Variable scaling routine for Gibbs reactors. + + Due to the nature of Gibbs Reactors, scaling is highly dependent on the outlet + concentrations which cannot be predicted a priori, thus we rely on users to + provide the best initial guesses they can for the outlet concentrations. 
+ + Args: + model: instance of GibbsReactor to be scaled + overwrite: whether to overwrite existing scaling factors + submodel_scalers: dict of Scalers to use for sub-models, keyed by submodel local name + + Returns: + None + """ + if submodel_scalers is None: + submodel_scalers = {} + + # Step 1: Property scaling + + # Step 1a: propagate any existing scaling from inlet to outlet + # This is likely a very poor approximation for concentrations, but we expect users + # to provide better scaling factors manually (as there is no way for us to know). + # This will do a first pass fill in however if the user does not provide any information. + self.propagate_state_scaling( + target_state=model.control_volume.properties_out, + source_state=model.control_volume.properties_in, + overwrite=overwrite, + ) + + # Step 1b: Call Scalers for state blocks + # Inlet properties + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.properties_in", + submodel_scalers=submodel_scalers, + method="variable_scaling_routine", + overwrite=overwrite, + ) + # Outlet properties + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.properties_out", + submodel_scalers=submodel_scalers, + method="variable_scaling_routine", + overwrite=overwrite, + ) + + # Step 2: Scaling Gibbs reactor variables + # Control volume variables - support only heat and deltaP + if hasattr(model.control_volume, "heat"): + for v in model.control_volume.heat.values(): + self.scale_variable_by_units(v, overwrite=overwrite) + if hasattr(model.control_volume, "deltaP"): + for v in model.control_volume.deltaP.values(): + self.scale_variable_by_units(v, overwrite=overwrite) + + # Lagrangian multipliers + # Best guess scaling for these is R*T, need to convert units + p_units = ( + model.control_volume.config.property_package.get_metadata().get_derived_units + ) + for (t, _), v in model.lagrange_mult.items(): + tsf = self.get_scaling_factor( + 
model.control_volume.properties_out[t].temperature + ) + if tsf is not None: + nominal_t = 1 / tsf + else: + nominal_t = 500 + lsf = value( + 1 + / units.convert( + Constants.gas_constant * nominal_t * p_units("temperature"), + to_units=p_units("energy_mole"), + ) + ) + self.set_variable_scaling_factor(v, lsf, overwrite=overwrite) + + def constraint_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: dict = None + ): + """ + Routine to apply scaling factors to constraints in model. + + Constraints will be scaled based on nominal Jacobian norms, and thus will + be heavily dependent on variable scaling. + + Args: + model: model to be scaled + overwrite: whether to overwrite existing scaling factors + submodel_scalers: dict of Scalers to use for sub-models, keyed by submodel local name + + Returns: + None + """ + # Step 1: Call Scalers for state blocks + # Inlet properties + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.properties_in", + submodel_scalers=submodel_scalers, + method="constraint_scaling_routine", + overwrite=overwrite, + ) + # Outlet properties + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.properties_out", + submodel_scalers=submodel_scalers, + method="constraint_scaling_routine", + overwrite=overwrite, + ) + + # Step 2: Scale all the control volume constraints + for cd in model.control_volume.component_data_objects( + ctype=Constraint, descend_into=False + ): + self.scale_constraint_by_nominal_value( + cd, scheme="inverse_sum", overwrite=overwrite + ) + + # Step 3: Scale local constraints + # Scale Gibbs minimization constraints + for cd in model.gibbs_minimization.values(): + self.scale_constraint_by_nominal_value( + cd, scheme="inverse_sum", overwrite=overwrite + ) + + # Scale inert species balance if they are present + if hasattr(model, "inert_species_balance"): + for cd in model.inert_species_balance.values(): + self.scale_constraint_by_nominal_value( + cd, 
scheme="inverse_sum", overwrite=overwrite + ) + + @declare_process_block_class("GibbsReactor") class GibbsReactorData(UnitModelBlockData): """ @@ -164,6 +322,8 @@ class GibbsReactorData(UnitModelBlockData): ), ) + default_scaler = GibbsReactorScaler + def build(self): """ Begin building model (pre-DAE transformation). diff --git a/idaes/models/unit_models/tests/test_gibbs.py b/idaes/models/unit_models/tests/test_gibbs.py index d571e83763..b74e4517d3 100644 --- a/idaes/models/unit_models/tests/test_gibbs.py +++ b/idaes/models/unit_models/tests/test_gibbs.py @@ -18,7 +18,7 @@ import pytest from pyomo.environ import ( - check_optimal_termination, + assert_optimal_termination, ConcreteModel, Constraint, Suffix, @@ -28,7 +28,7 @@ ) from idaes.core import FlowsheetBlock, EnergyBalanceType, MomentumBalanceType -from idaes.models.unit_models.gibbs_reactor import GibbsReactor +from idaes.models.unit_models.gibbs_reactor import GibbsReactor, GibbsReactorScaler from idaes.models.properties.activity_coeff_models.methane_combustion_ideal import ( MethaneParameterBlock as MethaneCombustionParameterBlock, ) @@ -49,6 +49,12 @@ InitializationStatus, ) from idaes.core.util import DiagnosticsToolbox +from idaes.core.scaling import AutoScaler, CustomScalerBase, set_scaling_factor +from idaes.core.util.scaling import ( + jacobian_cond, + extreme_jacobian_rows, + extreme_jacobian_columns, +) # Natural gas property package for integration testing from idaes.models.properties.modular_properties.base.generic_property import ( @@ -59,8 +65,7 @@ # ----------------------------------------------------------------------------- # Get default solver for testing -# TODO: Does not solve with MA57 - check again once scaling is deployed -solver = get_solver(solver="ipopt_v2", solver_options={"linear_solver": "ma27"}) +solver = get_solver(solver="ipopt_v2", writer_config={"scale_model": True}) # ----------------------------------------------------------------------------- @@ -90,6 +95,8 @@ def 
test_config(): assert not hasattr(m.fs.unit, "inert_species_balance") + assert m.fs.unit.default_scaler is GibbsReactorScaler + class TestGibbsInerts: @pytest.mark.unit @@ -320,580 +327,24 @@ def test_initialize_temperature(self, methane): @pytest.mark.skipif(solver is None, reason="Solver not available") @pytest.mark.component def test_solve_temperature(self, methane): - methane.scaling_factor = Suffix(direction=Suffix.EXPORT) - - methane.scaling_factor[ - methane.fs.unit.control_volume.element_balances[0.0, "C"] - ] = 0.0038968315684515787 - methane.scaling_factor[ - methane.fs.unit.control_volume.element_balances[0.0, "H"] - ] = 0.0009690314543471861 - methane.scaling_factor[ - methane.fs.unit.control_volume.element_balances[0.0, "N"] - ] = 0.0016665906198716563 - methane.scaling_factor[ - methane.fs.unit.control_volume.element_balances[0.0, "O"] - ] = 0.0067608566657646 - methane.scaling_factor[ - methane.fs.unit.control_volume.enthalpy_balances[0.0] - ] = 6.343688225967796e-08 - methane.scaling_factor[methane.fs.unit.control_volume.heat[0.0]] = ( - 1.3415588575040103e-07 - ) - methane.scaling_factor[methane.fs.unit.control_volume.pressure_balance[0.0]] = ( - 9.869232667160129e-06 - ) - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase["Vap"] - ] = 0.00010271414106049353 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 1.3404825737265415e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 2.5411669038422445e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "CO" - ] - ] = 9.047317470370035e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 4.135136252739528e-06 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "H2" - ] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "N2" - ] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 2.1786492374727668e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "O2" - ] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["CH4"] - ] = 0.03334222459322486 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["CO2"] - ] = 434.782608695652 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["CO"] - ] = 434.782608695652 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["H2O"] - ] = 434.782608695652 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["H2"] - ] = 0.09995002498750626 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["N2"] - ] = 0.00666640001066624 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["NH3"] - ] = 434.782608695652 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["O2"] - ] = 0.025001875140635548 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase["Vap"] - ] = 5.9334197643529735e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 1.3404825737265415e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 2.5411669038422445e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "CO" - ] - ] = 
9.047317470370035e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 4.135136252739528e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "H2" - ] - ] = 1.0 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "N2" - ] - ] = 1.0 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 2.1786492374727668e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "O2" - ] - ] = 1.0 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_total - ] = 0.004347826086956522 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].flow_mol_phase["Vap"] - ] = 0.004347826086956522 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "CH4" - ] - ] = 7.668711656441719 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "CO2" - ] - ] = 99999.99999999997 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "CO" - ] - ] = 99999.99999999997 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "H2O" - ] - ] = 99999.99999999997 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "H2" - ] - ] = 22.98850574712644 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "N2" - ] - ] = 1.5332720024532351 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "NH3" - ] - ] = 99999.99999999997 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "O2" - ] - ] = 5.750431282346176 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase["Vap"] - ] = 2.5797225634987406e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 9.131356578608373e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 3.8432408761344524e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "CO" - ] - ] = 8.617482264231038e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 5.614692038136825e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "H2" - ] - ] = 1.1286872760356127e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "N2" - ] - ] = 0.0007529656318755848 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 0.000147611121632822 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "O2" - ] - ] = 0.00011456899776931075 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 0.002273113949091129 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 0.0032746835561418483 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "CO" - ] - ] = 0.004593745058364088 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 
0.004278478461249582 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "H2" - ] - ] = 0.005287458822573278 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "N2" - ] - ] = 0.005016168207707185 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 0.0029937456289480776 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "O2" - ] - ] = 0.003654125560818891 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["CH4"] - ] = 0.002827763345315764 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["CO2"] - ] = 0.08851222463945824 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["CO"] - ] = 0.020535852025143013 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["H2O"] - ] = 0.011302753862264373 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["H2"] - ] = 0.019411801141636542 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["N2"] - ] = 0.0033331760499149495 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["NH3"] - ] = 3476.4619148053275 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["O2"] - ] = 8.288029770353534 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase["Vap"] - ] = 1.5616764299049252e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 9.131356578608373e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 3.8432408761344524e-06 - 
methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "CO" - ] - ] = 8.617482264231038e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 5.614692038136825e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "H2" - ] - ] = 1.1286872760356127e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "N2" - ] - ] = 0.0007529656318755848 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 0.000147611121632822 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "O2" - ] - ] = 0.00011456899776931075 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 1.2281784483868407e-12 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 0.0032722586564032413 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "CO" - ] - ] = 0.004587058233184441 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 0.004273074490321421 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "H2" - ] - ] = 0.005277269330062221 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "N2" - ] - ] = 0.005007465522090417 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 0.0029918924828130663 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "O2" - ] - ] = 0.003650757205096906 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 5.675584743853875e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 6.906678667100022e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "CO" - ] - ] = 1.0328408888379269e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 9.189402632308884e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "H2" - ] - ] = 1.415412212742754e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "N2" - ] - ] = 1.248468142622135e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 7.41580819400193e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "O2" - ] - ] = 9.033010042075156e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_mol_frac_out - ] = 0.8416262137210224 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_total - ] = 0.002827763345315764 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].flow_mol - ] = 0.003999061274127067 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].flow_mol_phase["Vap"] - ] = 0.003999061274127067 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 8.062155808993221e-07 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 8.859012593383004e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "CO" - ] - ] = 1.3601211872489997e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 1.1863588686277076e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "H2" - ] - ] = 2.225437251662786e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "N2" - ] - ] = 1.767676703310131e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 1.0450609388337302e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "O2" - ] - ] = 1.2704369868483197e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CH4"] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO2"] - ] = 44.26650084714147 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO"] - ] = 10.270336270166638 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2O"] - ] = 5.6527035158927825 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2"] - ] = 9.70817890049651 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["N2"] - ] = 1.6669792340916454 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["NH3"] - ] = 1738638.9837520984 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["O2"] - ] = 4144.987636961217 - 
methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "CH4" - ] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "CO2" - ] - ] = 44.26650084714147 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "CO" - ] - ] = 10.270336270166638 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "H2O" - ] - ] = 5.6527035158927825 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "H2" - ] - ] = 9.70817890049651 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "N2" - ] - ] = 1.6669792340916454 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "NH3" - ] - ] = 1738638.9837520984 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "O2" - ] - ] = 4144.987636961217 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].pressure - ] = 9.869232667160129e-06 - methane.scaling_factor[ - methane.fs.unit.gibbs_minimization[0.0, "Vap", "CH4"] - ] = 6.37201815837067e-07 - methane.scaling_factor[ - methane.fs.unit.gibbs_minimization[0.0, "Vap", "CO2"] - ] = 7.052606407804767e-07 - methane.scaling_factor[methane.fs.unit.gibbs_minimization[0.0, "Vap", "CO"]] = ( - 1.1096130740800528e-06 - ) - methane.scaling_factor[ - methane.fs.unit.gibbs_minimization[0.0, "Vap", "H2O"] - ] = 9.679516364316312e-07 - methane.scaling_factor[methane.fs.unit.gibbs_minimization[0.0, "Vap", "H2"]] = ( - 1.5736217717559093e-06 - ) - methane.scaling_factor[methane.fs.unit.gibbs_minimization[0.0, "Vap", "N2"]] = ( - 1.2499361838560746e-06 - ) - methane.scaling_factor[ - methane.fs.unit.gibbs_minimization[0.0, "Vap", "NH3"] - ] 
= 8.304717457164266e-07 - methane.scaling_factor[methane.fs.unit.gibbs_minimization[0.0, "Vap", "O2"]] = ( - 8.983346084706515e-07 - ) - methane.scaling_factor[methane.fs.unit.lagrange_mult[0.0, "C"]] = ( - 2.926858666284934e-06 - ) - methane.scaling_factor[methane.fs.unit.lagrange_mult[0.0, "H"]] = ( - 4.450874503325572e-06 - ) - methane.scaling_factor[methane.fs.unit.lagrange_mult[0.0, "N"]] = ( - 3.535353406620262e-06 - ) - methane.scaling_factor[methane.fs.unit.lagrange_mult[0.0, "O"]] = ( - 2.5408739736966393e-06 - ) + scaler = AutoScaler() + scaler.scale_variables_by_magnitude(methane) + scaler.scale_constraints_by_jacobian_norm(methane) - scaling = TransformationFactory("core.scale_model") - sm = scaling.create_using(methane, rename=False) + results = solver.solve(methane) - results = solver.solve(sm) + # Check for optimal solution + assert_optimal_termination(results) - scaling.propagate_solution(sm, methane) + @pytest.mark.solver + @pytest.mark.skipif(solver is None, reason="Solver not available") + @pytest.mark.component + def test_verify_scaling_temperature(self, methane): + assert jacobian_cond(methane, scaled=False) == pytest.approx(5.703e17, rel=1e-3) + assert jacobian_cond(methane, scaled=True) == pytest.approx(2511, abs=1) - # Check for optimal solution - assert check_optimal_termination(results) + assert len(extreme_jacobian_rows(methane, scaled=True)) == 0 + assert len(extreme_jacobian_columns(methane, scaled=True)) == 0 @pytest.mark.solver @pytest.mark.skipif(solver is None, reason="Solver not available") @@ -1013,583 +464,23 @@ def test_initialize_duty(self, methane): @pytest.mark.skipif(solver is None, reason="Solver not available") @pytest.mark.component def test_solve_heat_duty(self, methane): - # Remove previous scaling factors - methane.del_component(methane.scaling_factor) - - methane.scaling_factor = Suffix(direction=Suffix.EXPORT) - - methane.scaling_factor[ - methane.fs.unit.control_volume.element_balances[0.0, "C"] - ] = 
0.003895771416286546 - methane.scaling_factor[ - methane.fs.unit.control_volume.element_balances[0.0, "H"] - ] = 0.0009691213173623994 - methane.scaling_factor[ - methane.fs.unit.control_volume.element_balances[0.0, "N"] - ] = 0.0016665908743918427 - methane.scaling_factor[ - methane.fs.unit.control_volume.element_balances[0.0, "O"] - ] = 0.006733906435857895 - methane.scaling_factor[ - methane.fs.unit.control_volume.enthalpy_balances[0.0] - ] = 7.199382898409605e-08 - methane.scaling_factor[methane.fs.unit.control_volume.pressure_balance[0.0]] = ( - 9.869232667160129e-06 - ) - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase["Vap"] - ] = 0.00010271414106049353 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 1.3404825737265415e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 2.5411669038422445e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "CO" - ] - ] = 9.047317470370035e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 4.135136252739528e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "H2" - ] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "N2" - ] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 2.1786492374727668e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].enth_mol_phase_comp[ - "Vap", "O2" - ] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["CH4"] - ] = 0.03334222459322486 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_in[0.0].eq_comp["CO2"] - ] = 434.782608695652 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["CO"] - ] = 434.782608695652 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["H2O"] - ] = 434.782608695652 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["H2"] - ] = 0.09995002498750626 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["N2"] - ] = 0.00666640001066624 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["NH3"] - ] = 434.782608695652 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_comp["O2"] - ] = 0.025001875140635548 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase["Vap"] - ] = 5.9334197643529735e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 1.3404825737265415e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 2.5411669038422445e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "CO" - ] - ] = 9.047317470370035e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 4.135136252739528e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "H2" - ] - ] = 1.0 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "N2" - ] - ] = 1.0 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 2.1786492374727668e-05 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_in[0.0].eq_enth_mol_phase_comp[ - "Vap", "O2" - ] - ] = 1.0 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].eq_total - ] = 0.004347826086956522 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].flow_mol_phase["Vap"] - ] = 0.004347826086956522 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "CH4" - ] - ] = 7.668711656441719 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "CO2" - ] - ] = 99999.99999999997 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "CO" - ] - ] = 99999.99999999997 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "H2O" - ] - ] = 99999.99999999997 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "H2" - ] - ] = 22.98850574712644 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "N2" - ] - ] = 1.5332720024532351 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "NH3" - ] - ] = 99999.99999999997 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_in[0.0].mole_frac_phase_comp[ - "Vap", "O2" - ] - ] = 5.750431282346176 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase["Vap"] - ] = 2.579089235408562e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 0.00013694447144485197 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 3.048997164495024e-06 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "CO" - ] - ] = 1.032900097747103e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 4.977043447527836e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "H2" - ] - ] = 2.3899280715507977e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "N2" - ] - ] = 6.31659002014246e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 0.0002135671616883595 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].enth_mol_phase_comp[ - "Vap", "O2" - ] - ] = 8.194560075137228e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 0.002416655313418302 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 0.0035442516743102712 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "CO" - ] - ] = 0.004456473678792036 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 0.004453410785746496 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "H2" - ] - ] = 0.005822282444612275 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "N2" - ] - ] = 0.004883049120795377 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 0.0030506224994020967 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].entr_mol_phase_comp[ - "Vap", "O2" - ] - ] = 
0.0031405744684195823 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["CH4"] - ] = 0.0028284396804244337 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["CO2"] - ] = 0.12404165785988744 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["CO"] - ] = 0.019256175462170257 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["H2O"] - ] = 0.010875650523130753 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["H2"] - ] = 0.020815988487732813 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["N2"] - ] = 0.003333178086069927 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["NH3"] - ] = 1528.6054813156613 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_comp["O2"] - ] = 696.9862666112366 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase["Vap"] - ] = 1.4497108873652463e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 6.9059089891022035e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 2.444674689765088e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "CO" - ] - ] = 1.0075547572052885e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 4.335306573370538e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "H2" - ] - ] = 5.8333326983598445e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "N2" - ] - ] = 
5.6424839623995114e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 1.7166243852082998e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_enth_mol_phase_comp[ - "Vap", "O2" - ] - ] = 3.12933823458493e-05 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 1.1897376657481283e-11 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 0.003320984391726235 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "CO" - ] - ] = 0.004446525169209352 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 0.004347558430163033 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "H2" - ] - ] = 0.00536900022840844 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "N2" - ] - ] = 0.004874347940539925 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 0.003039965534465019 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_entr_mol_phase_comp[ - "Vap", "O2" - ] - ] = 0.0031359732136764075 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 5.950956432532686e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 7.15349973612882e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "CO" - ] - ] = 1.0278492215315099e-06 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 9.452962659471516e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "H2" - ] - ] = 1.484262777482121e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "N2" - ] - ] = 1.2185350436147026e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 7.51571959805243e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_gibbs_mol_phase_comp[ - "Vap", "O2" - ] - ] = 7.795160552447015e-07 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_mol_frac_out - ] = 0.8409200257976712 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].eq_total - ] = 0.0028284396804244332 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].flow_mol - ] = 0.004000017756410457 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].flow_mol_phase["Vap"] - ] = 0.004000017756410457 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "CH4" - ] - ] = 1.0256057645532026e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "CO2" - ] - ] = 1.0123237586232911e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "CO" - ] - ] = 1.6087623724494755e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "H2O" - ] - ] = 1.377288144399382e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "H2" - ] - ] = 2.77905882129248e-06 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "N2" - ] - ] = 2.159334308222618e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "NH3" - ] - ] = 1.2965057859756173e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].gibbs_mol_phase_comp[ - "Vap", "O2" - ] - ] = 1.3652606771737144e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CH4"] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO2"] - ] = 62.020553614342035 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO"] - ] = 9.628044991205439 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2O"] - ] = 5.437801122608198 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2"] - ] = 10.407948041917045 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["N2"] - ] = 1.6665816449080268 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["NH3"] - ] = 764299.3478545976 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["O2"] - ] = 348491.5863157065 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "CH4" - ] - ] = 1 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "CO2" - ] - ] = 62.020553614342035 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "CO" - ] - ] = 9.628044991205439 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "H2O" - ] - ] = 5.437801122608198 - methane.scaling_factor[ - 
methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "H2" - ] - ] = 10.407948041917045 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "N2" - ] - ] = 1.6665816449080268 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "NH3" - ] - ] = 764299.3478545976 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].mole_frac_phase_comp[ - "Vap", "O2" - ] - ] = 348491.5863157065 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].pressure - ] = 9.869232667160129e-06 - methane.scaling_factor[ - methane.fs.unit.control_volume.properties_out[0.0].temperature - ] = 0.0004275929186958249 - methane.scaling_factor[ - methane.fs.unit.gibbs_minimization[0.0, "Vap", "CH4"] - ] = 8.074436602893497e-07 - methane.scaling_factor[ - methane.fs.unit.gibbs_minimization[0.0, "Vap", "CO2"] - ] = 7.961846197368761e-07 - methane.scaling_factor[methane.fs.unit.gibbs_minimization[0.0, "Vap", "CO"]] = ( - 1.306639729794169e-06 - ) - methane.scaling_factor[ - methane.fs.unit.gibbs_minimization[0.0, "Vap", "H2O"] - ] = 1.1245365150847992e-06 - methane.scaling_factor[methane.fs.unit.gibbs_minimization[0.0, "Vap", "H2"]] = ( - 1.9650913378522065e-06 - ) - methane.scaling_factor[methane.fs.unit.gibbs_minimization[0.0, "Vap", "N2"]] = ( - 1.5268799321929755e-06 - ) - methane.scaling_factor[ - methane.fs.unit.gibbs_minimization[0.0, "Vap", "NH3"] - ] = 1.0314993300171925e-06 - methane.scaling_factor[methane.fs.unit.gibbs_minimization[0.0, "Vap", "O2"]] = ( - 9.653850829168712e-07 - ) - methane.scaling_factor[methane.fs.unit.lagrange_mult[0.0, "C"]] = ( - 3.915957066473961e-06 - ) - methane.scaling_factor[methane.fs.unit.lagrange_mult[0.0, "H"]] = ( - 5.55811764258496e-06 - ) - methane.scaling_factor[methane.fs.unit.lagrange_mult[0.0, "N"]] = ( - 4.318668616445236e-06 - ) - 
methane.scaling_factor[methane.fs.unit.lagrange_mult[0.0, "O"]] = ( - 2.730521354347429e-06 - ) + scaler = AutoScaler(overwrite=True) + scaler.scale_variables_by_magnitude(methane) + scaler.scale_constraints_by_jacobian_norm(methane) - scaling = TransformationFactory("core.scale_model") - sm = scaling.create_using(methane, rename=False) + results = solver.solve(methane) - results = solver.solve(sm) + # Check for optimal solution + assert_optimal_termination(results) - scaling.propagate_solution(sm, methane) + @pytest.mark.solver + @pytest.mark.skipif(solver is None, reason="Solver not available") + @pytest.mark.component + def test_verify_scaling_duty(self, methane): + assert jacobian_cond(methane, scaled=True) == pytest.approx(9191, abs=1) - # Check for optimal solution - assert check_optimal_termination(results) + assert len(extreme_jacobian_rows(methane, scaled=True)) == 0 + assert len(extreme_jacobian_columns(methane, scaled=True)) == 0 @pytest.mark.solver @pytest.mark.skipif(solver is None, reason="Solver not available") @@ -1662,6 +553,18 @@ def test_get_performance_contents(self, methane): } +# TODO: Replace once scaling deployed to property package +class PropertyScaler(CustomScalerBase): + def variable_scaling_routine(self, model, overwrite): + pass + + def constraint_scaling_routine(self, model, overwrite): + for c in model.component_data_objects(ctype=Constraint, descend_into=True): + self.scale_constraint_by_nominal_value( + c, scheme="inverse_sum", overwrite=overwrite + ) + + class TestInitializers: @pytest.fixture def model(self): @@ -1691,25 +594,62 @@ def model(self): m.fs.unit.outlet.temperature[0].fix(2844.38) m.fs.unit.deltaP.fix(0) + # Apply scaling - Best guesses for unit model and outlet state conditions + set_scaling_factor(m.fs.unit.control_volume.heat[0.0], 1e-6) + + set_scaling_factor(m.fs.unit.control_volume.properties_out[0.0].flow_mol, 1e-2) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].flow_mol_phase, 1e-2 + ) # 
Only 1 phase, so we "know" this + # N2 is inert, so will be order 0.1, assume CH4 and H2 are near-totally consumed, assume most O2 consumed + # Assume moderate amounts of CO2 and H2O, small amounts of CO, trace NH3 NH3 + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2"], 1e4 + ) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["N2"], 10 + ) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["O2"], 1e2 + ) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO2"], 10 + ) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CH4"], 1e4 + ) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO"], 1e3 + ) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2O"], 10 + ) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["NH3"], 1e4 + ) + set_scaling_factor( + m.fs.unit.control_volume.properties_out[0.0].temperature, 1e-3 + ) + set_scaling_factor(m.fs.unit.control_volume.properties_out[0.0].pressure, 1e-5) + + scaler = GibbsReactorScaler() + scaler.scale_model( + m.fs.unit, + submodel_scalers={ + "control_volume.properties_in": PropertyScaler, + "control_volume.properties_out": PropertyScaler, + }, + ) + return m @pytest.mark.component def test_general_hierarchical(self, model): - initializer = SingleControlVolumeUnitInitializer() + initializer = SingleControlVolumeUnitInitializer( + writer_config={"scale_model": True} + ) initializer.initialize( model.fs.unit, - initial_guesses={ - "control_volume.properties_out[0].pressure": 101325.0, - "control_volume.properties_out[0].flow_mol": 251.05, - "control_volume.properties_out[0].mole_frac_comp[CH4]": 1e-5, - "control_volume.properties_out[0].mole_frac_comp[CO]": 0.0916, - "control_volume.properties_out[0].mole_frac_comp[CO2]": 0.0281, - 
"control_volume.properties_out[0].mole_frac_comp[H2]": 0.1155, - "control_volume.properties_out[0].mole_frac_comp[H2O]": 0.1633, - "control_volume.properties_out[0].mole_frac_comp[N2]": 0.59478, - "control_volume.properties_out[0].mole_frac_comp[NH3]": 1e-5, - "control_volume.properties_out[0].mole_frac_comp[O2]": 0.0067, - }, ) assert initializer.summary[model.fs.unit]["status"] == InitializationStatus.Ok @@ -1762,7 +702,7 @@ def test_general_hierarchical(self, model): def test_block_triangularization(self, model): initializer = BlockTriangularizationInitializer( constraint_tolerance=2e-5, - block_solver_writer_config={"linear_presolve": False}, + block_solver_writer_config={"linear_presolve": False, "scale_model": True}, ) initializer.initialize( model.fs.unit, diff --git a/idaes/models/unit_models/tests/test_gibbs_scaling.py b/idaes/models/unit_models/tests/test_gibbs_scaling.py new file mode 100644 index 0000000000..a9a7d7fbd8 --- /dev/null +++ b/idaes/models/unit_models/tests/test_gibbs_scaling.py @@ -0,0 +1,469 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Tests for Gibbs reactor Scaler. 
+ +Author: Andrew Lee +""" +import os +import pytest + +from pyomo.environ import ( + assert_optimal_termination, + ComponentMap, + ConcreteModel, + Constraint, + Suffix, + TransformationFactory, + units, + value, + Var, +) + +from idaes.core import FlowsheetBlock +from idaes.models.unit_models.gibbs_reactor import GibbsReactor, GibbsReactorScaler +from idaes.models.properties.activity_coeff_models.methane_combustion_ideal import ( + MethaneParameterBlock as MethaneCombustionParameterBlock, +) +from idaes.core.util.testing import PhysicalParameterTestBlock +from idaes.core.util.scaling import ( + jacobian_cond, +) +from idaes.core.util import from_json, StoreSpec +from idaes.core.scaling import CustomScalerBase, get_scaling_factor, set_scaling_factor + + +# Get solution json from scaling tests +FILENAME = "gibbs_solution.json" +local_path = os.path.dirname(os.path.realpath(__file__)) +fname = os.path.join(local_path, "..", "..", "..", "core", "scaling", "tests", FILENAME) + + +# ----------------------------------------------------------------------------- +@pytest.fixture +def test_model(): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + + m.fs.properties = PhysicalParameterTestBlock() + + m.fs.unit = GibbsReactor( + property_package=m.fs.properties, + has_heat_transfer=True, + has_pressure_change=True, + ) + + return m + + +class DummyScaler: + def variable_scaling_routine(self, model, overwrite): + model._dummy_scaler_test = overwrite + + def constraint_scaling_routine(self, model, overwrite): + model._dummy_scaler_test = overwrite + + +@pytest.mark.unit +class TestVariableScaling: + + def test_variable_scaling_no_input(self, test_model): + scaler = GibbsReactorScaler() + + scaler.variable_scaling_routine(test_model.fs.unit) + + for v in test_model.fs.unit.lagrange_mult.values(): + assert test_model.fs.unit.scaling_factor[v] == pytest.approx( + 1 / (8.314 * 500), rel=1e-4 + ) + + for v in test_model.fs.unit.control_volume.heat.values(): + assert 
test_model.fs.unit.control_volume.scaling_factor[v] == pytest.approx( + 1e-6, rel=1e-4 + ) + + for v in test_model.fs.unit.control_volume.deltaP.values(): + assert test_model.fs.unit.control_volume.scaling_factor[v] == pytest.approx( + 1e-3, rel=1e-4 + ) + + def test_variable_scaling_no_heat_deltaP(self): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + + m.fs.properties = PhysicalParameterTestBlock() + + m.fs.unit = GibbsReactor( + property_package=m.fs.properties, + has_heat_transfer=False, + has_pressure_change=False, + ) + + scaler = GibbsReactorScaler() + + scaler.variable_scaling_routine(m.fs.unit) + + for v in m.fs.unit.lagrange_mult.values(): + assert m.fs.unit.scaling_factor[v] == pytest.approx( + 1 / (8.314 * 500), rel=1e-4 + ) + + def test_variable_scaling_inlet_state(self, test_model): + prop_in = test_model.fs.unit.control_volume.properties_in[0] + sfx = prop_in.scaling_factor = Suffix(direction=Suffix.EXPORT) + sfx[prop_in.temperature] = 1e-2 + sfx[prop_in.pressure] = 1e-5 + for j in prop_in.flow_mol_phase_comp.values(): + sfx[j] = 1e-2 + + scaler = GibbsReactorScaler() + + scaler.variable_scaling_routine(test_model.fs.unit) + + # Outlet properties should now have scaling factors + prop_out = test_model.fs.unit.control_volume.properties_out[0] + assert prop_out.scaling_factor[prop_out.temperature] == 1e-2 + assert prop_out.scaling_factor[prop_out.pressure] == 1e-5 + for j in prop_out.flow_mol_phase_comp.values(): + prop_out.scaling_factor[j] == 1e-2 + + for v in test_model.fs.unit.lagrange_mult.values(): + assert test_model.fs.unit.scaling_factor[v] == pytest.approx( + 1 / (8.314 * 100), rel=1e-4 + ) + + for v in test_model.fs.unit.control_volume.heat.values(): + assert test_model.fs.unit.control_volume.scaling_factor[v] == pytest.approx( + 1e-6, rel=1e-4 + ) + + for v in test_model.fs.unit.control_volume.deltaP.values(): + assert test_model.fs.unit.control_volume.scaling_factor[v] == pytest.approx( + 1e-3, rel=1e-4 + ) + + def 
test_variable_scaling_submodel_scalers(self, test_model): + scaler = GibbsReactorScaler() + + scaler_map = ComponentMap() + scaler_map[test_model.fs.unit.control_volume.properties_in] = DummyScaler() + scaler_map[test_model.fs.unit.control_volume.properties_out] = DummyScaler() + + scaler.variable_scaling_routine( + test_model.fs.unit, + submodel_scalers=scaler_map, + ) + + # Check to see if testing attribute was created correctly + assert not test_model.fs.unit.control_volume.properties_in[0]._dummy_scaler_test + assert not test_model.fs.unit.control_volume.properties_out[ + 0 + ]._dummy_scaler_test + + +@pytest.mark.unit +class TestConstraintScaling: + + def test_constraint_scaling_no_inputs(self, test_model): + scaler = GibbsReactorScaler() + + scaler.constraint_scaling_routine(test_model.fs.unit) + + sfx = test_model.fs.unit.control_volume.scaling_factor + + assert sfx[ + test_model.fs.unit.control_volume.element_balances[0.0, "H"] + ] == pytest.approx(0.05, rel=1e-5) + assert sfx[ + test_model.fs.unit.control_volume.element_balances[0.0, "He"] + ] == pytest.approx(0.0357143, rel=1e-5) + assert sfx[ + test_model.fs.unit.control_volume.element_balances[0.0, "Li"] + ] == pytest.approx(0.0277778, rel=1e-5) + assert sfx[ + test_model.fs.unit.control_volume.enthalpy_balances[0.0] + ] == pytest.approx(0.25, rel=1e-5) + assert sfx[ + test_model.fs.unit.control_volume.pressure_balance[0.0] + ] == pytest.approx(5e-6, rel=1e-5) + + for k, v in test_model.fs.unit.gibbs_minimization.items(): + if k[2] == "c1": + assert test_model.fs.unit.scaling_factor[v] == pytest.approx( + 1.53846e-3, rel=1e-5 + ) + else: + assert test_model.fs.unit.scaling_factor[v] == pytest.approx( + 6.45161e-4, rel=1e-5 + ) + + def test_constraint_scaling_inerts(self): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + + m.fs.properties = PhysicalParameterTestBlock() + + m.fs.unit = GibbsReactor( + property_package=m.fs.properties, + has_heat_transfer=True, + has_pressure_change=True, + 
inert_species=["c1"], + ) + + scaler = GibbsReactorScaler() + + scaler.constraint_scaling_routine(m.fs.unit) + + sfx = m.fs.unit.control_volume.scaling_factor + + assert sfx[ + m.fs.unit.control_volume.element_balances[0.0, "H"] + ] == pytest.approx(0.05, rel=1e-5) + assert sfx[ + m.fs.unit.control_volume.element_balances[0.0, "He"] + ] == pytest.approx(0.0357143, rel=1e-5) + assert sfx[ + m.fs.unit.control_volume.element_balances[0.0, "Li"] + ] == pytest.approx(0.0277778, rel=1e-5) + assert sfx[m.fs.unit.control_volume.enthalpy_balances[0.0]] == pytest.approx( + 0.25, rel=1e-5 + ) + assert sfx[m.fs.unit.control_volume.pressure_balance[0.0]] == pytest.approx( + 5e-6, rel=1e-5 + ) + + for k, v in m.fs.unit.gibbs_minimization.items(): + assert m.fs.unit.scaling_factor[v] == pytest.approx(6.45161e-4, rel=1e-5) + + for k, v in m.fs.unit.inert_species_balance.items(): + assert m.fs.unit.scaling_factor[v] == pytest.approx(0.5, rel=1e-5) + + def test_constraint_scaling_submodel_scalers(self, test_model): + scaler = GibbsReactorScaler() + + scaler_map = ComponentMap() + scaler_map[test_model.fs.unit.control_volume.properties_in] = DummyScaler() + scaler_map[test_model.fs.unit.control_volume.properties_out] = DummyScaler() + + scaler.constraint_scaling_routine( + test_model.fs.unit, + submodel_scalers=scaler_map, + ) + + # Check to see if testing attribute was created correctly + assert not test_model.fs.unit.control_volume.properties_in[0]._dummy_scaler_test + assert not test_model.fs.unit.control_volume.properties_out[ + 0 + ]._dummy_scaler_test + + +# ----------------------------------------------------------------------------- +class SMScaler(CustomScalerBase): + def variable_scaling_routine(self, model, overwrite): + pass + + def constraint_scaling_routine(self, model, overwrite): + for c in model.component_data_objects(ctype=Constraint, descend_into=True): + self.scale_constraint_by_nominal_value( + c, scheme="inverse_sum", overwrite=overwrite + ) + + +# TODO: Turn 
this into a testing harness? +@pytest.mark.integration +class TestMethaneScaling(object): + @pytest.fixture + def methane(self): + model = ConcreteModel() + model.fs = FlowsheetBlock(dynamic=False) + + model.fs.properties = MethaneCombustionParameterBlock() + + model.fs.unit = GibbsReactor( + property_package=model.fs.properties, + has_heat_transfer=True, + has_pressure_change=True, + ) + + model.fs.unit.inlet.flow_mol[0].fix(230.0) + model.fs.unit.inlet.mole_frac_comp[0, "H2"].fix(0.0435) + model.fs.unit.inlet.mole_frac_comp[0, "N2"].fix(0.6522) + model.fs.unit.inlet.mole_frac_comp[0, "O2"].fix(0.1739) + model.fs.unit.inlet.mole_frac_comp[0, "CO2"].fix(1e-5) + model.fs.unit.inlet.mole_frac_comp[0, "CH4"].fix(0.1304) + model.fs.unit.inlet.mole_frac_comp[0, "CO"].fix(1e-5) + model.fs.unit.inlet.mole_frac_comp[0, "H2O"].fix(1e-5) + model.fs.unit.inlet.mole_frac_comp[0, "NH3"].fix(1e-5) + model.fs.unit.inlet.temperature[0].fix(1500.0) + model.fs.unit.inlet.pressure[0].fix(101325.0) + + model.fs.unit.outlet.temperature[0].fix(2844.38) + model.fs.unit.deltaP.fix(0) + + # Set imperfect scaling factors for all variables, representing an initial "best-guess" + # Feed states are known exactly - set scaling based on these + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].flow_mol, 1 / 230 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].flow_mol_phase, 1 / 230 + ) # Only 1 phase, so we "know" this + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["H2"], + 1 / 0.0435, + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["N2"], + 1 / 0.6522, + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["O2"], + 1 / 0.1739, + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CO2"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CH4"], + 1 / 
0.1304, + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["CO"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["H2O"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].mole_frac_comp["NH3"], 1e5 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].temperature, 1 / 1500 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_in[0.0].pressure, 1e-5 + ) + # Assume user does not know anything about enthalpy + + # Best guesses for unit model and outlet state conditions + set_scaling_factor(model.fs.unit.control_volume.heat[0.0], 1e-6) + + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].flow_mol, 1e-2 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].flow_mol_phase, 1e-2 + ) # Only 1 phase, so we "know" this + # N2 is inert, so will be order 0.1, assume CH4 and H2 are near-totally consumed, assume most O2 consumed + # Assume moderate amounts of CO2 and H2O, small amounts of CO, trace NH3 NH3 + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["N2"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["O2"], 1e2 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO2"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CH4"], 1e4 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["CO"], 1e3 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["H2O"], 10 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].mole_frac_comp["NH3"], 1e4 + ) + set_scaling_factor( + 
model.fs.unit.control_volume.properties_out[0.0].temperature, 1e-3 + ) + set_scaling_factor( + model.fs.unit.control_volume.properties_out[0.0].pressure, 1e-5 + ) + + from_json(model, fname=fname, wts=StoreSpec.value()) + + return model + + def test_variable_scaling_only(self, methane): + unscaled = jacobian_cond(methane, scaled=False) + + scaler_map = ComponentMap() + scaler_map[methane.fs.unit.control_volume.properties_in] = SMScaler() + scaler_map[methane.fs.unit.control_volume.properties_out] = SMScaler() + + scaler = GibbsReactorScaler() + scaler.variable_scaling_routine( + methane.fs.unit, + submodel_scalers=scaler_map, + ) + + scaled = jacobian_cond(methane, scaled=True) + + count = 0 + for c in methane.component_data_objects(ctype=[Var], descend_into=True): + sf = get_scaling_factor(c) + if sf is None: + count += 1 + assert count == 52 + + assert scaled < unscaled + assert scaled == pytest.approx(8.908989e16, rel=1e-5) + + def test_constraint_scaling_only(self, methane): + unscaled = jacobian_cond(methane, scaled=False) + + scaler_map = ComponentMap() + scaler_map[methane.fs.unit.control_volume.properties_in] = SMScaler() + scaler_map[methane.fs.unit.control_volume.properties_out] = SMScaler() + + scaler = GibbsReactorScaler() + scaler.constraint_scaling_routine( + methane.fs.unit, + submodel_scalers=scaler_map, + ) + + scaled = jacobian_cond(methane, scaled=True) + + count = 0 + for c in methane.component_data_objects(ctype=[Constraint], descend_into=True): + sf = get_scaling_factor(c) + if sf is None: + count += 1 + assert count == 0 + + assert scaled < unscaled + assert scaled == pytest.approx(9.316e15, rel=1e-2) + + def test_full_scaling(self, methane): + unscaled = jacobian_cond(methane, scaled=False) + + scaler_map = ComponentMap() + scaler_map[methane.fs.unit.control_volume.properties_in] = SMScaler() + scaler_map[methane.fs.unit.control_volume.properties_out] = SMScaler() + + scaler = GibbsReactorScaler() + scaler.scale_model( + methane.fs.unit, + 
submodel_scalers=scaler_map, + ) + + scaled = jacobian_cond(methane, scaled=True) + + assert scaled < unscaled + assert scaled == pytest.approx(7.653e15, rel=1e-2) From 2515fe859ca8dac24eb500f04561ae8d0fb1f4fe Mon Sep 17 00:00:00 2001 From: Andrew Lee Date: Fri, 18 Oct 2024 13:45:22 -0400 Subject: [PATCH 2/5] Fix issue with enthalpy based states trying to use EoS before construction (#1505) --- .../modular_properties/base/generic_property.py | 9 +++++++++ .../modular_properties/base/tests/dummy_eos.py | 2 +- .../modular_properties/state_definitions/FPhx.py | 11 +---------- .../modular_properties/state_definitions/FcPh.py | 11 +---------- .../state_definitions/tests/test_FPhx.py | 15 ++++++++------- .../state_definitions/tests/test_FTPx.py | 1 + .../state_definitions/tests/test_FcPh.py | 15 ++++++++------- .../state_definitions/tests/test_FcTP.py | 1 + .../state_definitions/tests/test_FpcTP.py | 1 + 9 files changed, 31 insertions(+), 35 deletions(-) diff --git a/idaes/models/properties/modular_properties/base/generic_property.py b/idaes/models/properties/modular_properties/base/generic_property.py index 43ffad3806..26a31b3d57 100644 --- a/idaes/models/properties/modular_properties/base/generic_property.py +++ b/idaes/models/properties/modular_properties/base/generic_property.py @@ -2105,6 +2105,15 @@ def build(self): pobj = self.params.get_phase(p) pobj.config.equation_of_state.common(self, pobj) + # Check to see if state definition uses enthalpy + if self.is_property_constructed("enth_mol"): + # State definition uses enthalpy, need to add constraint on phase enthalpies + @self.Constraint(doc="Total molar enthalpy mixing rule") + def enth_mol_eqn(b): + return b.enth_mol == sum( + b.enth_mol_phase[p] * b.phase_frac[p] for p in b.phase_list + ) + # Add phase equilibrium constraints if necessary if self.params.config.phases_in_equilibrium is not None and ( not self.config.defined_state or self.always_flash diff --git 
a/idaes/models/properties/modular_properties/base/tests/dummy_eos.py b/idaes/models/properties/modular_properties/base/tests/dummy_eos.py index 4da20f2500..8efb87a37d 100644 --- a/idaes/models/properties/modular_properties/base/tests/dummy_eos.py +++ b/idaes/models/properties/modular_properties/base/tests/dummy_eos.py @@ -103,7 +103,7 @@ def energy_internal_mol_phase_comp(b, p, j): @staticmethod def enth_mol_phase(b, p): - return 1e2 * b.temperature + return 1e2 * pyunits.J / pyunits.mol / pyunits.K * b.temperature @staticmethod def enth_mol_phase_comp(b, p, j): diff --git a/idaes/models/properties/modular_properties/state_definitions/FPhx.py b/idaes/models/properties/modular_properties/state_definitions/FPhx.py index 9922d80332..d683e850d0 100644 --- a/idaes/models/properties/modular_properties/state_definitions/FPhx.py +++ b/idaes/models/properties/modular_properties/state_definitions/FPhx.py @@ -176,15 +176,6 @@ def flow_mol_phase_comp_rule(b, p, j): expr=1 == sum(b.mole_frac_comp[i] for i in b.component_list) ) - def rule_enth_mol(b): - return b.enth_mol == sum( - b.enth_mol_phase[p] * b.phase_frac[p] for p in b.phase_list - ) - - b.enth_mol_eq = Constraint( - rule=rule_enth_mol, doc="Total molar enthalpy mixing rule" - ) - if len(b.phase_list) == 1: def rule_total_mass_balance(b): @@ -451,7 +442,7 @@ def calculate_scaling_factors(b): b.sum_mole_frac_out, min(sf_mf.values()), overwrite=False ) - iscale.constraint_scaling_transform(b.enth_mol_eq, sf_h, overwrite=False) + iscale.constraint_scaling_transform(b.enth_mol_eqn, sf_h, overwrite=False) if len(b.phase_list) == 1: iscale.constraint_scaling_transform( diff --git a/idaes/models/properties/modular_properties/state_definitions/FcPh.py b/idaes/models/properties/modular_properties/state_definitions/FcPh.py index d53b5ebb82..8d4de71610 100644 --- a/idaes/models/properties/modular_properties/state_definitions/FcPh.py +++ b/idaes/models/properties/modular_properties/state_definitions/FcPh.py @@ -182,15 +182,6 @@ 
def rule_mole_frac_comp(b, j): b.mole_frac_comp_eq = Constraint(b.component_list, rule=rule_mole_frac_comp) - def rule_enth_mol(b): - return b.enth_mol == sum( - b.enth_mol_phase[p] * b.phase_frac[p] for p in b.phase_list - ) - - b.enth_mol_eq = Constraint( - rule=rule_enth_mol, doc="Total molar enthalpy mixing rule" - ) - if len(b.phase_list) == 1: def rule_total_mass_balance(b): @@ -456,7 +447,7 @@ def calculate_scaling_factors(b): b.mole_frac_comp_eq[j], sf_j, overwrite=False ) - iscale.constraint_scaling_transform(b.enth_mol_eq, sf_h, overwrite=False) + iscale.constraint_scaling_transform(b.enth_mol_eqn, sf_h, overwrite=False) if len(b.phase_list) == 1: iscale.constraint_scaling_transform( diff --git a/idaes/models/properties/modular_properties/state_definitions/tests/test_FPhx.py b/idaes/models/properties/modular_properties/state_definitions/tests/test_FPhx.py index c3c4514411..0ded46518b 100644 --- a/idaes/models/properties/modular_properties/state_definitions/tests/test_FPhx.py +++ b/idaes/models/properties/modular_properties/state_definitions/tests/test_FPhx.py @@ -47,6 +47,7 @@ pytestmark = pytest.mark.unit +# Note: The state definition is set by importing functions for the relevant module above @declare_process_block_class("DummyParameterBlock") class DummyParameterData(GenericParameterData): pass @@ -137,7 +138,7 @@ def test_mole_frac(self, caplog): class Test1PhaseDefinedStateFalseNoBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -263,7 +264,7 @@ def test_constraints(self, frame): class Test1PhaseDefinedStateTrueWithBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -389,7 +390,7 @@ def test_constraints(self, frame): class Test2PhaseDefinedStateFalseNoBounds(object): # Test define_state method with no 
bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -551,7 +552,7 @@ def test_constraints(self, frame): class Test2PhaseDefinedStateTrueWithBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -712,7 +713,7 @@ def test_constraints(self, frame): class Test3PhaseDefinedStateFalseNoBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -862,7 +863,7 @@ def test_constraints(self, frame): class Test3PhaseDefinedStateTrueWithBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -1020,7 +1021,7 @@ def test_constraints(self, frame): class TestCommon(object): - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() diff --git a/idaes/models/properties/modular_properties/state_definitions/tests/test_FTPx.py b/idaes/models/properties/modular_properties/state_definitions/tests/test_FTPx.py index 50e44b7a50..ad4540b74e 100644 --- a/idaes/models/properties/modular_properties/state_definitions/tests/test_FTPx.py +++ b/idaes/models/properties/modular_properties/state_definitions/tests/test_FTPx.py @@ -55,6 +55,7 @@ import idaes.logger as idaeslog +# Note: The state definition is set by importing functions for the relevant module above @declare_process_block_class("DummyParameterBlock") class DummyParameterData(GenericParameterData): pass diff --git a/idaes/models/properties/modular_properties/state_definitions/tests/test_FcPh.py b/idaes/models/properties/modular_properties/state_definitions/tests/test_FcPh.py index f991791d34..24a6a3131f 100644 --- 
a/idaes/models/properties/modular_properties/state_definitions/tests/test_FcPh.py +++ b/idaes/models/properties/modular_properties/state_definitions/tests/test_FcPh.py @@ -51,6 +51,7 @@ import idaes.logger as idaeslog +# Note: The state definition is set by importing functions for the relevant module above @declare_process_block_class("DummyParameterBlock") class DummyParameterData(GenericParameterData): pass @@ -141,7 +142,7 @@ def test_mole_frac(self, caplog): class Test1PhaseDefinedStateFalseNoBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -276,7 +277,7 @@ def test_constraints(self, frame): class Test1PhaseDefinedStateTrueWithBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -429,7 +430,7 @@ def test_constraints(self, frame): class Test2PhaseDefinedStateFalseNoBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -600,7 +601,7 @@ def test_constraints(self, frame): class Test2PhaseDefinedStateTrueWithBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -788,7 +789,7 @@ def test_constraints(self, frame): class Test3PhaseDefinedStateFalseNoBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() @@ -947,7 +948,7 @@ def test_constraints(self, frame): class Test3PhaseDefinedStateTrueWithBounds(object): # Test define_state method with no bounds and defined_State = False - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() 
@@ -1121,7 +1122,7 @@ def test_constraints(self, frame): class TestCommon(object): - @pytest.fixture(scope="class") + @pytest.fixture() def frame(self): m = ConcreteModel() diff --git a/idaes/models/properties/modular_properties/state_definitions/tests/test_FcTP.py b/idaes/models/properties/modular_properties/state_definitions/tests/test_FcTP.py index 1c1d754fbd..07307bfe92 100644 --- a/idaes/models/properties/modular_properties/state_definitions/tests/test_FcTP.py +++ b/idaes/models/properties/modular_properties/state_definitions/tests/test_FcTP.py @@ -50,6 +50,7 @@ import idaes.logger as idaeslog +# Note: The state definition is set by importing functions for the relevant module above @declare_process_block_class("DummyParameterBlock") class DummyParameterData(GenericParameterData): pass diff --git a/idaes/models/properties/modular_properties/state_definitions/tests/test_FpcTP.py b/idaes/models/properties/modular_properties/state_definitions/tests/test_FpcTP.py index 8c339ee86f..6e6fca7729 100644 --- a/idaes/models/properties/modular_properties/state_definitions/tests/test_FpcTP.py +++ b/idaes/models/properties/modular_properties/state_definitions/tests/test_FpcTP.py @@ -58,6 +58,7 @@ from idaes.core.util.model_statistics import degrees_of_freedom, large_residuals_set +# Note: The state definition is set by importing functions for the relevant module above @declare_process_block_class("DummyParameterBlock") class DummyParameterData(GenericParameterData): pass From 736972822d2f9e31413c988932d6bedf3fc58638 Mon Sep 17 00:00:00 2001 From: Ali Zaidi <41302741+azaidi06@users.noreply.github.com> Date: Fri, 18 Oct 2024 22:38:06 -0400 Subject: [PATCH 3/5] Pin ubuntu version to resolve failing gha jobs until support available in idaes extension (#1506) --- .github/workflows/core.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/core.yml b/.github/workflows/core.yml index 8c8313088e..36a11a7d30 100644 --- a/.github/workflows/core.yml 
+++ b/.github/workflows/core.yml @@ -225,7 +225,7 @@ jobs: compat: name: Compatibility tests - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - name: Set up Conda environment From 96237b9394e925013c92d66a0acc561808ef199f Mon Sep 17 00:00:00 2001 From: Andrew Lee Date: Tue, 22 Oct 2024 15:16:22 -0400 Subject: [PATCH 4/5] Scaler for equilibrium reactor and saponification properties (#1500) * Adding infrastructure to support ipopt_v2 * Moving core/util to ipopt_v2 * Moving MH initializer to ipopt_v2 * Fixing pint version issue * Set TSA to use old IPOPT interface * Trying to resolve Windows failures * Working on platofrm dependent failure * BTInitializer with presolve * Moving last bits of core code to ipopt_v2 * Starting on idaes/models * Removing ma57_automatic_scaling default and updating idaes/models/control * idaes/model/properties part 1 * Remaining parts of idaes/models/proeprties * Fixing typo * Switching idaes/models/unit_models to ipopt_v2 * Attempt to work around HXLC issues for now * Some clean up * Switching modular properties initializer to solver indexed blocks * Addressing comments * Recovering from previous branch * Some clean up * Adding ScalerBase class and tests * Working on CustomScalerBase * Nominal value constraint scaling * Adding some initial integration tests for scaling * Some more nominal magnitude scaling approaches * Prototyping pseudojacobian scaler * Trying to debug pseudojacobian * Removing unnecessary import * Addressing pylint issues * Cleaning up nominal jacobian * More methods for CustomScalerBase * Prototyping Gibbs reactor scaler * Gibbs reactor constraint scaling * Working on testing and profiling * Refining Gibbs scaler * Refining nominal value walker * Fixing walker tests * Testing GibbsScaler with initialization * Fixing auto norm scaling on indexed blocks * Testing scaling profiler * Fixing typos * Fixing pylint issue * Imrpoving some doc strings * Apply suggestions from code review 
Co-authored-by: MarcusHolly <96305519+MarcusHolly@users.noreply.github.com> * Fixing issue with autoscaling vars with value None * Adding profiler to __init__ * Fixing name for RSS method * Fixing import of pyomo.environ * Allowing default scaling for indexed components * Adding catch for critical solver failure in profiler * Starting on docs * Finishing docs and unifiying method names * Profiler report methods and docs * Fixing typos * Pylint: fix unnecessary f-string * Working on equilibrium reactor scaler * Finishing tests for equilibrium reactor scaler * Updaing requil scaler * Fixing typo * Addressing pylint issue * Improving guess for heat duty scaling * Updating test value for heat duty scaling * Minor fix * Adding Enum to __init__ * Adding doc strings for saponification scalers * Fixing typo --------- Co-authored-by: MarcusHolly <96305519+MarcusHolly@users.noreply.github.com> --- .../scaling_toolbox/scaling_theory.rst | 2 +- .../generic/unit_models/equilibrium.rst | 6 + idaes/core/base/property_base.py | 8 + idaes/core/base/reaction_base.py | 8 + idaes/core/scaling/__init__.py | 2 +- idaes/core/scaling/custom_scaler_base.py | 1 + idaes/core/scaling/scaler_profiling.py | 16 +- .../tests/test_custom_scaling_integration.py | 2 - .../examples/saponification_reactions.py | 74 ++++ .../examples/saponification_thermo.py | 46 ++ .../tests/test_saponification_reaction.py | 97 ++++- .../tests/test_saponification_thermo.py | 91 +++- .../models/unit_models/equilibrium_reactor.py | 161 ++++++- .../tests/test_equilibrium_reactor.py | 404 +++++++++++++++++- 14 files changed, 900 insertions(+), 18 deletions(-) diff --git a/docs/explanations/scaling_toolbox/scaling_theory.rst b/docs/explanations/scaling_toolbox/scaling_theory.rst index 6c8a9a4300..52b2a00e9d 100644 --- a/docs/explanations/scaling_toolbox/scaling_theory.rst +++ b/docs/explanations/scaling_toolbox/scaling_theory.rst @@ -14,7 +14,7 @@ Types of Scaling * Constraint residual scaling refers to the magnitude of 
the residual of all constraints in the model. This is important as this is required to determine whether or not a model has converged, thus it is important that a residual equal to the solver tolerance does not significantly alter the solution to the model. * E.g. consider a constraint A=B. If the magnitude of A and B are 1e6 and the solver tolerance is 1e-6, this means that A and B need to be solved to 12 significant figures of precision to converge the constraint (which may be unnecessarily onerous). Similarly, if A and B were of order 1e-6, then they will be considered equal if A=1e-6 and B=0; i.e. ±100% error. - + * Jacobian scaling refers to the overall scaling and conditioning of the problem Jacobian. This is important as this determines the ability of the solver to find a path from the initial state to the final solution. It is important to ensure that the Jacobian is well-conditioned both in order to get good numerical behavior from the solver and also to ensure that floating point round-off error does not result in large changes to the variables at the solution. These aspects are not always complimentary, and there are often cases where improving one aspect of the model scaling can negatively affect another aspect (e.g., focusing too much on Jacobian condition number can often result in poor constraint residual tolerances). Additionally, each aspect affects different parts of the solver routine; Jacobian scaling is important for determining the step direction and size at each iteration, whilst constraint residual scaling is important for determining if and when a solution is found. The relative importance of each of these depends on the solver being used (e.g., for IPOPT constraint residual scaling is more important than Jacobian scaling as IPOPT does its own internal scaling of the Jacobian), however in general all of these will have an impact upon the solver behavior and users should endeavor to have good scaling for all aspects of the model. 
diff --git a/docs/reference_guides/model_libraries/generic/unit_models/equilibrium.rst b/docs/reference_guides/model_libraries/generic/unit_models/equilibrium.rst index d34c60f844..9f7536639c 100644 --- a/docs/reference_guides/model_libraries/generic/unit_models/equilibrium.rst +++ b/docs/reference_guides/model_libraries/generic/unit_models/equilibrium.rst @@ -51,3 +51,9 @@ EquilibriumReactorData Class .. autoclass:: EquilibriumReactorData :members: + +EquilibriumReactorScaler Class +------------------------------ + +.. autoclass:: EquilibriumReactorScaler + :members: diff --git a/idaes/core/base/property_base.py b/idaes/core/base/property_base.py index 2e0743f994..b7afb011bb 100644 --- a/idaes/core/base/property_base.py +++ b/idaes/core/base/property_base.py @@ -593,6 +593,14 @@ def include_inherent_reactions(self): # pylint: disable-next=protected-access return self.parent_component()._include_inherent_reactions() + @property + def default_initializer(self): + return self.parent_component().default_initializer + + @property + def default_scaler(self): + return self.parent_component().default_scaler + def build(self): """ General build method for StateBlockDatas. 
diff --git a/idaes/core/base/reaction_base.py b/idaes/core/base/reaction_base.py index 7a42c36f0f..5d43c5485c 100644 --- a/idaes/core/base/reaction_base.py +++ b/idaes/core/base/reaction_base.py @@ -267,6 +267,14 @@ def phase_list(self): def phase_component_set(self): return self.state_ref.phase_component_set + @property + def default_initializer(self): + return self.parent_component().default_initializer + + @property + def default_scaler(self): + return self.parent_component().default_scaler + def lock_attribute_creation_context(self): """Returns a context manager that does not allow attributes to be created while in the context and allows attributes to be created normally outside diff --git a/idaes/core/scaling/__init__.py b/idaes/core/scaling/__init__.py index 0d1b13ee0f..e3e3661d2b 100644 --- a/idaes/core/scaling/__init__.py +++ b/idaes/core/scaling/__init__.py @@ -11,7 +11,7 @@ # for full copyright and license information. ################################################################################# from .autoscaling import AutoScaler -from .custom_scaler_base import CustomScalerBase +from .custom_scaler_base import CustomScalerBase, ConstraintScalingScheme from .scaler_profiling import ScalingProfiler from .util import ( scaling_factors_from_json_file, diff --git a/idaes/core/scaling/custom_scaler_base.py b/idaes/core/scaling/custom_scaler_base.py index 4da3cdb20b..0d47673805 100644 --- a/idaes/core/scaling/custom_scaler_base.py +++ b/idaes/core/scaling/custom_scaler_base.py @@ -571,6 +571,7 @@ def call_submodel_scaler_method( submodel: local name of submodel to be scaled as str submodel_scalers: user provided ComponentMap of Scalers to use for submodels method: name of method to call from submodel (as string) + overwrite: whether to overwrite existing scaling factors Returns: None diff --git a/idaes/core/scaling/scaler_profiling.py b/idaes/core/scaling/scaler_profiling.py index fe8b72e187..9bef06ead3 100644 --- a/idaes/core/scaling/scaler_profiling.py 
+++ b/idaes/core/scaling/scaler_profiling.py @@ -19,7 +19,11 @@ from pyomo.common.tempfiles import TempfileManager from idaes.core.util.scaling import jacobian_cond -from idaes.core.scaling import AutoScaler, CustomScalerBase +from idaes.core.scaling.autoscaling import AutoScaler +from idaes.core.scaling.custom_scaler_base import ( + CustomScalerBase, + ConstraintScalingScheme, +) from idaes.core.solvers import get_solver @@ -83,23 +87,23 @@ def __init__( "Vars Only": (None, {}), "Harmonic": ( cscaler.scale_constraint_by_nominal_value, - {"scheme": "harmonic_mean"}, + {"scheme": ConstraintScalingScheme.harmonicMean}, ), "Inverse Sum": ( cscaler.scale_constraint_by_nominal_value, - {"scheme": "inverse_sum"}, + {"scheme": ConstraintScalingScheme.inverseSum}, ), "Inverse Root Sum Squares": ( cscaler.scale_constraint_by_nominal_value, - {"scheme": "inverse_root_sum_squared"}, + {"scheme": ConstraintScalingScheme.inverseRSS}, ), "Inverse Maximum": ( cscaler.scale_constraint_by_nominal_value, - {"scheme": "inverse_maximum"}, + {"scheme": ConstraintScalingScheme.inverseMaximum}, ), "Inverse Minimum": ( cscaler.scale_constraint_by_nominal_value, - {"scheme": "inverse_minimum"}, + {"scheme": ConstraintScalingScheme.inverseMinimum}, ), "Nominal L1 Norm": ( cscaler.scale_constraint_by_nominal_derivative_norm, diff --git a/idaes/core/scaling/tests/test_custom_scaling_integration.py b/idaes/core/scaling/tests/test_custom_scaling_integration.py index 548846f786..68c5e3af5c 100644 --- a/idaes/core/scaling/tests/test_custom_scaling_integration.py +++ b/idaes/core/scaling/tests/test_custom_scaling_integration.py @@ -38,8 +38,6 @@ from idaes.models.properties.activity_coeff_models.methane_combustion_ideal import ( MethaneParameterBlock as MethaneCombustionParameterBlock, ) -from idaes.core.util.testing import PhysicalParameterTestBlock, initialization_tester -from idaes.core.solvers import get_solver from idaes.core.util import to_json, from_json, StoreSpec from 
idaes.core.util.scaling import jacobian_cond from idaes.core.scaling import AutoScaler, CustomScalerBase, set_scaling_factor diff --git a/idaes/models/properties/examples/saponification_reactions.py b/idaes/models/properties/examples/saponification_reactions.py index a855868104..ffd2334685 100644 --- a/idaes/models/properties/examples/saponification_reactions.py +++ b/idaes/models/properties/examples/saponification_reactions.py @@ -31,11 +31,13 @@ from idaes.core.util.misc import add_object_reference from idaes.core.util.constants import Constants as const import idaes.logger as idaeslog +from idaes.core.scaling import CustomScalerBase # Some more information about this module __author__ = "Andrew Lee" +from idaes.core.util.scaling import get_scaling_factor # Set up logger _log = idaeslog.getLogger(__name__) @@ -111,12 +113,84 @@ def define_metadata(cls, obj): ) +class SaponificationReactionScaler(CustomScalerBase): + """ + Scaler for saponification reaction package. + + Variables are scaled by nominal order of magnitude, and constraints + using the inverse maximum scheme. + """ + + DEFAULT_SCALING_FACTORS = {"reaction_rate": 1e2} + + def variable_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: dict = None + ): + if model.is_property_constructed("k_rxn"): + # First check to see if k_rxn is already scaled + sf = get_scaling_factor(model.k_rxn) + + if sf is not None and not overwrite: + # k_rxn already has a scaling factor and we are not set to overwrite - move on + pass + else: + # Hopefully temperature has been scaled, so we can get the nominal value of k_rxn + # by walking the expression in the constraint. 
+ nominals = self.get_expression_nominal_values(model.arrhenius_eqn) + + # We should get two values, k_rxn (LHS) and the Arrhenius equation (RHS) + # As of 10/3/2024, the LHS will be the 0-th element of the list, and the RHS the 1st + # However, we cannot assume this will always be the case + + # If LHS has been scaled, nominal will be 1/sf, otherwise it will be k_rxn.value + # Find the value which does NOT match this - guess that this is the 1st element + if nominals[1] != model.k_rxn.value and sf is None: + # This is the most likely case, so check it first + nominal = nominals[1] + elif sf is not None and nominals[1] != 1 / sf: + # Next, check for case where k_rxn was already scaled + nominal = nominals[1] + else: + # Otherwise we have the case where something changed in Pyomo since 10/3/2024 + nominal = nominals[0] + + self.set_variable_scaling_factor( + model.k_rxn, + 1 / nominal, + overwrite=overwrite, + ) + + if model.is_property_constructed("reaction_rate"): + for j in model.reaction_rate.values(): + self.scale_variable_by_default(j, overwrite=overwrite) + + def constraint_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: dict = None + ): + if model.is_property_constructed("arrhenius_eqn"): + self.scale_constraint_by_nominal_value( + model.arrhenius_eqn, + scheme="inverse_maximum", + overwrite=overwrite, + ) + + if model.is_property_constructed("rate_expression"): + for j in model.rate_expression.values(): + self.scale_constraint_by_nominal_value( + j, + scheme="inverse_maximum", + overwrite=overwrite, + ) + + class _ReactionBlock(ReactionBlockBase): """ This Class contains methods which should be applied to Reaction Blocks as a whole, rather than individual elements of indexed Reaction Blocks. """ + default_scaler = SaponificationReactionScaler + def initialize(blk, outlvl=idaeslog.NOTSET, **kwargs): """ Initialization routine for reaction package. 
diff --git a/idaes/models/properties/examples/saponification_thermo.py b/idaes/models/properties/examples/saponification_thermo.py index e21ed6094d..7086858fe7 100644 --- a/idaes/models/properties/examples/saponification_thermo.py +++ b/idaes/models/properties/examples/saponification_thermo.py @@ -44,6 +44,7 @@ from idaes.core.util.model_statistics import degrees_of_freedom from idaes.core.util.initialization import fix_state_vars, revert_state_vars import idaes.logger as idaeslog +from idaes.core.scaling import CustomScalerBase # Some more information about this module __author__ = "Andrew Lee" @@ -135,12 +136,57 @@ def define_metadata(cls, obj): ) +class SaponificationPropertiesScaler(CustomScalerBase): + """ + Scaler for saponification properties package. + + Flow and concentration are scaled by default value (if no user input provided), + pressure is scaled assuming order of magnitude of 1e5 Pa, and temperature is + scaled using the average of the bounds. Constraints using the inverse maximum + scheme. 
+ """ + + UNIT_SCALING_FACTORS = { + # "QuantityName: (reference units, scaling factor) + "Pressure": (units.Pa, 1e-5), + } + + DEFAULT_SCALING_FACTORS = { + "flow_vol": 1e2, + "conc_mol_comp": 1e-2, + } + + def variable_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: dict = None + ): + self.scale_variable_by_default(model.flow_vol, overwrite=overwrite) + self.scale_variable_by_units(model.pressure, overwrite=overwrite) + self.scale_variable_by_bounds(model.temperature, overwrite=overwrite) + for k, v in model.conc_mol_comp.items(): + if k == "H2O": + self.set_variable_scaling_factor(v, 1e-4, overwrite=overwrite) + else: + self.scale_variable_by_default(v, overwrite=overwrite) + + def constraint_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: dict = None + ): + if model.is_property_constructed("conc_water_eqn"): + self.set_constraint_scaling_factor( + model.conc_water_eqn, + 1e-4, + overwrite=overwrite, + ) + + class _StateBlock(StateBlock): """ This Class contains methods which should be applied to Property Blocks as a whole, rather than individual elements of indexed Property Blocks. """ + default_scaler = SaponificationPropertiesScaler + def fix_initialization_states(self): """ Fixes state variables for state blocks. diff --git a/idaes/models/properties/examples/tests/test_saponification_reaction.py b/idaes/models/properties/examples/tests/test_saponification_reaction.py index 1da9acbd77..fdb879b76e 100644 --- a/idaes/models/properties/examples/tests/test_saponification_reaction.py +++ b/idaes/models/properties/examples/tests/test_saponification_reaction.py @@ -14,18 +14,20 @@ Tests for saponification property package example. 
Authors: Andrew Lee """ +from math import exp import pytest -from pyomo.environ import ConcreteModel, Constraint, Param, units, value, Var -from idaes.core import MaterialFlowBasis +from pyomo.environ import ConcreteModel, Constraint, Param, Suffix, units, value, Var + +from idaes.core import MaterialFlowBasis from idaes.models.properties.examples.saponification_reactions import ( SaponificationReactionParameterBlock, ReactionBlock, + SaponificationReactionScaler, ) from idaes.models.properties.examples.saponification_thermo import ( SaponificationParameterBlock, ) - from idaes.core.solvers import get_solver @@ -100,6 +102,8 @@ def test_build(self, model): assert model.rxns[1].temperature_ref is model.props[1].temperature assert model.rxns[1].dh_rxn is model.rparams.dh_rxn + assert model.rxns[1].default_scaler is SaponificationReactionScaler + @pytest.mark.unit def test_rate_constant(self, model): assert isinstance(model.rxns[1].k_rxn, Var) @@ -124,3 +128,90 @@ def test_initialize(self, model): def check_units(self, model): units.assert_units_consistent(model) + + +class TestSaponificationReactionScaler(object): + @pytest.mark.unit + def test_variable_scaling_routine(self): + model = ConcreteModel() + model.pparams = SaponificationParameterBlock() + model.rparams = SaponificationReactionParameterBlock( + property_package=model.pparams + ) + + model.props = model.pparams.build_state_block([1]) + model.rxns = model.rparams.build_reaction_block([1], state_block=model.props) + + # Trigger build of reaction properties + model.rxns[1].reaction_rate + + scaler = model.rxns[1].default_scaler() + assert isinstance(scaler, SaponificationReactionScaler) + + scaler.variable_scaling_routine(model.rxns[1]) + + assert isinstance(model.rxns[1].scaling_factor, Suffix) + + sfx = model.rxns[1].scaling_factor + assert len(sfx) == 2 + assert sfx[model.rxns[1].k_rxn] == pytest.approx( + 1 / (3.132e6 * exp(-43000 / (8.31446262 * 298.15))), rel=1e-8 + ) + assert 
sfx[model.rxns[1].reaction_rate["R1"]] == pytest.approx(1e2, rel=1e-8) + + @pytest.mark.unit + def test_constraint_scaling_routine(self): + model = ConcreteModel() + model.pparams = SaponificationParameterBlock() + model.rparams = SaponificationReactionParameterBlock( + property_package=model.pparams + ) + + model.props = model.pparams.build_state_block([1]) + model.rxns = model.rparams.build_reaction_block([1], state_block=model.props) + + # Trigger build of reaction properties + model.rxns[1].reaction_rate + + scaler = model.rxns[1].default_scaler() + assert isinstance(scaler, SaponificationReactionScaler) + + scaler.constraint_scaling_routine(model.rxns[1]) + + assert isinstance(model.rxns[1].scaling_factor, Suffix) + + sfx = model.rxns[1].scaling_factor + assert len(sfx) == 2 + assert sfx[model.rxns[1].arrhenius_eqn] == pytest.approx(1, rel=1e-8) + assert sfx[model.rxns[1].rate_expression["R1"]] == pytest.approx(1e-4, rel=1e-8) + + @pytest.mark.unit + def test_scale_model(self): + model = ConcreteModel() + model.pparams = SaponificationParameterBlock() + model.rparams = SaponificationReactionParameterBlock( + property_package=model.pparams + ) + + model.props = model.pparams.build_state_block([1]) + model.rxns = model.rparams.build_reaction_block([1], state_block=model.props) + + # Trigger build of reaction properties + model.rxns[1].reaction_rate + + scaler = model.rxns[1].default_scaler() + assert isinstance(scaler, SaponificationReactionScaler) + + scaler.scale_model(model.rxns[1]) + + assert isinstance(model.rxns[1].scaling_factor, Suffix) + + sfx = model.rxns[1].scaling_factor + k_rxn_sf = 1 / (3.132e6 * exp(-43000 / (8.31446262 * 298.15))) + assert len(sfx) == 4 + assert sfx[model.rxns[1].k_rxn] == pytest.approx(k_rxn_sf, rel=1e-8) + assert sfx[model.rxns[1].reaction_rate["R1"]] == pytest.approx(1e2, rel=1e-8) + assert sfx[model.rxns[1].arrhenius_eqn] == pytest.approx(k_rxn_sf, rel=1e-8) + assert sfx[model.rxns[1].rate_expression["R1"]] == pytest.approx( 
+ 1e-4 * k_rxn_sf, rel=1e-8 + ) diff --git a/idaes/models/properties/examples/tests/test_saponification_thermo.py b/idaes/models/properties/examples/tests/test_saponification_thermo.py index a5d7b8ff00..2d298df3c5 100644 --- a/idaes/models/properties/examples/tests/test_saponification_thermo.py +++ b/idaes/models/properties/examples/tests/test_saponification_thermo.py @@ -16,13 +16,14 @@ """ import pytest -from pyomo.environ import ConcreteModel, Constraint, Param, value, Var +from pyomo.environ import ConcreteModel, Constraint, Param, Suffix, value, Var from pyomo.util.check_units import assert_units_consistent from idaes.core import MaterialBalanceType, EnergyBalanceType, MaterialFlowBasis from idaes.models.properties.examples.saponification_thermo import ( SaponificationParameterBlock, SaponificationStateBlock, + SaponificationPropertiesScaler, ) from idaes.core.solvers import get_solver from idaes.core.initialization import ( @@ -102,6 +103,8 @@ def test_build(self, model): assert isinstance(model.props[1].conc_water_eqn, Constraint) assert len(model.props[1].flow_vol) == 1 + assert model.props[1].default_scaler is SaponificationPropertiesScaler + @pytest.mark.unit def test_build_defined_state(self): model = ConcreteModel() @@ -313,3 +316,89 @@ def test_initializer(): initializer.initialize(model.props) assert initializer.summary[model.props]["status"] == InitializationStatus.Ok + + +class TestSaponificationPropertiesScaler: + @pytest.mark.unit + def test_variable_scaling_routine(self): + model = ConcreteModel() + model.params = SaponificationParameterBlock() + + model.props = model.params.build_state_block([1], defined_state=False) + + scaler = model.props[1].default_scaler() + assert isinstance(scaler, SaponificationPropertiesScaler) + + scaler.variable_scaling_routine(model.props[1]) + + assert isinstance(model.props[1].scaling_factor, Suffix) + + sfx = model.props[1].scaling_factor + assert len(sfx) == 8 + assert sfx[model.props[1].flow_vol] == 
pytest.approx(1e2, rel=1e-8) + assert sfx[model.props[1].pressure] == pytest.approx(1e-5, rel=1e-8) + assert sfx[model.props[1].temperature] == pytest.approx(1 / 310.65, rel=1e-8) + for k, v in model.props[1].conc_mol_comp.items(): + if k == "H2O": + assert sfx[v] == pytest.approx(1e-4, rel=1e-8) + else: + assert sfx[v] == pytest.approx(1e-2, rel=1e-8) + + @pytest.mark.unit + def test_constraint_scaling_routine(self): + model = ConcreteModel() + model.params = SaponificationParameterBlock() + + model.props = model.params.build_state_block([1], defined_state=False) + + scaler = model.props[1].default_scaler() + assert isinstance(scaler, SaponificationPropertiesScaler) + + scaler.constraint_scaling_routine(model.props[1]) + + assert isinstance(model.props[1].scaling_factor, Suffix) + + sfx = model.props[1].scaling_factor + assert len(sfx) == 1 + assert sfx[model.props[1].conc_water_eqn] == pytest.approx(1e-4, rel=1e-8) + + @pytest.mark.unit + def test_constraint_scaling_routine_defined_state(self): + model = ConcreteModel() + model.params = SaponificationParameterBlock() + + model.props = model.params.build_state_block([1], defined_state=True) + + scaler = model.props[1].default_scaler() + assert isinstance(scaler, SaponificationPropertiesScaler) + + scaler.constraint_scaling_routine(model.props[1]) + + # No constraints, so there should be no Suffix + assert not hasattr(model.props[1], "scaling_factor") + + @pytest.mark.unit + def test_scale_model(self): + model = ConcreteModel() + model.params = SaponificationParameterBlock() + + model.props = model.params.build_state_block([1], defined_state=False) + + scaler = model.props[1].default_scaler() + assert isinstance(scaler, SaponificationPropertiesScaler) + + scaler.scale_model(model.props[1]) + + assert isinstance(model.props[1].scaling_factor, Suffix) + + sfx = model.props[1].scaling_factor + assert len(sfx) == 9 + assert sfx[model.props[1].flow_vol] == pytest.approx(1e2, rel=1e-8) + assert 
sfx[model.props[1].pressure] == pytest.approx(1e-5, rel=1e-8) + assert sfx[model.props[1].temperature] == pytest.approx(1 / 310.65, rel=1e-8) + for k, v in model.props[1].conc_mol_comp.items(): + if k == "H2O": + assert sfx[v] == pytest.approx(1e-4, rel=1e-8) + else: + assert sfx[v] == pytest.approx(1e-2, rel=1e-8) + assert sfx[model.props[1].conc_water_eqn] == pytest.approx(1e-4, rel=1e-8) diff --git a/idaes/models/unit_models/equilibrium_reactor.py b/idaes/models/unit_models/equilibrium_reactor.py index 4015e4ea76..a8a92c96ae 100644 --- a/idaes/models/unit_models/equilibrium_reactor.py +++ b/idaes/models/unit_models/equilibrium_reactor.py @@ -16,7 +16,7 @@ # Import Pyomo libraries from pyomo.common.config import ConfigBlock, ConfigValue, In, Bool -from pyomo.environ import Reference +from pyomo.environ import Constraint, Reference, units # Import IDAES cores from idaes.core import ( @@ -32,16 +32,175 @@ is_physical_parameter_block, is_reaction_parameter_block, ) +from idaes.core.scaling import CustomScalerBase __author__ = "Andrew Lee" +class EquilibriumReactorScaler(CustomScalerBase): + """ + Default modular scaler for Equilibrium reactors. + + This Scaler relies on modular the associated property and reaction packages, + either through user provided options (submodel_scalers argument) or by default + Scalers assigned to the packages. + + Reaction generation terms are scaled based on component flow rates, whilst + extents of reaction are unscaled. Heat duty is scaled to kW and pressure drop + to 0.1 bar. All constraints are scaled using the inverse maximum scheme. + """ + + UNIT_SCALING_FACTORS = { + # "QuantityName: (reference units, scaling factor) + "Pressure Change": (units.bar, 10), + } + + def variable_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: dict = None + ): + """ + Routine to apply scaling factors to variables in model. + + Submodel Scalers are called for the property and reaction blocks. 
+ Reaction generation terms are scaled based on component flow rates, whilst + extents of reaction are unscaled. Heat duty is scaled to kW and pressure drop + to 0.1 bar. + + Args: + model: model to be scaled + overwrite: whether to overwrite existing scaling factors + submodel_scalers: dict of Scalers to use for sub-models, keyed by submodel local name + + Returns: + None + """ + # Call scaling methods for sub-models + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.properties_in", + method="variable_scaling_routine", + submodel_scalers=submodel_scalers, + overwrite=overwrite, + ) + self.propagate_state_scaling( + target_state=model.control_volume.properties_out, + source_state=model.control_volume.properties_in, + overwrite=overwrite, + ) + + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.properties_out", + method="variable_scaling_routine", + submodel_scalers=submodel_scalers, + overwrite=overwrite, + ) + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.reactions", + method="variable_scaling_routine", + submodel_scalers=submodel_scalers, + overwrite=overwrite, + ) + + # Scaling control volume variables + # Reaction generation and extent are hard to know a priori + # A bad guess is worse than no guess, so leave these unscaled + # AL 10/2024: Tried scaling generation by component flow, but that was bad + + # Pressure drop - optional + if hasattr(model.control_volume, "deltaP"): + for t in model.flowsheet().time: + self.scale_variable_by_units( + model.control_volume.deltaP[t], overwrite=overwrite + ) + + # Heat transfer - optional + # Scale heat based on enthalpy flow entering reactor + if hasattr(model.control_volume, "heat"): + for t in model.flowsheet().time: + h_in = 0 + for p in model.control_volume.properties_in.phase_list: + h_in += sum( + self.get_expression_nominal_values( + model.control_volume.properties_in[ + t + ].get_enthalpy_flow_terms(p) + ) + ) + # Scale for heat 
is general one order of magnitude less than enthalpy flow + self.set_variable_scaling_factor( + model.control_volume.heat[t], 1 / (0.1 * h_in) + ) + + def constraint_scaling_routine( + self, model, overwrite: bool = False, submodel_scalers: dict = None + ): + """ + Routine to apply scaling factors to constraints in model. + + Submodel Scalers are called for the property and reaction blocks. All other constraints + are scaled using the inverse maximum shceme. + + Args: + model: model to be scaled + overwrite: whether to overwrite existing scaling factors + submodel_scalers: dict of Scalers to use for sub-models, keyed by submodel local name + + Returns: + None + """ + # Call scaling methods for sub-models + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.properties_in", + method="constraint_scaling_routine", + submodel_scalers=submodel_scalers, + overwrite=overwrite, + ) + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.properties_out", + method="constraint_scaling_routine", + submodel_scalers=submodel_scalers, + overwrite=overwrite, + ) + self.call_submodel_scaler_method( + model=model, + submodel="control_volume.reactions", + method="constraint_scaling_routine", + submodel_scalers=submodel_scalers, + overwrite=overwrite, + ) + + # Scale control volume constraints + for c in model.control_volume.component_data_objects( + Constraint, descend_into=False + ): + self.scale_constraint_by_nominal_value( + c, + scheme="inverse_maximum", + overwrite=overwrite, + ) + + # Scale unit level constraints + if hasattr(model, "rate_reaction_constraint"): + for c in model.rate_reaction_constraint.values(): + self.scale_constraint_by_nominal_value( + c, + scheme="inverse_maximum", + overwrite=overwrite, + ) + + @declare_process_block_class("EquilibriumReactor") class EquilibriumReactorData(UnitModelBlockData): """ Standard Equilibrium Reactor Unit Model Class """ + default_scaler = EquilibriumReactorScaler + CONFIG = 
ConfigBlock() CONFIG.declare( "dynamic", diff --git a/idaes/models/unit_models/tests/test_equilibrium_reactor.py b/idaes/models/unit_models/tests/test_equilibrium_reactor.py index 71fdb23ff8..881bf70df6 100644 --- a/idaes/models/unit_models/tests/test_equilibrium_reactor.py +++ b/idaes/models/unit_models/tests/test_equilibrium_reactor.py @@ -15,9 +15,18 @@ Authors: Andrew Lee """ +from math import exp import pytest -from pyomo.environ import check_optimal_termination, ConcreteModel, value, units +from pyomo.environ import ( + assert_optimal_termination, + ComponentMap, + ConcreteModel, + Suffix, + TransformationFactory, + units, + value, +) from idaes.core import ( FlowsheetBlock, @@ -25,7 +34,10 @@ EnergyBalanceType, MomentumBalanceType, ) -from idaes.models.unit_models.equilibrium_reactor import EquilibriumReactor +from idaes.models.unit_models.equilibrium_reactor import ( + EquilibriumReactor, + EquilibriumReactorScaler, +) from idaes.models.properties.examples.saponification_thermo import ( SaponificationParameterBlock, ) @@ -37,6 +49,10 @@ number_total_constraints, number_unused_variables, ) +from idaes.core.util.scaling import ( + get_jacobian, + jacobian_cond, +) from idaes.core.util.testing import ( PhysicalParameterTestBlock, ReactionParameterTestBlock, @@ -49,6 +65,7 @@ InitializationStatus, ) from idaes.core.util import DiagnosticsToolbox +from idaes.core.scaling import set_scaling_factor # ----------------------------------------------------------------------------- # Get default solver for testing @@ -86,6 +103,7 @@ def test_config(): assert m.fs.unit.config.reaction_package is m.fs.reactions assert m.fs.unit.default_initializer is SingleControlVolumeUnitInitializer + assert m.fs.unit.default_scaler is EquilibriumReactorScaler # ----------------------------------------------------------------------------- @@ -167,7 +185,7 @@ def test_solve(self, sapon): results = solver.solve(sapon) # Check for optimal solution - assert 
check_optimal_termination(results) + assert_optimal_termination(results) @pytest.mark.solver @pytest.mark.skipif(solver is None, reason="Solver not available") @@ -370,3 +388,383 @@ def test_block_triangularization(self, model): assert not model.fs.unit.inlet.temperature[0].fixed assert not model.fs.unit.inlet.pressure[0].fixed + + +class DummyScaler: + def variable_scaling_routine(self, model, **kwargs): + model._dummy_var_scaler = True + + def constraint_scaling_routine(self, model, **kwargs): + model._dummy_con_scaler = True + + +class TestEquilibriumReactorScaler: + @pytest.fixture + def model(self): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + + m.fs.properties = SaponificationParameterBlock() + m.fs.reactions = SaponificationReactionParameterBlock( + property_package=m.fs.properties + ) + + m.fs.unit = EquilibriumReactor( + property_package=m.fs.properties, + reaction_package=m.fs.reactions, + has_equilibrium_reactions=False, + has_heat_transfer=True, + has_heat_of_reaction=True, + has_pressure_change=True, + ) + + m.fs.unit.inlet.flow_vol[0].set_value(1.0e-03) + m.fs.unit.inlet.conc_mol_comp[0, "H2O"].set_value(55388.0) + m.fs.unit.inlet.conc_mol_comp[0, "NaOH"].set_value(100.0) + m.fs.unit.inlet.conc_mol_comp[0, "EthylAcetate"].set_value(100.0) + m.fs.unit.inlet.conc_mol_comp[0, "SodiumAcetate"].set_value(0.0) + m.fs.unit.inlet.conc_mol_comp[0, "Ethanol"].set_value(0.0) + + m.fs.unit.inlet.temperature[0].set_value(303.15) + m.fs.unit.inlet.pressure[0].set_value(101325.0) + + m.fs.unit.heat_duty.fix(0) + m.fs.unit.deltaP.fix(0) + + return m + + @pytest.mark.component + def test_variable_scaling_routine(self, model): + scaler = model.fs.unit.default_scaler() + + assert isinstance(scaler, EquilibriumReactorScaler) + + scaler.variable_scaling_routine(model.fs.unit) + + # Inlet state + sfx_in = model.fs.unit.control_volume.properties_in[0].scaling_factor + assert isinstance(sfx_in, Suffix) + assert len(sfx_in) == 8 + assert sfx_in[ + 
model.fs.unit.control_volume.properties_in[0].flow_vol + ] == pytest.approx(1e2, rel=1e-8) + assert sfx_in[ + model.fs.unit.control_volume.properties_in[0].pressure + ] == pytest.approx(1e-5, rel=1e-8) + assert sfx_in[ + model.fs.unit.control_volume.properties_in[0].temperature + ] == pytest.approx(1 / 310.65, rel=1e-8) + for k, v in model.fs.unit.control_volume.properties_in[0].conc_mol_comp.items(): + if k == "H2O": + assert sfx_in[v] == pytest.approx(1e-4, rel=1e-8) + else: + assert sfx_in[v] == pytest.approx(1e-2, rel=1e-8) + + # Outlet state - should be the same as the inlet + sfx_out = model.fs.unit.control_volume.properties_out[0].scaling_factor + assert isinstance(sfx_out, Suffix) + assert len(sfx_out) == 8 + assert sfx_out[ + model.fs.unit.control_volume.properties_out[0].flow_vol + ] == pytest.approx(1e2, rel=1e-8) + assert sfx_out[ + model.fs.unit.control_volume.properties_out[0].pressure + ] == pytest.approx(1e-5, rel=1e-8) + assert sfx_out[ + model.fs.unit.control_volume.properties_out[0].temperature + ] == pytest.approx(1 / 310.65, rel=1e-8) + for k, v in model.fs.unit.control_volume.properties_out[ + 0 + ].conc_mol_comp.items(): + if k == "H2O": + assert sfx_out[v] == pytest.approx(1e-4, rel=1e-8) + else: + assert sfx_out[v] == pytest.approx(1e-2, rel=1e-8) + + # Reaction block + sfx_rxn = model.fs.unit.control_volume.reactions[0].scaling_factor + assert isinstance(sfx_rxn, Suffix) + assert len(sfx_rxn) == 2 + assert sfx_rxn[ + model.fs.unit.control_volume.reactions[0].k_rxn + ] == pytest.approx( + 1 / (3.132e6 * exp(-43000 / (8.31446262 * 310.65))), rel=1e-8 + ) + assert sfx_rxn[ + model.fs.unit.control_volume.reactions[0].reaction_rate["R1"] + ] == pytest.approx(1e2, rel=1e-8) + + # Check that unit model has scaling factors + sfx_cv = model.fs.unit.control_volume.scaling_factor + assert isinstance(sfx_cv, Suffix) + assert len(sfx_cv) == 2 + assert sfx_cv[model.fs.unit.control_volume.heat[0]] == pytest.approx( + 1.917448e-05, rel=1e-3 + ) + assert 
sfx_cv[model.fs.unit.control_volume.deltaP[0]] == pytest.approx( + 1e-4, rel=1e-3 + ) + + # No unit level variables to scale, so no suffix + assert not hasattr(model.fs.unit, "scaling_factor") + + @pytest.mark.component + def test_variable_scaling_routine_submodel_scaler(self, model): + scaler = model.fs.unit.default_scaler() + + scaler_map = ComponentMap() + scaler_map[model.fs.unit.control_volume.properties_in] = DummyScaler + scaler_map[model.fs.unit.control_volume.properties_out] = DummyScaler + scaler_map[model.fs.unit.control_volume.reactions] = DummyScaler + + scaler.variable_scaling_routine( + model.fs.unit, + submodel_scalers=scaler_map, + ) + + # Should call DummyScaler submethod for each submodel + # Should add _dummy_var_scaler = True to all submodels + assert model.fs.unit.control_volume.properties_in[0]._dummy_var_scaler + assert model.fs.unit.control_volume.properties_out[0]._dummy_var_scaler + assert model.fs.unit.control_volume.reactions[0]._dummy_var_scaler + + @pytest.mark.component + def test_constraint_scaling_routine(self, model): + scaler = model.fs.unit.default_scaler() + + assert isinstance(scaler, EquilibriumReactorScaler) + + scaler.constraint_scaling_routine(model.fs.unit) + + # Check that sub-models have suffixes - we will assume they are right at this point + sfx_in = model.fs.unit.control_volume.properties_in[0].scaling_factor + assert isinstance(sfx_in, Suffix) + assert ( + len(sfx_in) == 0 + ) # inlet has no constraints. 
Not quite sure why the Suffix exists + + sfx_out = model.fs.unit.control_volume.properties_out[0].scaling_factor + assert isinstance(sfx_out, Suffix) + assert len(sfx_out) == 1 + assert sfx_out[ + model.fs.unit.control_volume.properties_out[0.0].conc_water_eqn + ] == pytest.approx(1e-4, rel=1e-8) + + sfx_rxn = model.fs.unit.control_volume.reactions[0].scaling_factor + assert isinstance(sfx_rxn, Suffix) + assert len(sfx_rxn) == 2 + assert sfx_rxn[ + model.fs.unit.control_volume.reactions[0.0].arrhenius_eqn + ] == pytest.approx(1, rel=1e-8) + assert sfx_rxn[ + model.fs.unit.control_volume.reactions[0.0].rate_expression["R1"] + ] == pytest.approx(1e-4, rel=1e-8) + + # Check that unit model has scaling factors + sfx_cv = model.fs.unit.control_volume.scaling_factor + assert isinstance(model.fs.unit.control_volume.scaling_factor, Suffix) + assert len(sfx_cv) == 12 + assert sfx_cv[ + model.fs.unit.control_volume.enthalpy_balances[0.0] + ] == pytest.approx(8.03894082e-10, rel=1e-8) + assert sfx_cv[ + model.fs.unit.control_volume.pressure_balance[0.0] + ] == pytest.approx(9.86923267e-6, rel=1e-8) + for c in model.fs.unit.control_volume.material_balances.values(): + assert sfx_cv[c] == pytest.approx(1e-2, rel=1e-8) + for ( + c + ) in ( + model.fs.unit.control_volume.rate_reaction_stoichiometry_constraint.values() + ): + assert sfx_cv[c] == pytest.approx(1, rel=1e-8) + + sfx_unit = model.fs.unit.scaling_factor + assert isinstance(sfx_unit, Suffix) + assert len(sfx_unit) == 1 + assert sfx_unit[ + model.fs.unit.rate_reaction_constraint[0.0, "R1"] + ] == pytest.approx(1, rel=1e-8) + + @pytest.mark.component + def test_constraint_scaling_routine_submodel_scaler(self, model): + scaler = model.fs.unit.default_scaler() + + scaler_map = ComponentMap() + scaler_map[model.fs.unit.control_volume.properties_in] = DummyScaler + scaler_map[model.fs.unit.control_volume.properties_out] = DummyScaler + scaler_map[model.fs.unit.control_volume.reactions] = DummyScaler + + 
scaler.constraint_scaling_routine( + model.fs.unit, + submodel_scalers=scaler_map, + ) + + # Should call DummyScaler submethod for each submodel + # Should add _dummy_con_scaler = True to all submodels + assert model.fs.unit.control_volume.properties_in[0]._dummy_con_scaler + assert model.fs.unit.control_volume.properties_out[0]._dummy_con_scaler + assert model.fs.unit.control_volume.reactions[0]._dummy_con_scaler + + @pytest.mark.component + def test_scale_model(self, model): + scaler = model.fs.unit.default_scaler() + + assert isinstance(scaler, EquilibriumReactorScaler) + + scaler.scale_model(model.fs.unit) + + # Inlet state + sfx_in = model.fs.unit.control_volume.properties_in[0].scaling_factor + assert isinstance(sfx_in, Suffix) + assert len(sfx_in) == 8 + assert sfx_in[ + model.fs.unit.control_volume.properties_in[0].flow_vol + ] == pytest.approx(1e2, rel=1e-8) + assert sfx_in[ + model.fs.unit.control_volume.properties_in[0].pressure + ] == pytest.approx(1e-5, rel=1e-8) + assert sfx_in[ + model.fs.unit.control_volume.properties_in[0].temperature + ] == pytest.approx(1 / 310.65, rel=1e-8) + for k, v in model.fs.unit.control_volume.properties_in[0].conc_mol_comp.items(): + if k == "H2O": + assert sfx_in[v] == pytest.approx(1e-4, rel=1e-8) + else: + assert sfx_in[v] == pytest.approx(1e-2, rel=1e-8) + + # Outlet state - should be the same as the inlet + sfx_out = model.fs.unit.control_volume.properties_out[0].scaling_factor + assert isinstance(sfx_out, Suffix) + assert len(sfx_out) == 9 + assert sfx_out[ + model.fs.unit.control_volume.properties_out[0].flow_vol + ] == pytest.approx(1e2, rel=1e-8) + assert sfx_out[ + model.fs.unit.control_volume.properties_out[0].pressure + ] == pytest.approx(1e-5, rel=1e-8) + assert sfx_out[ + model.fs.unit.control_volume.properties_out[0].temperature + ] == pytest.approx(1 / 310.65, rel=1e-8) + for k, v in model.fs.unit.control_volume.properties_out[ + 0 + ].conc_mol_comp.items(): + if k == "H2O": + assert sfx_out[v] == 
pytest.approx(1e-4, rel=1e-8) + else: + assert sfx_out[v] == pytest.approx(1e-2, rel=1e-8) + assert sfx_out[ + model.fs.unit.control_volume.properties_out[0.0].conc_water_eqn + ] == pytest.approx(1e-4, rel=1e-8) + + # Reaction block + sfx_rxn = model.fs.unit.control_volume.reactions[0].scaling_factor + assert isinstance(sfx_rxn, Suffix) + assert len(sfx_rxn) == 4 + assert sfx_rxn[ + model.fs.unit.control_volume.reactions[0].k_rxn + ] == pytest.approx( + 1 / (3.132e6 * exp(-43000 / (8.31446262 * 310.65))), rel=1e-8 + ) + assert sfx_rxn[ + model.fs.unit.control_volume.reactions[0].reaction_rate["R1"] + ] == pytest.approx(1e2, rel=1e-8) + assert sfx_rxn[ + model.fs.unit.control_volume.reactions[0.0].arrhenius_eqn + ] == pytest.approx(5.4240896, rel=1e-8) + assert sfx_rxn[ + model.fs.unit.control_volume.reactions[0.0].rate_expression["R1"] + ] == pytest.approx(5.4240896e-4, rel=1e-8) + + # Check that unit model has scaling factors + sfx_cv = model.fs.unit.control_volume.scaling_factor + assert isinstance(sfx_cv, Suffix) + assert len(sfx_cv) == 14 + assert sfx_cv[model.fs.unit.control_volume.heat[0]] == pytest.approx( + 1.917448e-05, rel=1e-3 + ) + assert sfx_cv[model.fs.unit.control_volume.deltaP[0]] == pytest.approx( + 1e-4, rel=1e-3 + ) + assert sfx_cv[ + model.fs.unit.control_volume.enthalpy_balances[0.0] + ] == pytest.approx(7.71546823e-08, rel=1e-8) + assert sfx_cv[ + model.fs.unit.control_volume.pressure_balance[0.0] + ] == pytest.approx(1e-5, rel=1e-8) + for (_, _, j), c in model.fs.unit.control_volume.material_balances.items(): + if j == "H2O": + assert sfx_cv[c] == pytest.approx(1e-2, rel=1e-8) + else: + assert sfx_cv[c] == pytest.approx(1, rel=1e-8) + for ( + _, + _, + j, + ), c in ( + model.fs.unit.control_volume.rate_reaction_stoichiometry_constraint.items() + ): + assert sfx_cv[c] == pytest.approx(1, rel=1e-8) + + sfx_unit = model.fs.unit.scaling_factor + assert isinstance(sfx_unit, Suffix) + assert len(sfx_unit) == 1 + assert sfx_unit[ + 
model.fs.unit.rate_reaction_constraint[0.0, "R1"] + ] == pytest.approx(1e2, rel=1e-8) + + @pytest.mark.integration + def test_example_case(self): + m = ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + + m.fs.properties = SaponificationParameterBlock() + m.fs.reactions = SaponificationReactionParameterBlock( + property_package=m.fs.properties + ) + + m.fs.equil = EquilibriumReactor( + property_package=m.fs.properties, + reaction_package=m.fs.reactions, + has_equilibrium_reactions=False, + has_heat_of_reaction=True, + ) + + m.fs.equil.inlet.flow_vol.fix(1.0e-03) + m.fs.equil.inlet.conc_mol_comp[0, "H2O"].fix(55388.0) + m.fs.equil.inlet.conc_mol_comp[0, "NaOH"].fix(100.0) + m.fs.equil.inlet.conc_mol_comp[0, "EthylAcetate"].fix(100.0) + m.fs.equil.inlet.conc_mol_comp[0, "SodiumAcetate"].fix(1e-8) + m.fs.equil.inlet.conc_mol_comp[0, "Ethanol"].fix(1e-8) + + m.fs.equil.inlet.temperature.fix(303.15) + m.fs.equil.inlet.pressure.fix(101325.0) + + initializer = BlockTriangularizationInitializer() + initializer.initialize(m.fs.equil) + + set_scaling_factor(m.fs.equil.control_volume.properties_in[0].flow_vol, 1e3) + + scaler = EquilibriumReactorScaler() + scaler.scale_model(m.fs.equil) + + m.fs.equil.inlet.flow_vol.fix(1) + m.fs.equil.inlet.conc_mol_comp[0, "NaOH"].fix(200.0) + m.fs.equil.inlet.conc_mol_comp[0, "EthylAcetate"].fix(100.0) + m.fs.equil.inlet.conc_mol_comp[0, "SodiumAcetate"].fix(50) + m.fs.equil.inlet.conc_mol_comp[0, "Ethanol"].fix(1e-8) + + m.fs.equil.inlet.temperature.fix(320) + + solver = get_solver( + "ipopt_v2", writer_config={"linear_presolve": True, "scale_model": True} + ) + results = solver.solve(m, tee=True) + assert_optimal_termination(results) + + # Check condition number to confirm scaling + sm = TransformationFactory("core.scale_model").create_using(m, rename=False) + jac, _ = get_jacobian(sm, scaled=False) + assert (jacobian_cond(jac=jac, scaled=False)) == pytest.approx( + 4.987e05, rel=1e-3 + ) From 
d931060d32d2144d0d71e0b7183adf67646c3830 Mon Sep 17 00:00:00 2001 From: Andrew Lee Date: Fri, 25 Oct 2024 09:01:39 -0400 Subject: [PATCH 5/5] Minor improvements to scaling API (#1507) * Adding infrastructure to support ipopt_v2 * Moving core/util to ipopt_v2 * Moving MH initializer to ipopt_v2 * Fixing pint version issue * Set TSA to use old IPOPT interface * Trying to resolve Windows failures * Working on platform dependent failure * BTInitializer with presolve * Moving last bits of core code to ipopt_v2 * Starting on idaes/models * Removing ma57_automatic_scaling default and updating idaes/models/control * idaes/model/properties part 1 * Remaining parts of idaes/models/properties * Fixing typo * Switching idaes/models/unit_models to ipopt_v2 * Attempt to work around HXLC issues for now * Some clean up * Switching modular properties initializer to solver indexed blocks * Addressing comments * Recovering from previous branch * Some clean up * Adding ScalerBase class and tests * Working on CustomScalerBase * Nominal value constraint scaling * Adding some initial integration tests for scaling * Some more nominal magnitude scaling approaches * Prototyping pseudojacobian scaler * Trying to debug pseudojacobian * Removing unnecessary import * Addressing pylint issues * Cleaning up nominal jacobian * More methods for CustomScalerBase * Prototyping Gibbs reactor scaler * Gibbs reactor constraint scaling * Working on testing and profiling * Refining Gibbs scaler * Refining nominal value walker * Fixing walker tests * Testing GibbsScaler with initialization * Fixing auto norm scaling on indexed blocks * Testing scaling profiler * Fixing typos * Fixing pylint issue * Improving some doc strings * Apply suggestions from code review Co-authored-by: MarcusHolly <96305519+MarcusHolly@users.noreply.github.com> * Fixing issue with autoscaling vars with value None * Adding profiler to __init__ * Fixing name for RSS method * Fixing import of pyomo.environ * Allowing default scaling
for indexed components * Adding catch for critical solver failure in profiler * Starting on docs * Finishing docs and unifying method names * Profiler report methods and docs * Fixing typos * Pylint: fix unnecessary f-string * Working on equilibrium reactor scaler * Finishing tests for equilibrium reactor scaler * Updating requil scaler * Fixing typo * Addressing pylint issue * Improving guess for heat duty scaling * Updating test value for heat duty scaling * Minor fix * Adding Enum to __init__ * Adding doc strings for saponification scalers * Fixing typo * Minor clean up * Removing need for string constants --------- Co-authored-by: MarcusHolly <96305519+MarcusHolly@users.noreply.github.com> --- idaes/core/scaling/custom_scaler_base.py | 29 +++++------- .../scaling/tests/test_custom_scaler_base.py | 44 ++++--------------- .../models/unit_models/equilibrium_reactor.py | 22 ++++------ idaes/models/unit_models/gibbs_reactor.py | 12 ++--- idaes/models/unit_models/tests/test_gibbs.py | 4 +- 5 files changed, 33 insertions(+), 78 deletions(-) diff --git a/idaes/core/scaling/custom_scaler_base.py b/idaes/core/scaling/custom_scaler_base.py index 0d47673805..8e94cc7c5a 100644 --- a/idaes/core/scaling/custom_scaler_base.py +++ b/idaes/core/scaling/custom_scaler_base.py @@ -554,8 +554,7 @@ def propagate_state_scaling( def call_submodel_scaler_method( self, - model, - submodel: str, + submodel, method: str, submodel_scalers: ComponentMap = None, overwrite: bool = False, @@ -567,8 +566,7 @@ def call_submodel_scaler_method( default scaler for the submodel is used.
Args: - model: parent model of submodel - submodel: local name of submodel to be scaled as str + submodel: submodel to be scaled submodel_scalers: user provided ComponentMap of Scalers to use for submodels method: name of method to call from submodel (as string) overwrite: whether to overwrite existing scaling factors @@ -576,39 +574,32 @@ def call_submodel_scaler_method( Returns: None """ - # Get actual submodel object from name - # For this method, we have to use the component name as the Scaler is written - # before the model is constructed. - sm_obj = model.find_component(submodel) - if submodel_scalers is None: submodel_scalers = {} # Iterate over indices of submodel - for smdata in sm_obj.values(): + for smdata in submodel.values(): # Get Scaler for submodel - if sm_obj in submodel_scalers: - scaler = submodel_scalers[sm_obj] + if submodel in submodel_scalers: + scaler = submodel_scalers[submodel] if callable(scaler): # Check to see if Scaler is callable - this implies it is a class and not an instance # Call the class to create an instance scaler = scaler() - _log.debug(f"Using user-defined Scaler for {model}.{submodel}.") + _log.debug(f"Using user-defined Scaler for {submodel}.") else: try: scaler = smdata.default_scaler - _log.debug(f"Using default Scaler for {model}.{submodel}.") + _log.debug(f"Using default Scaler for {submodel}.") except AttributeError: _log.debug( - f"No default Scaler set for {model}.{submodel}. Cannot call {method}." + f"No default Scaler set for {submodel}. Cannot call {method}." ) return if scaler is not None: scaler = scaler() else: - _log.debug( - f"No Scaler found for {model}.{submodel}. Cannot call {method}." - ) + _log.debug(f"No Scaler found for {submodel}. 
Cannot call {method}.") # If a Scaler is found, call desired method if scaler is not None: @@ -616,6 +607,6 @@ def call_submodel_scaler_method( smeth = getattr(scaler, method) except AttributeError: raise AttributeError( - f"Scaler for {model}.{submodel} does not have a method named {method}." + f"Scaler for {submodel} does not have a method named {method}." ) smeth(smdata, overwrite=overwrite) diff --git a/idaes/core/scaling/tests/test_custom_scaler_base.py b/idaes/core/scaling/tests/test_custom_scaler_base.py index e3e9637807..6cd6697e60 100644 --- a/idaes/core/scaling/tests/test_custom_scaler_base.py +++ b/idaes/core/scaling/tests/test_custom_scaler_base.py @@ -608,15 +608,12 @@ def test_call_submodel_scaler_method_no_scaler(self, caplog): m.b = Block([1, 2, 3]) sb = CustomScalerBase() - sb.call_submodel_scaler_method(m, "b", method="dummy_method", overwrite=True) + sb.call_submodel_scaler_method(m.b, method="dummy_method", overwrite=True) for bd in m.b.values(): assert not hasattr(bd, "_dummy_scaler_test") - assert ( - "No default Scaler set for unknown.b. Cannot call dummy_method." - in caplog.text - ) + assert "No default Scaler set for b. Cannot call dummy_method." in caplog.text @pytest.mark.unit def test_call_submodel_scaler_method_default_scaler(self, caplog): @@ -629,12 +626,12 @@ def test_call_submodel_scaler_method_default_scaler(self, caplog): bd.default_scaler = DummyScaler sb = CustomScalerBase() - sb.call_submodel_scaler_method(m, "b", method="dummy_method", overwrite=True) + sb.call_submodel_scaler_method(m.b, method="dummy_method", overwrite=True) for bd in m.b.values(): assert bd._dummy_scaler_test - assert "Using default Scaler for unknown.b." in caplog.text + assert "Using default Scaler for b." 
in caplog.text @pytest.mark.unit def test_call_submodel_scaler_method_user_scaler(self, caplog): @@ -649,8 +646,7 @@ def test_call_submodel_scaler_method_user_scaler(self, caplog): sb = CustomScalerBase() sb.call_submodel_scaler_method( - m, - "b", + m.b, method="dummy_method", submodel_scalers=scaler_map, overwrite=False, @@ -659,7 +655,7 @@ def test_call_submodel_scaler_method_user_scaler(self, caplog): for bd in m.b.values(): assert not bd._dummy_scaler_test - assert "Using user-defined Scaler for unknown.b." in caplog.text + assert "Using user-defined Scaler for b." in caplog.text @pytest.mark.unit def test_call_submodel_scaler_method_user_scaler_class(self, caplog): @@ -674,8 +670,7 @@ def test_call_submodel_scaler_method_user_scaler_class(self, caplog): sb = CustomScalerBase() sb.call_submodel_scaler_method( - m, - "b", + m.b, method="dummy_method", submodel_scalers=scaler_map, overwrite=False, @@ -684,27 +679,4 @@ def test_call_submodel_scaler_method_user_scaler_class(self, caplog): for bd in m.b.values(): assert not bd._dummy_scaler_test - assert "Using user-defined Scaler for unknown.b." in caplog.text - - @pytest.mark.unit - def test_call_submodel_scaler_method_invalid_method(self): - # Dummy up a nested model - m = ConcreteModel() - m.b = Block([1, 2, 3]) - - scaler_map = ComponentMap() - scaler_map[m.b] = DummyScaler() - - sb = CustomScalerBase() - - with pytest.raises( - AttributeError, - match="Scaler for unknown.b does not have a method named foo.", - ): - sb.call_submodel_scaler_method( - m, - "b", - method="foo", - submodel_scalers=scaler_map, - overwrite=False, - ) + assert "Using user-defined Scaler for b." 
in caplog.text diff --git a/idaes/models/unit_models/equilibrium_reactor.py b/idaes/models/unit_models/equilibrium_reactor.py index a8a92c96ae..5c915fb8ba 100644 --- a/idaes/models/unit_models/equilibrium_reactor.py +++ b/idaes/models/unit_models/equilibrium_reactor.py @@ -76,8 +76,7 @@ def variable_scaling_routine( """ # Call scaling methods for sub-models self.call_submodel_scaler_method( - model=model, - submodel="control_volume.properties_in", + submodel=model.control_volume.properties_in, method="variable_scaling_routine", submodel_scalers=submodel_scalers, overwrite=overwrite, @@ -89,15 +88,13 @@ def variable_scaling_routine( ) self.call_submodel_scaler_method( - model=model, - submodel="control_volume.properties_out", + submodel=model.control_volume.properties_out, method="variable_scaling_routine", submodel_scalers=submodel_scalers, overwrite=overwrite, ) self.call_submodel_scaler_method( - model=model, - submodel="control_volume.reactions", + submodel=model.control_volume.reactions, method="variable_scaling_routine", submodel_scalers=submodel_scalers, overwrite=overwrite, @@ -121,6 +118,8 @@ def variable_scaling_routine( for t in model.flowsheet().time: h_in = 0 for p in model.control_volume.properties_in.phase_list: + # The expression for enthalpy flow might include multiple terms, + # so we will sum over all the terms provided h_in += sum( self.get_expression_nominal_values( model.control_volume.properties_in[ @@ -130,7 +129,7 @@ def variable_scaling_routine( ) # Scale for heat is general one order of magnitude less than enthalpy flow self.set_variable_scaling_factor( - model.control_volume.heat[t], 1 / (0.1 * h_in) + model.control_volume.heat[t], abs(1 / (0.1 * h_in)) ) def constraint_scaling_routine( @@ -152,22 +151,19 @@ def constraint_scaling_routine( """ # Call scaling methods for sub-models self.call_submodel_scaler_method( - model=model, - submodel="control_volume.properties_in", + submodel=model.control_volume.properties_in, 
method="constraint_scaling_routine", submodel_scalers=submodel_scalers, overwrite=overwrite, ) self.call_submodel_scaler_method( - model=model, - submodel="control_volume.properties_out", + submodel=model.control_volume.properties_out, method="constraint_scaling_routine", submodel_scalers=submodel_scalers, overwrite=overwrite, ) self.call_submodel_scaler_method( - model=model, - submodel="control_volume.reactions", + submodel=model.control_volume.reactions, method="constraint_scaling_routine", submodel_scalers=submodel_scalers, overwrite=overwrite, diff --git a/idaes/models/unit_models/gibbs_reactor.py b/idaes/models/unit_models/gibbs_reactor.py index 26d1c56458..d9aa799f7c 100644 --- a/idaes/models/unit_models/gibbs_reactor.py +++ b/idaes/models/unit_models/gibbs_reactor.py @@ -86,16 +86,14 @@ def variable_scaling_routine( # Step 1b: Call Scalers for state blocks # Inlet properties self.call_submodel_scaler_method( - model=model, - submodel="control_volume.properties_in", + submodel=model.control_volume.properties_in, submodel_scalers=submodel_scalers, method="variable_scaling_routine", overwrite=overwrite, ) # Outlet properties self.call_submodel_scaler_method( - model=model, - submodel="control_volume.properties_out", + submodel=model.control_volume.properties_out, submodel_scalers=submodel_scalers, method="variable_scaling_routine", overwrite=overwrite, @@ -152,16 +150,14 @@ def constraint_scaling_routine( # Step 1: Call Scalers for state blocks # Inlet properties self.call_submodel_scaler_method( - model=model, - submodel="control_volume.properties_in", + submodel=model.control_volume.properties_in, submodel_scalers=submodel_scalers, method="constraint_scaling_routine", overwrite=overwrite, ) # Outlet properties self.call_submodel_scaler_method( - model=model, - submodel="control_volume.properties_out", + submodel=model.control_volume.properties_out, submodel_scalers=submodel_scalers, method="constraint_scaling_routine", overwrite=overwrite, diff --git 
a/idaes/models/unit_models/tests/test_gibbs.py b/idaes/models/unit_models/tests/test_gibbs.py index b74e4517d3..726cd083c0 100644 --- a/idaes/models/unit_models/tests/test_gibbs.py +++ b/idaes/models/unit_models/tests/test_gibbs.py @@ -636,8 +636,8 @@ def model(self): scaler.scale_model( m.fs.unit, submodel_scalers={ - "control_volume.properties_in": PropertyScaler, - "control_volume.properties_out": PropertyScaler, + m.fs.unit.control_volume.properties_in: PropertyScaler, + m.fs.unit.control_volume.properties_out: PropertyScaler, }, )