From cf6528cbc158097c4903f0cab68242ff14bb591b Mon Sep 17 00:00:00 2001
From: Andrew Haigh
Date: Tue, 8 Jun 2021 01:05:59 +1000
Subject: [PATCH] Performance improvements to counter context.clone slowdown
 (#1009)

* Add limit to the total number of nodes inferred per context

This change abuses mutable references to create a sort of interior
mutable cell shared between a context and all of its clones. The idea
is that when a node is inferred at the toplevel, it is called with
context = None, creating a new InferenceContext and starting a count
from zero. However, when a context is cloned we re-use the cell and
cause the count in the "parent" context to be incremented when nodes
are inferred in the "child" context.

* Add global inference cache

* Update safe_infer to catch StopIteration
---
 ChangeLog               |  5 ++++
 astroid/context.py      | 60 ++++++++++++++++++++++++++++++++---------
 astroid/helpers.py      |  2 +-
 astroid/node_classes.py |  9 +++++--
 astroid/transforms.py   |  3 +++
 5 files changed, 64 insertions(+), 15 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 1286a980f5..019dbb91a9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -16,6 +16,11 @@ Release Date: TBA
 
 * Add lineno and col_offset for ``Keyword`` nodes and Python 3.9+
 
+* Add global inference cache to speed up inference of long statement blocks
+
+* Add a limit to the total number of nodes inferred indirectly as a result
+  of inferring some node
+
 What's New in astroid 2.5.7?
 ============================
diff --git a/astroid/context.py b/astroid/context.py
index 440d1cebae..089858416a 100644
--- a/astroid/context.py
+++ b/astroid/context.py
@@ -13,7 +13,17 @@
 """Various context related utilities, including inference and call contexts."""
 import contextlib
 import pprint
-from typing import Optional
+from typing import TYPE_CHECKING, MutableMapping, Optional, Sequence, Tuple
+
+if TYPE_CHECKING:
+    from astroid.node_classes import NodeNG
+
+
+_INFERENCE_CACHE = {}
+
+
+def _invalidate_cache():
+    _INFERENCE_CACHE.clear()
 
 
 class InferenceContext:
@@ -28,11 +38,17 @@ class InferenceContext:
         "lookupname",
         "callcontext",
         "boundnode",
-        "inferred",
         "extra_context",
+        "_nodes_inferred",
     )
 
-    def __init__(self, path=None, inferred=None):
+    max_inferred = 100
+
+    def __init__(self, path=None, nodes_inferred=None):
+        if nodes_inferred is None:
+            self._nodes_inferred = [0]
+        else:
+            self._nodes_inferred = nodes_inferred
         self.path = path or set()
         """
         :type: set(tuple(NodeNG, optional(str)))
@@ -65,14 +81,6 @@ def __init__(self, path=None, inferred=None):
         e.g. the bound node of object.__new__(cls) is the object node
         """
 
-        self.inferred = inferred or {}
-        """
-        :type: dict(seq, seq)
-
-        Inferred node contexts to their mapped results
-        Currently the key is ``(node, lookupname, callcontext, boundnode)``
-        and the value is tuple of the inferred results
-        """
         self.extra_context = {}
         """
         :type: dict(NodeNG, Context)
@@ -81,6 +89,34 @@ def __init__(self, path=None, inferred=None):
         for call arguments
         """
 
+    @property
+    def nodes_inferred(self):
+        """
+        Number of nodes inferred in this context and all its clones/descendants
+
+        Wrap the inner value in a mutable cell so that a single count can be
+        shared and mutated by all clones, in the presence of __slots__
+        """
+        return self._nodes_inferred[0]
+
+    @nodes_inferred.setter
+    def nodes_inferred(self, value):
+        self._nodes_inferred[0] = value
+
+    @property
+    def inferred(
+        self,
+    ) -> MutableMapping[
+        Tuple["NodeNG", Optional[str], Optional[str], Optional[str]], Sequence["NodeNG"]
+    ]:
+        """
+        Inferred node contexts to their mapped results
+
+        Currently the key is ``(node, lookupname, callcontext, boundnode)``
+        and the value is a tuple of the inferred results
+        """
+        return _INFERENCE_CACHE
+
     def push(self, node):
         """Push node into inference path
 
@@ -103,7 +139,7 @@ def clone(self):
         starts with the same context but diverge as each side is inferred
         so the InferenceContext will need be cloned"""
         # XXX copy lookupname/callcontext ?
-        clone = InferenceContext(self.path.copy(), inferred=self.inferred.copy())
+        clone = InferenceContext(self.path.copy(), nodes_inferred=self._nodes_inferred)
         clone.callcontext = self.callcontext
         clone.boundnode = self.boundnode
         clone.extra_context = self.extra_context
diff --git a/astroid/helpers.py b/astroid/helpers.py
index cb16ecdcd5..db86606e11 100644
--- a/astroid/helpers.py
+++ b/astroid/helpers.py
@@ -150,7 +150,7 @@ def safe_infer(node, context=None):
     try:
         inferit = node.infer(context=context)
         value = next(inferit)
-    except exceptions.InferenceError:
+    except (exceptions.InferenceError, StopIteration):
         return None
     try:
         next(inferit)
diff --git a/astroid/node_classes.py b/astroid/node_classes.py
index ff74fa214c..85e3cea617 100644
--- a/astroid/node_classes.py
+++ b/astroid/node_classes.py
@@ -357,12 +357,16 @@ def infer(self, context=None, **kwargs):
             # explicit_inference is not bound, give it self explicitly
             try:
                 # pylint: disable=not-callable
-                yield from self._explicit_inference(self, context, **kwargs)
+                results = tuple(self._explicit_inference(self, context, **kwargs))
+                if context is not None:
+                    context.nodes_inferred += len(results)
+                yield from results
                 return
             except exceptions.UseInferenceDefault:
                 pass
 
         if not context:
+            # nodes_inferred?
             yield from self._infer(context, **kwargs)
             return
 
@@ -378,11 +382,12 @@ def infer(self, context=None, **kwargs):
         # exponentially exploding possible results.
         limit = MANAGER.max_inferable_values
         for i, result in enumerate(generator):
-            if i >= limit:
+            if i >= limit or (context.nodes_inferred > context.max_inferred):
                 yield util.Uninferable
                 break
             results.append(result)
             yield result
+            context.nodes_inferred += 1
 
         # Cache generated results for subsequent inferences of the
         # same node using the same context
diff --git a/astroid/transforms.py b/astroid/transforms.py
index 1c4081cba0..5314fcb237 100644
--- a/astroid/transforms.py
+++ b/astroid/transforms.py
@@ -10,6 +10,8 @@
 import collections
 from functools import lru_cache
 
+from astroid import context as contextmod
+
 
 class TransformVisitor:
     """A visitor for handling transforms.
@@ -42,6 +44,7 @@ def _transform(self, node):
         # if the transformation function returns something, it's
         # expected to be a replacement for the node
         if ret is not None:
+            contextmod._invalidate_cache()
             node = ret
             if ret.__class__ != cls:
                 # Can no longer apply the rest of the transforms.
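
Reviewer note: the sketch below is not part of the patch. It is a minimal, standalone illustration of the shared-cell counter described in the commit message, using simplified stand-in names rather than the real InferenceContext. A one-element list acts as a mutable cell, so a context and every clone derived from it read and update the same running total even with __slots__ in place.

# Illustrative sketch only (hypothetical names, not the astroid API): shows how
# a one-element list serves as a shared mutable cell, letting a context and all
# of its clones increment a single inference counter.

class Context:
    __slots__ = ("_nodes_inferred",)

    max_inferred = 100  # cap on nodes inferred per top-level inference

    def __init__(self, nodes_inferred=None):
        # A fresh top-level context starts its own cell; clones share the caller's.
        self._nodes_inferred = [0] if nodes_inferred is None else nodes_inferred

    @property
    def nodes_inferred(self):
        return self._nodes_inferred[0]

    @nodes_inferred.setter
    def nodes_inferred(self, value):
        self._nodes_inferred[0] = value

    def clone(self):
        # Pass the same list, not a copy: increments made while inferring in
        # the clone are visible to the "parent" context as well.
        return Context(nodes_inferred=self._nodes_inferred)


parent = Context()
child = parent.clone()
child.nodes_inferred += 3
assert parent.nodes_inferred == 3  # the count is shared across all clones

Because clone() passes the cell itself rather than a copy, inference work done in any descendant context counts toward a single per-toplevel budget, which is what allows max_inferred to bound the total amount of work triggered by one node.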