Skip to content

Commit

Permalink
Performance improvements to counter context.clone slowdown (#1009)
Browse files Browse the repository at this point in the history
* Add limit to the total number of nodes inferred per context

This change abuses mutable references to create a sort of interior
mutable cell shared between a context and all of its clones. The idea is
that when a node is inferred at the toplevel, it is called with context
= None, creating a new InferenceContext and starting a count from zero.
However, when a context is cloned we re-use the cell and cause the count
in the "parent" context to be incremented when nodes are inferred in the
"child" context.

* Add global inference cache
* Update safe_infer to catch StopIteration
  • Loading branch information
nelfin authored Jun 7, 2021
1 parent 8579163 commit cf6528c
Show file tree
Hide file tree
Showing 5 changed files with 64 additions and 15 deletions.
5 changes: 5 additions & 0 deletions ChangeLog
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,11 @@ Release Date: TBA

* Add lineno and col_offset for ``Keyword`` nodes and Python 3.9+

* Add global inference cache to speed up inference of long statement blocks

* Add a limit to the total number of nodes inferred indirectly as a result
of inferring some node


What's New in astroid 2.5.7?
============================
Expand Down
60 changes: 48 additions & 12 deletions astroid/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,17 @@
"""Various context related utilities, including inference and call contexts."""
import contextlib
import pprint
from typing import Optional
from typing import TYPE_CHECKING, MutableMapping, Optional, Sequence, Tuple

if TYPE_CHECKING:
from astroid.node_classes import NodeNG


_INFERENCE_CACHE = {}


def _invalidate_cache():
_INFERENCE_CACHE.clear()


class InferenceContext:
Expand All @@ -28,11 +38,17 @@ class InferenceContext:
"lookupname",
"callcontext",
"boundnode",
"inferred",
"extra_context",
"_nodes_inferred",
)

def __init__(self, path=None, inferred=None):
max_inferred = 100

def __init__(self, path=None, nodes_inferred=None):
if nodes_inferred is None:
self._nodes_inferred = [0]
else:
self._nodes_inferred = nodes_inferred
self.path = path or set()
"""
:type: set(tuple(NodeNG, optional(str)))
Expand Down Expand Up @@ -65,14 +81,6 @@ def __init__(self, path=None, inferred=None):
e.g. the bound node of object.__new__(cls) is the object node
"""
self.inferred = inferred or {}
"""
:type: dict(seq, seq)
Inferred node contexts to their mapped results
Currently the key is ``(node, lookupname, callcontext, boundnode)``
and the value is tuple of the inferred results
"""
self.extra_context = {}
"""
:type: dict(NodeNG, Context)
Expand All @@ -81,6 +89,34 @@ def __init__(self, path=None, inferred=None):
for call arguments
"""

@property
def nodes_inferred(self):
    """Number of nodes inferred in this context and all of its clones/descendants.

    The count is stored in a one-element list (``self._nodes_inferred``) used
    as a mutable cell: clones share the same list object, so increments made
    while inferring in a "child" context are also visible to the "parent"
    context. The cell indirection is needed because ``__slots__`` rules out
    sharing state through an ordinary writable class attribute.
    """
    return self._nodes_inferred[0]

@nodes_inferred.setter
def nodes_inferred(self, value):
    # Write through to the shared cell so every clone observes the new total.
    self._nodes_inferred[0] = value

@property
def inferred(
    self,
) -> MutableMapping[
    Tuple["NodeNG", Optional[str], Optional[str], Optional[str]], Sequence["NodeNG"]
]:
    """Mapping from inferred node contexts to their results.

    Currently the key is ``(node, lookupname, callcontext, boundnode)``
    and the value is a tuple of the inferred results.

    NOTE(review): this returns the module-level ``_INFERENCE_CACHE``, so the
    mapping is shared by *all* InferenceContext instances and persists across
    top-level inferences until ``_invalidate_cache()`` is called.
    """
    return _INFERENCE_CACHE

def push(self, node):
"""Push node into inference path
Expand All @@ -103,7 +139,7 @@ def clone(self):
starts with the same context but diverge as each side is inferred
so the InferenceContext will need be cloned"""
# XXX copy lookupname/callcontext ?
clone = InferenceContext(self.path.copy(), inferred=self.inferred.copy())
clone = InferenceContext(self.path.copy(), nodes_inferred=self._nodes_inferred)
clone.callcontext = self.callcontext
clone.boundnode = self.boundnode
clone.extra_context = self.extra_context
Expand Down
2 changes: 1 addition & 1 deletion astroid/helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def safe_infer(node, context=None):
try:
inferit = node.infer(context=context)
value = next(inferit)
except exceptions.InferenceError:
except (exceptions.InferenceError, StopIteration):
return None
try:
next(inferit)
Expand Down
9 changes: 7 additions & 2 deletions astroid/node_classes.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,12 +357,16 @@ def infer(self, context=None, **kwargs):
# explicit_inference is not bound, give it self explicitly
try:
# pylint: disable=not-callable
yield from self._explicit_inference(self, context, **kwargs)
results = tuple(self._explicit_inference(self, context, **kwargs))
if context is not None:
context.nodes_inferred += len(results)
yield from results
return
except exceptions.UseInferenceDefault:
pass

if not context:
# nodes_inferred?
yield from self._infer(context, **kwargs)
return

Expand All @@ -378,11 +382,12 @@ def infer(self, context=None, **kwargs):
# exponentially exploding possible results.
limit = MANAGER.max_inferable_values
for i, result in enumerate(generator):
if i >= limit:
if i >= limit or (context.nodes_inferred > context.max_inferred):
yield util.Uninferable
break
results.append(result)
yield result
context.nodes_inferred += 1

# Cache generated results for subsequent inferences of the
# same node using the same context
Expand Down
3 changes: 3 additions & 0 deletions astroid/transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@
import collections
from functools import lru_cache

from astroid import context as contextmod


class TransformVisitor:
"""A visitor for handling transforms.
Expand Down Expand Up @@ -42,6 +44,7 @@ def _transform(self, node):
# if the transformation function returns something, it's
# expected to be a replacement for the node
if ret is not None:
contextmod._invalidate_cache()
node = ret
if ret.__class__ != cls:
# Can no longer apply the rest of the transforms.
Expand Down

0 comments on commit cf6528c

Please sign in to comment.