From d3ba065af21d60dcbe59a09d6a1d8f1d0890c39a Mon Sep 17 00:00:00 2001
From: antazoey
Date: Wed, 20 Sep 2023 16:30:15 -0500
Subject: [PATCH] refactor: move web3 loggers and fix test failures [APE-1404] (#1670)

---
 .pre-commit-config.yaml                   |  2 +-
 pyproject.toml                            |  2 +-
 setup.py                                  |  2 +-
 src/ape/api/providers.py                  |  5 ++
 src/ape/api/query.py                      |  6 +-
 src/ape/cli/options.py                    |  6 +-
 src/ape/logging.py                        | 67 +++++++++++----------
 src/ape/managers/chain.py                 |  9 ---
 src/ape_networks/_cli.py                  |  4 +-
 src/ape_plugins/_cli.py                   |  2 +-
 src/ape_plugins/utils.py                  |  4 +-
 tests/conftest.py                         | 74 +++++++++++++++++++++++-
 tests/functional/test_block_container.py  |  6 +-
 tests/functional/test_config.py           |  8 ++-
 tests/functional/test_contracts_cache.py  | 17 +-----
 tests/functional/test_networks.py         | 10 ++--
 tests/functional/test_query.py            | 11 ++--
 tests/functional/utils/test_abi.py        | 11 ++--
 tests/integration/cli/test_compile.py     | 13 ++++-
 tests/integration/cli/test_plugins.py     |  7 +--
 20 files changed, 164 insertions(+), 102 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 263bace8b8..29f1701b43 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,7 +10,7 @@ repos:
       - id: isort

   - repo: https://github.com/psf/black
-    rev: 23.7.0
+    rev: 23.9.1
     hooks:
       - id: black
         name: black
diff --git a/pyproject.toml b/pyproject.toml
index cfb46a3f4d..3f240d1bbf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
 [build-system]
-requires = ["setuptools>=51.1.1", "wheel", "setuptools_scm[toml]>=5.0"]
+requires = ["setuptools>=51.1.1", "wheel", "setuptools_scm[toml]>=5.0,<8"]

 [tool.mypy]
 exclude = ["build/", "dist/", "docs/", "tests/integration/cli/projects/"]
diff --git a/setup.py b/setup.py
index 33844f7957..28bf4c257c 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
         "hypothesis-jsonschema==0.19.0",  # JSON Schema fuzzer extension
     ],
     "lint": [
-        "black>=23.7.0,<24",  # Auto-formatter and linter
+        "black>=23.9.1,<24",  # Auto-formatter and linter
         "mypy>=1.5.1,<2",  # Static type analyzer
         "types-PyYAML",  # Needed due to mypy typeshed
         "types-requests",  # Needed due to mypy typeshed
diff --git a/src/ape/api/providers.py b/src/ape/api/providers.py
index 3ba0665395..21b251e743 100644
--- a/src/ape/api/providers.py
+++ b/src/ape/api/providers.py
@@ -723,6 +723,11 @@ class Web3Provider(ProviderAPI, ABC):
     _web3: Optional[Web3] = None
     _client_version: Optional[str] = None

+    def __init__(self, *args, **kwargs):
+        logger.create_logger("web3.RequestManager")
+        logger.create_logger("web3.providers.HTTPProvider")
+        super().__init__(*args, **kwargs)
+
     @property
     def web3(self) -> Web3:
         """
diff --git a/src/ape/api/query.py b/src/ape/api/query.py
index 78263a629d..d9f9272764 100644
--- a/src/ape/api/query.py
+++ b/src/ape/api/query.py
@@ -1,5 +1,5 @@
 from functools import lru_cache
-from typing import Any, Dict, Iterator, List, Optional, Set, Type, Union
+from typing import Any, Dict, Iterator, List, Optional, Sequence, Set, Type, Union

 from ethpm_types.abi import EventABI, MethodABI
 from pydantic import BaseModel, NonNegativeInt, PositiveInt, root_validator
@@ -53,7 +53,9 @@ def _all_columns(Model: Type[BaseInterfaceModel]) -> Set[str]:
     return columns


-def validate_and_expand_columns(columns: List[str], Model: Type[BaseInterfaceModel]) -> List[str]:
+def validate_and_expand_columns(
+    columns: Sequence[str], Model: Type[BaseInterfaceModel]
+) -> List[str]:
     if len(columns) == 1 and columns[0] == "*":
         # NOTE: By default, only pull explicit fields
         #  (because they are cheap to pull, but properties might not be)
diff --git a/src/ape/cli/options.py b/src/ape/cli/options.py
index 47804f2621..61ebb7c90a 100644
--- a/src/ape/cli/options.py
+++ b/src/ape/cli/options.py
@@ -12,7 +12,7 @@
 )
 from ape.cli.utils import Abort
 from ape.exceptions import ContractError
-from ape.logging import DEFAULT_LOG_LEVEL, CliLogger, LogLevel, logger
+from ape.logging import DEFAULT_LOG_LEVEL, ApeLogger, LogLevel, logger
 from ape.managers.base import ManagerAccessMixin

 _VERBOSITY_VALUES = ("--verbosity", "-v")
@@ -47,7 +47,7 @@ def abort(msg: str, base_error: Optional[Exception] = None) -> NoReturn:
         raise Abort(msg)


-def verbosity_option(cli_logger: Optional[CliLogger] = None, default: str = DEFAULT_LOG_LEVEL):
+def verbosity_option(cli_logger: Optional[ApeLogger] = None, default: str = DEFAULT_LOG_LEVEL):
    """A decorator that adds a `--verbosity, -v`
    option to the decorated command.
    """
@@ -57,7 +57,7 @@ def verbosity_option(cli_logger: Optional[CliLogger] = None, default: str = DEFA


 def _create_verbosity_kwargs(
-    _logger: Optional[CliLogger] = None, default: str = DEFAULT_LOG_LEVEL
+    _logger: Optional[ApeLogger] = None, default: str = DEFAULT_LOG_LEVEL
 ) -> Dict:
     cli_logger = _logger or logger
diff --git a/src/ape/logging.py b/src/ape/logging.py
index abfb38697d..04c31f4f5a 100644
--- a/src/ape/logging.py
+++ b/src/ape/logging.py
@@ -102,15 +102,14 @@ def emit(self, record):
             self.handleError(record)


-class CliLogger:
+class ApeLogger:
     _mentioned_verbosity_option = False
+    _extra_loggers: Dict[str, logging.Logger] = {}

     def __init__(
         self,
         _logger: logging.Logger,
         fmt: str,
-        web3_request_logger: Optional[logging.Logger] = None,
-        web3_http_logger: Optional[logging.Logger] = None,
     ):
         self.error = _logger.error
         self.warning = _logger.warning
@@ -118,30 +117,19 @@ def __init__(
         self.info = _logger.info
         self.debug = _logger.debug
         self._logger = _logger
-        self._web3_request_manager_logger = web3_request_logger
-        self._web3_http_provider_logger = web3_http_logger
         self._load_from_sys_argv()
         self.fmt = fmt

     @classmethod
-    def create(cls, fmt: Optional[str] = None, third_party: bool = True) -> "CliLogger":
+    def create(cls, fmt: Optional[str] = None) -> "ApeLogger":
         fmt = fmt or DEFAULT_LOG_FORMAT
-        kwargs = {}
-        if third_party:
-            kwargs["web3_request_logger"] = _get_logger("web3.RequestManager", fmt=fmt)
-            kwargs["web3_http_logger"] = _get_logger("web3.providers.HTTPProvider", fmt=fmt)
-
-        _logger = _get_logger("ape", fmt=fmt)
-        return cls(_logger, fmt, **kwargs)
+        _logger = get_logger("ape", fmt=fmt)
+        return cls(_logger, fmt)

     def format(self, fmt: Optional[str] = None):
         self.fmt = fmt or DEFAULT_LOG_FORMAT
         fmt = fmt or DEFAULT_LOG_FORMAT
         _format_logger(self._logger, fmt)
-        if req_log := self._web3_request_manager_logger:
-            _format_logger(req_log, fmt)
-        if prov_log := self._web3_http_provider_logger:
-            _format_logger(prov_log, fmt)

     def _load_from_sys_argv(self, default: Optional[Union[str, int]] = None):
         """
@@ -179,13 +167,9 @@ def set_level(self, level: Union[str, int]):
         if level == self._logger.level:
             return

-        for log in (
-            self._logger,
-            self._web3_http_provider_logger,
-            self._web3_request_manager_logger,
-        ):
-            if obj := log:
-                obj.setLevel(level)
+        self._logger.setLevel(level)
+        for _logger in self._extra_loggers.values():
+            _logger.setLevel(level)

     def log_error(self, err: Exception):
         """
@@ -228,9 +212,11 @@ def log_debug_stack_trace(self):
         stack_trace = traceback.format_exc()
         self._logger.debug(stack_trace)

-    def _clear_web3_loggers(self):
-        self._web3_request_manager_logger = None
-        self._web3_http_provider_logger = None
+    def create_logger(self, new_name: str) -> logging.Logger:
+        _logger = get_logger(new_name, self.fmt)
+        _logger.setLevel(self.level)
+        self._extra_loggers[new_name] = _logger
+        return _logger


 def _format_logger(_logger: logging.Logger, fmt: str):
@@ -246,11 +232,21 @@ def _format_logger(_logger: logging.Logger, fmt: str):
     _logger.addHandler(handler)


-def _get_logger(name: str, fmt: Optional[str] = None) -> logging.Logger:
-    """Get a logger with the given ``name`` and configure it for usage with Click."""
-    obj = logging.getLogger(name)
-    _format_logger(obj, fmt=fmt or DEFAULT_LOG_FORMAT)
-    return obj
+def get_logger(name: str, fmt: Optional[str] = None) -> logging.Logger:
+    """
+    Get a logger with the given ``name`` and configure it for usage with Ape.
+
+    Args:
+        name (str): The name of the logger.
+        fmt (Optional[str]): The format of the logger. Defaults to the Ape
+          logger's default format: ``"%(levelname)s%(plugin)s: %(message)s"``.
+
+    Returns:
+        ``logging.Logger``
+    """
+    _logger = logging.getLogger(name)
+    _format_logger(_logger, fmt=fmt or DEFAULT_LOG_FORMAT)
+    return _logger


 def _get_level(level: Optional[Union[str, int]] = None) -> str:
@@ -262,7 +258,10 @@ def _get_level(level: Optional[Union[str, int]] = None) -> str:
     return level


-logger = CliLogger.create()
+logger = ApeLogger.create()
+
+# TODO: Can remove this type alias after 0.7
+CliLogger = ApeLogger


-__all__ = ["DEFAULT_LOG_LEVEL", "logger", "LogLevel"]
+__all__ = ["DEFAULT_LOG_LEVEL", "logger", "LogLevel", "ApeLogger"]
diff --git a/src/ape/managers/chain.py b/src/ape/managers/chain.py
index 6e30f51bfc..09a8d437a8 100644
--- a/src/ape/managers/chain.py
+++ b/src/ape/managers/chain.py
@@ -1318,15 +1318,6 @@ def get_deployments(self, contract_container: ContractContainer) -> List[Contrac
         if not deployments:
             return []

-        if isinstance(deployments[0], str):
-            # TODO: Remove this migration logic >= version 0.6.0
-            logger.debug("Migrating 'deployments_map.json'.")
-            deployments = [{"address": a} for a in deployments]
-            self._deployments = {
-                **self._deployments,
-                contract_type.name: deployments,
-            }
-
         instances: List[ContractInstance] = []
         for deployment in deployments:
             address = deployment["address"]
diff --git a/src/ape_networks/_cli.py b/src/ape_networks/_cli.py
index c9ee7c951b..c7b61f1cc4 100644
--- a/src/ape_networks/_cli.py
+++ b/src/ape_networks/_cli.py
@@ -85,8 +85,8 @@ def run(cli_ctx, network):
     Start a node process
     """

-    # Ignore web3 logs
-    cli_ctx.logger._clear_web3_loggers()
+    # Ignore extra loggers, such as web3 loggers.
+    cli_ctx.logger._extra_loggers = {}

     network_ctx = cli_ctx.network_manager.parse_network_choice(network)
     provider = network_ctx._provider
diff --git a/src/ape_plugins/_cli.py b/src/ape_plugins/_cli.py
index 653388ef67..07cbe8f8bd 100644
--- a/src/ape_plugins/_cli.py
+++ b/src/ape_plugins/_cli.py
@@ -42,7 +42,7 @@ def plugins_argument():
     """

     def load_from_file(ctx, file_path: Path) -> List[PluginInstallRequest]:
-        if file_path.is_file() and file_path.name != CONFIG_FILE_NAME:
+        if file_path.is_dir() and (file_path / CONFIG_FILE_NAME).is_file():
             file_path = file_path / CONFIG_FILE_NAME

         if file_path.is_file():
diff --git a/src/ape_plugins/utils.py b/src/ape_plugins/utils.py
index 46468b1945..a27965a158 100644
--- a/src/ape_plugins/utils.py
+++ b/src/ape_plugins/utils.py
@@ -5,7 +5,7 @@
 from pydantic import root_validator

 from ape.__modules__ import __modules__
-from ape.logging import CliLogger
+from ape.logging import ApeLogger
 from ape.plugins import clean_plugin_name
 from ape.utils import BaseInterfaceModel, cached_property, get_package_version, github_client
@@ -194,7 +194,7 @@ def __str__(self):


 class ModifyPluginResultHandler:
-    def __init__(self, logger: CliLogger, plugin: PluginInstallRequest):
+    def __init__(self, logger: ApeLogger, plugin: PluginInstallRequest):
         self._logger = logger
         self._plugin = plugin
diff --git a/tests/conftest.py b/tests/conftest.py
index af0b9eb79b..d1c843c1a3 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,10 +1,11 @@
 import json
 import shutil
 import tempfile
+import time
 from contextlib import contextmanager
 from pathlib import Path
 from tempfile import mkdtemp
-from typing import Dict, Optional
+from typing import Any, Callable, Dict, Optional

 import pytest
 import yaml
@@ -12,6 +13,7 @@
 import ape
 from ape.exceptions import APINotImplementedError, UnknownSnapshotError
+from ape.logging import LogLevel, logger
 from ape.managers.config import CONFIG_FILE_NAME
 from ape.types import AddressType
 from ape.utils import ZERO_ADDRESS
@@ -387,3 +389,73 @@ def test_skip_from_converter():
 @pytest.fixture
 def zero_address():
     return ZERO_ADDRESS
+
+
+@pytest.fixture
+def ape_caplog(caplog):
+    class ApeCaplog:
+        def __init__(self):
+            self.messages_at_start = list(caplog.messages)
+            self.set_levels()
+
+        def __getattr__(self, name: str) -> Any:
+            return getattr(caplog, name)
+
+        @property
+        def fail_message(self) -> str:
+            if caplog.messages:
+                last_message = caplog.messages[-1]
+                return f"Actual last message: {last_message}"
+
+            elif self.messages_at_start:
+                return (
+                    f"Failed to detect logs. "
+                    f"However, we did have logs before the operation: "
+                    f"{', '.join(self.messages_at_start)}"
+                )
+
+            else:
+                return "No logs found!"
+
+        @property
+        def head(self) -> str:
+            """
+            A str representing the latest logged line.
+            Initialized to empty str.
+            """
+            return caplog.messages[-1] if len(caplog.messages) else ""
+
+        @classmethod
+        def set_levels(cls):
+            logger.set_level(LogLevel.INFO)
+            caplog.set_level(LogLevel.WARNING)
+
+        def assert_last_log(self, message: str):
+            assert message in self.head, self.fail_message
+
+        def assert_last_log_with_retries(
+            self, op: Callable, message: str, tries: int = 2, delay: float = 5.0
+        ):
+            times_tried = 0
+            return_value = None
+            while times_tried <= tries:
+                result = op()
+
+                # Only save the first return value.
+                if return_value is None and result is not None:
+                    return_value = result
+
+                times_tried += 1
+                if message in self.head:
+                    return return_value
+
+                time.sleep(delay)
+
+            # Reset levels in case they got switched.
+            self.set_levels()
+            logger.set_level(LogLevel.INFO)
+            caplog.set_level(LogLevel.WARNING)
+
+            pytest.fail(self.fail_message)
+
+    return ApeCaplog()
diff --git a/tests/functional/test_block_container.py b/tests/functional/test_block_container.py
index d2f795e491..68ba9bc3ff 100644
--- a/tests/functional/test_block_container.py
+++ b/tests/functional/test_block_container.py
@@ -100,7 +100,7 @@ def test_poll_blocks(chain_that_mined_5, eth_tester_provider, owner, PollDaemon)
     assert second == third - 1


-def test_poll_blocks_reorg(chain_that_mined_5, eth_tester_provider, owner, PollDaemon, caplog):
+def test_poll_blocks_reorg(chain_that_mined_5, eth_tester_provider, owner, PollDaemon, ape_caplog):
     blocks: Queue = Queue(maxsize=6)
     poller = chain_that_mined_5.blocks.poll_blocks()

@@ -131,8 +131,8 @@ def test_poll_blocks_reorg(chain_that_mined_5, eth_tester_provider, owner, PollD
         "Chain has reorganized since returning the last block. "
         "Try adjusting the required network confirmations."
     )
-    assert caplog.records, "Didn't detect re-org"
-    assert expected_error in caplog.records[-1].message
+    assert ape_caplog.records, "Didn't detect re-org"
+    ape_caplog.assert_last_log(expected_error)

     # Show that there are duplicate blocks
     block_numbers: List[int] = [blocks.get().number for _ in range(6)]
diff --git a/tests/functional/test_config.py b/tests/functional/test_config.py
index 96f67081c4..d69d2266cf 100644
--- a/tests/functional/test_config.py
+++ b/tests/functional/test_config.py
@@ -21,13 +21,15 @@ def test_integer_deployment_addresses(networks):
     "ecosystems,networks,err_part",
     [(["ERRORS"], ["mainnet"], "ecosystem"), (["ethereum"], ["ERRORS"], "network")],
 )
-def test_bad_value_in_deployments(ecosystems, networks, err_part, caplog, plugin_manager):
+def test_bad_value_in_deployments(ecosystems, networks, err_part, ape_caplog, plugin_manager):
     deployments = _create_deployments()
     all_ecosystems = dict(plugin_manager.ecosystems)
     ecosystem_dict = {e: all_ecosystems[e] for e in ecosystems if e in all_ecosystems}
     DeploymentConfigCollection(deployments, ecosystem_dict, networks)
-    assert len(caplog.records) > 0, "Nothing was logged"
-    assert f"Invalid {err_part}" in caplog.records[0].message
+    ape_caplog.assert_last_log_with_retries(
+        lambda: DeploymentConfigCollection(deployments, ecosystem_dict, networks),
+        f"Invalid {err_part}",
+    )


 def _create_deployments(ecosystem_name: str = "ethereum", network_name: str = "local") -> Dict:
diff --git a/tests/functional/test_contracts_cache.py b/tests/functional/test_contracts_cache.py
index 989e338e75..97cda768dd 100644
--- a/tests/functional/test_contracts_cache.py
+++ b/tests/functional/test_contracts_cache.py
@@ -46,7 +46,7 @@ def test_instance_at_when_given_name_as_contract_type(chain, contract_instance):


 @explorer_test
-def test_instance_at_uses_given_contract_type_when_retrieval_fails(mocker, chain, caplog):
+def test_instance_at_uses_given_contract_type_when_retrieval_fails(mocker, chain, ape_caplog):
     # The manager always attempts retrieval so that default contact types can
     # get cached. However, sometimes an explorer plugin may fail. If given a contract-type
     # in that situation, we can use it and not fail and log the error instead.
@@ -65,8 +65,8 @@ def fn(addr, default=None):
     chain.contracts.get.side_effect = fn

     actual = chain.contracts.instance_at(new_address, contract_type=expected_contract_type)
+    ape_caplog.assert_last_log(expected_fail_message)
     assert actual.contract_type == expected_contract_type
-    assert caplog.records[-1].message == expected_fail_message


 @explorer_test
@@ -209,17 +209,6 @@ def test_get_deployments_live(
     assert address_from_api_1 == deployed_contract_1.address


-def test_get_deployments_live_migration(
-    chain, owner, contract_0, dummy_live_network, caplog, use_debug
-):
-    contract = owner.deploy(contract_0, required_confirmations=0)
-    old_style_map = {"ethereum": {"goerli": {"ApeContract0": [contract.address]}}}
-    chain.contracts._write_deployments_mapping(old_style_map)
-    actual = chain.contracts.get_deployments(contract_0)
-    assert actual == [contract]
-    assert caplog.records[-1].message == "Migrating 'deployments_map.json'."
-
-
 def test_get_multiple_deployments_live(
     chain, owner, contract_0, contract_1, remove_disk_writes_deployments, dummy_live_network
 ):
@@ -283,7 +272,7 @@ def test_get_multiple_no_addresses(chain, caplog):
     contract_map = chain.contracts.get_multiple([])
     assert not contract_map
     assert "WARNING" in caplog.records[-1].levelname
-    assert "No addresses provided." in caplog.records[-1].message
+    assert "No addresses provided." in caplog.messages[-1]


 def test_get_all_include_non_contract_address(vyper_contract_instance, chain, owner):
diff --git a/tests/functional/test_networks.py b/tests/functional/test_networks.py
index f527bd45fd..ac1eda7a03 100644
--- a/tests/functional/test_networks.py
+++ b/tests/functional/test_networks.py
@@ -205,27 +205,25 @@ def test_block_times(ethereum):
     assert ethereum.goerli.block_time == 15


-def test_ecosystems_when_default_network_not_exists(temp_config, caplog, networks):
+def test_ecosystems_when_default_network_not_exists(temp_config, ape_caplog, networks):
     bad_network = "NOT_EXISTS"
     config = {"ethereum": {"default_network": bad_network}}
     with temp_config(config):
         assert networks.ecosystems

-    err = caplog.records[-1].message
-    assert err == (
+    assert ape_caplog.head == (
         f"Failed setting default network: "
         f"'{bad_network}' is not a valid network for ecosystem 'ethereum'."
     )


-def test_ecosystems_when_default_provider_not_exists(temp_config, caplog, networks):
+def test_ecosystems_when_default_provider_not_exists(temp_config, ape_caplog, networks):
     bad_provider = "NOT_EXISTS"
     config = {"ethereum": {"goerli": {"default_provider": bad_provider}}}
     with temp_config(config):
         assert networks.ecosystems

-    err = caplog.records[-1].message
-    assert err == (
+    assert ape_caplog.head == (
         f"Failed setting default provider: "
         f"Provider '{bad_provider}' not found in network 'ethereum:goerli'."
     )
diff --git a/tests/functional/test_query.py b/tests/functional/test_query.py
index f730e3762b..865e89c6c3 100644
--- a/tests/functional/test_query.py
+++ b/tests/functional/test_query.py
@@ -80,20 +80,19 @@ def test_column_expansion():
     assert columns == list(Model.__fields__)


-def test_column_validation(eth_tester_provider, caplog):
+def test_column_validation(eth_tester_provider, ape_caplog):
     with pytest.raises(ValueError) as exc_info:
         validate_and_expand_columns(["numbr"], Model)

     expected = "Unrecognized field(s) 'numbr', must be one of 'number, timestamp'."
     assert exc_info.value.args[-1] == expected

-    validate_and_expand_columns(["numbr", "timestamp"], Model)
-
-    assert expected in caplog.records[-1].msg
+    ape_caplog.assert_last_log_with_retries(
+        lambda: validate_and_expand_columns(["numbr", "timestamp"], Model), expected
+    )

     validate_and_expand_columns(["number", "timestamp", "number"], Model)
-
-    assert "Duplicate fields in ['number', 'timestamp', 'number']" in caplog.records[-1].msg
+    assert "Duplicate fields in ['number', 'timestamp', 'number']" in ape_caplog.messages[-1]


 def test_specify_engine(chain, eth_tester_provider):
diff --git a/tests/functional/utils/test_abi.py b/tests/functional/utils/test_abi.py
index a8269c65aa..4015fd45e4 100644
--- a/tests/functional/utils/test_abi.py
+++ b/tests/functional/utils/test_abi.py
@@ -62,13 +62,16 @@ def log_data_missing_trailing_zeroes():
     )


-def test_decoding_with_strict(collection, topics, log_data_missing_trailing_zeroes, caplog):
+def test_decoding_with_strict(collection, topics, log_data_missing_trailing_zeroes, ape_caplog):
     """
     This test is for a time where Alchemy gave us log data when it was
     missing trailing zeroes. When using strict=False, it was able to properly decode.
     In this case, in Ape, we warn the user and still proceed to decode the log.
     """
-    actual = collection.decode(topics, log_data_missing_trailing_zeroes)
+    actual = ape_caplog.assert_last_log_with_retries(
+        lambda: collection.decode(topics, log_data_missing_trailing_zeroes),
+        "However, we are able to get a value using decode(strict=False)",
+    )
     expected = {
         "name": "Launchnodes",
         "nodeOperatorId": 30,
@@ -76,7 +79,3 @@ def test_decoding_with_strict(collection, topics, log_data_missing_trailing_zero
         "stakingLimit": 0,
     }
     assert actual == expected
-    assert (
-        "However, we are able to get a value using decode(strict=False)"
-        in caplog.records[-1].message
-    )
diff --git a/tests/integration/cli/test_compile.py b/tests/integration/cli/test_compile.py
index 09cf783204..3736ed2137 100644
--- a/tests/integration/cli/test_compile.py
+++ b/tests/integration/cli/test_compile.py
@@ -283,13 +283,20 @@ def test_compile_only_dependency(ape_cli, runner, project, clean_cache, caplog):
     _ = dependency.DependencyInProjectOnly

     # Pop the log record off here so we can check the tail again below.
-    log_record = caplog.records.pop()
-    assert expected_log_message in log_record.message
+    length_before = len(caplog.records)
+    assert expected_log_message in caplog.messages[-1]

     # It should not need to compile again.
     _ = dependency.DependencyInProjectOnly
     if caplog.records:
-        assert expected_log_message not in caplog.records[-1].message, "Compiled twice!"
+        if expected_log_message in caplog.messages[-1]:
+            length_after = len(caplog.records)
+            # The only way it should be the same log is if there
+            # were not additional logs.
+            assert length_after == length_before
+
+        else:
+            pytest.fail("Compiled twice!")

     # Force a re-compile and trigger the dependency to compile via CLI
     result = runner.invoke(
diff --git a/tests/integration/cli/test_plugins.py b/tests/integration/cli/test_plugins.py
index b156f517dd..e8599dc8db 100644
--- a/tests/integration/cli/test_plugins.py
+++ b/tests/integration/cli/test_plugins.py
@@ -148,17 +148,16 @@ def test_install_multiple_in_one_str(ape_plugins_runner):


 @github_xfail()
-def test_install_from_config_file(ape_cli, runner, temp_config, caplog):
+def test_install_from_config_file(ape_cli, runner, temp_config):
     plugins_config = {"plugins": [{"name": TEST_PLUGIN_NAME}]}
     with temp_config(plugins_config):
         result = runner.invoke(ape_cli, ["plugins", "install", "."], catch_exceptions=False)
         assert result.exit_code == 0, result.output
-
-        assert TEST_PLUGIN_NAME in caplog.records[-1].message
+        assert TEST_PLUGIN_NAME in result.stdout


 @github_xfail()
-def test_uninstall(ape_cli, runner, installed_plugin, caplog):
+def test_uninstall(ape_cli, runner, installed_plugin):
     result = runner.invoke(
         ape_cli, ["plugins", "uninstall", TEST_PLUGIN_NAME, "--yes"], catch_exceptions=False
     )
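
Usage sketch for the new logging API: the patch adds a public get_logger() helper and an ApeLogger.create_logger() method, which Web3Provider.__init__ uses above to register the web3.RequestManager and web3.providers.HTTPProvider loggers so they share Ape's log format and follow logger.set_level(). A minimal, hypothetical example of calling the same API from downstream code (the "my_plugin" names below are made up for illustration and are not part of the patch):

    from ape.logging import LogLevel, get_logger, logger

    # Standalone logger that reuses Ape's default format,
    # "%(levelname)s%(plugin)s: %(message)s" per the get_logger() docstring.
    plugin_logger = get_logger("my_plugin")  # hypothetical logger name
    plugin_logger.warning("hello from a plugin")

    # Register an extra logger with the Ape logger: create_logger() stores it in
    # ApeLogger._extra_loggers, so later logger.set_level() calls also apply to it.
    # This mirrors what the new Web3Provider.__init__ does for the web3 loggers.
    http_logger = logger.create_logger("my_plugin.http")  # hypothetical name
    logger.set_level(LogLevel.DEBUG)  # also sets http_logger to DEBUG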