From 08c9cf3d29d76d202e1756ef707f45faee3b0473 Mon Sep 17 00:00:00 2001
From: Konrad Zawora
Date: Tue, 13 Aug 2024 17:15:32 +0300
Subject: [PATCH] format.sh

---
 vllm/distributed/device_communicators/hpu_communicator.py | 5 ++---
 vllm/executor/ray_habana_executor.py                       | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/vllm/distributed/device_communicators/hpu_communicator.py b/vllm/distributed/device_communicators/hpu_communicator.py
index 840f26b317972..e68279ffc42d9 100644
--- a/vllm/distributed/device_communicators/hpu_communicator.py
+++ b/vllm/distributed/device_communicators/hpu_communicator.py
@@ -5,9 +5,8 @@
 from vllm.platforms import current_platform
 from vllm.utils import is_fake_hpu
 
-if current_platform.is_hpu():
-    if not is_fake_hpu():
-        import habana_frameworks.torch as htorch  # noqa: F401
+if current_platform.is_hpu() and not is_fake_hpu():
+    import habana_frameworks.torch as htorch  # noqa: F401
 
 
 class HpuCommunicator:
diff --git a/vllm/executor/ray_habana_executor.py b/vllm/executor/ray_habana_executor.py
index 37498453cc230..c45513e3e5c91 100644
--- a/vllm/executor/ray_habana_executor.py
+++ b/vllm/executor/ray_habana_executor.py
@@ -88,7 +88,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup",
         worker_wrapper_kwargs = self._get_worker_wrapper_args()
         for bundle_id, bundle in enumerate(placement_group.bundle_specs):
             resource_name = "HPU" if not is_fake_hpu() else "CPU"
-            if not bundle.get(resource_name,0):
+            if not bundle.get(resource_name, 0):
                 continue
             scheduling_strategy = PlacementGroupSchedulingStrategy(
                 placement_group=placement_group,
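
For context, the first hunk folds a nested platform guard into a single condition. Below is a minimal standalone sketch of that guarded-import pattern, not part of the patch: probe_hpu() and fake_hpu_requested() are hypothetical stand-ins for vLLM's current_platform.is_hpu() and is_fake_hpu() helpers, and the VLLM_USE_FAKE_HPU environment flag is assumed here only to keep the snippet self-contained.

    import importlib.util
    import os

    def probe_hpu() -> bool:
        # Hypothetical stand-in for current_platform.is_hpu(): treat the
        # platform as HPU when the Habana PyTorch bridge is importable.
        return importlib.util.find_spec("habana_frameworks") is not None

    def fake_hpu_requested() -> bool:
        # Hypothetical stand-in for is_fake_hpu(): emulate HPU on CPU when an
        # (assumed) VLLM_USE_FAKE_HPU environment flag is set.
        return os.environ.get("VLLM_USE_FAKE_HPU", "0") != "0"

    # Same shape as the patched guard: pull in the vendor bridge only when a
    # real HPU is present, i.e. HPU platform detected and fake mode is off.
    if probe_hpu() and not fake_hpu_requested():
        import habana_frameworks.torch as htorch  # noqa: F401

Collapsing the two nested ifs into one "and" condition keeps the optional vendor import behind a single flat guard, which is the shape the formatter applied in the hunk above.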