From 7fd552a33efb58714a3ea40d059d9f30cdeed3b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Kuligowski?=
Date: Thu, 24 Oct 2024 15:36:18 +0200
Subject: [PATCH] Update sampling_metadata.py

temporary change based on #246 for testing only, will remove later

---
 vllm/model_executor/sampling_metadata.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/sampling_metadata.py b/vllm/model_executor/sampling_metadata.py
index 84f35f75a0c32..5c66adbad4d33 100644
--- a/vllm/model_executor/sampling_metadata.py
+++ b/vllm/model_executor/sampling_metadata.py
@@ -266,8 +266,10 @@ def _prepare_seq_groups(
         if seq_group_metadata.is_prompt:
             if sampling_params.seed is not None:
-                generator = torch.Generator(device=device).manual_seed(
-                    sampling_params.seed)
+                import habana_frameworks.torch.hpu.random as htrandom
+                generator = \
+                    htrandom.default_generators[
+                        0].manual_seed(sampling_params.seed)
             if generators is not None:
                 generators[seq_group_metadata.request_id] = generator
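
Editor's note: a minimal sketch, not part of the patch, of how the HPU-specific seeding above could sit behind a device-agnostic helper. The helper name make_seeded_generator is hypothetical; the htrandom.default_generators[0] call mirrors the lines added by the diff, and the torch.Generator path mirrors the lines it removes.

    import torch

    def make_seeded_generator(device: str, seed: int) -> torch.Generator:
        # Hypothetical helper (not in the patch): choose the seeding
        # strategy per device so call sites stay device-agnostic.
        if device == "hpu":
            # Same call the diff introduces: seed and reuse the
            # process-wide HPU default generator instead of
            # constructing a new torch.Generator on the device.
            import habana_frameworks.torch.hpu.random as htrandom
            return htrandom.default_generators[0].manual_seed(seed)
        # Behavior of the removed lines: a fresh per-request generator
        # on the target device, seeded deterministically.
        return torch.Generator(device=device).manual_seed(seed)

One design consequence worth noting: reusing default_generators[0] means all seeded prompt requests on HPU share a single generator state, whereas the replaced code gave each request its own torch.Generator; that trade-off is consistent with the commit message describing this as a temporary change for testing.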