Skip to content

Commit

Permalink
fix: move model_path existence check from restful API to worker; treat empty-string model_path as unset; trim model_path in web UI
Browse files Browse the repository at this point in the history
  • Loading branch information
Valdanitooooo committed Jul 27, 2024
1 parent 9790f37 commit 181619f
Show file tree
Hide file tree
Showing 9 changed files with 14 additions and 15 deletions.
6 changes: 0 additions & 6 deletions xinference/api/restful_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -841,12 +841,6 @@ async def launch_model(
detail="Invalid input. Allocated gpu must be a multiple of replica.",
)

if model_path is not None:
if not os.path.exists(model_path):
raise ValueError(
f"Invalid input. `model_path`: {model_path} File or directory does not exist."
)

if peft_model_config is not None:
peft_model_config = PeftModelConfig.from_dict(peft_model_config)
else:
Expand Down
5 changes: 5 additions & 0 deletions xinference/core/worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -800,6 +800,11 @@ async def launch_builtin_model(
raise ValueError(
f"PEFT adaptors can only be applied to pytorch-like models"
)
if model_path is not None:
if not os.path.exists(model_path):
raise ValueError(
f"Invalid input. `model_path`: {model_path} File or directory does not exist."
)

assert model_uid not in self._model_uid_to_model
self._check_model_is_valid(model_name, model_format)
Expand Down
2 changes: 1 addition & 1 deletion xinference/model/audio/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ def create_audio_model_instance(
**kwargs,
) -> Tuple[Union[WhisperModel, ChatTTSModel, CosyVoiceModel], AudioModelDescription]:
model_spec = match_audio(model_name, download_hub)
if model_path is None:
if not model_path:
model_path = cache(model_spec)
model: Union[WhisperModel, ChatTTSModel, CosyVoiceModel]
if model_spec.model_family == "whisper":
Expand Down
2 changes: 1 addition & 1 deletion xinference/model/embedding/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -353,7 +353,7 @@ def create_embedding_model_instance(
**kwargs,
) -> Tuple[EmbeddingModel, EmbeddingModelDescription]:
model_spec = match_embedding(model_name, download_hub)
if model_path is None or model_path == "":
if not model_path:
model_path = cache(model_spec)

model = EmbeddingModel(model_uid, model_path, **kwargs)
Expand Down
2 changes: 1 addition & 1 deletion xinference/model/flexible/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ def create_flexible_model_instance(
**kwargs,
) -> Tuple[FlexibleModel, FlexibleModelDescription]:
model_spec = match_flexible_model(model_name)
if model_path is None or model_path == "":
if not model_path:
model_path = model_spec.model_uri
launcher_name = model_spec.launcher
launcher_args = model_spec.parser_args()
Expand Down
4 changes: 2 additions & 2 deletions xinference/model/image/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,7 @@ def create_image_model_instance(
for name in controlnet:
for cn_model_spec in model_spec.controlnet:
if cn_model_spec.model_name == name:
if model_path is None or model_path == "":
if not model_path:
model_path = cache(cn_model_spec)
controlnet_model_paths.append(model_path)
break
Expand All @@ -222,7 +222,7 @@ def create_image_model_instance(
kwargs["controlnet"] = controlnet_model_paths[0]
else:
kwargs["controlnet"] = controlnet_model_paths
if model_path is None or model_path == "":
if not model_path:
model_path = cache(model_spec)
if peft_model_config is not None:
lora_model = peft_model_config.peft_model
Expand Down
2 changes: 1 addition & 1 deletion xinference/model/llm/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ def create_llm_model_instance(
)
logger.debug(f"Launching {model_uid} with {llm_cls.__name__}")

if model_path is None or model_path == "":
if not model_path:
model_path = cache(llm_family, llm_spec, quantization)

peft_model = peft_model_config.peft_model if peft_model_config else None
Expand Down
2 changes: 1 addition & 1 deletion xinference/model/rerank/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,7 @@ def create_rerank_model_instance(
f"Huggingface: {BUILTIN_RERANK_MODELS.keys()}"
f"ModelScope: {MODELSCOPE_RERANK_MODELS.keys()}"
)
if model_path is None or model_path == "":
if not model_path:
model_path = cache(model_spec)
use_fp16 = kwargs.pop("use_fp16", False)
model = RerankModel(
Expand Down
4 changes: 2 additions & 2 deletions xinference/web/ui/src/scenes/launch_model/modelCard.js
Original file line number Diff line number Diff line change
Expand Up @@ -304,7 +304,7 @@ const ModelCard = ({
worker_ip: workerIp.trim() === '' ? null : workerIp.trim(),
gpu_idx: GPUIdx.trim() === '' ? null : handleGPUIdx(GPUIdx.trim()),
download_hub: downloadHub === '' ? null : downloadHub,
model_path: modelPath === '' ? null : modelPath,
model_path: modelPath.trim() === '' ? null : modelPath.trim(),
}

let modelDataWithID_other = {
Expand All @@ -316,7 +316,7 @@ const ModelCard = ({
worker_ip: workerIp.trim() === '' ? null : workerIp.trim(),
gpu_idx: GPUIdx.trim() === '' ? null : handleGPUIdx(GPUIdx.trim()),
download_hub: downloadHub === '' ? null : downloadHub,
model_path: modelPath === '' ? null : modelPath,
model_path: modelPath.trim() === '' ? null : modelPath.trim(),
}

if (nGPULayers >= 0) {
Expand Down

0 comments on commit 181619f

Please sign in to comment.