Replies: 1 comment
-
https://speech.fish.audio/zh/
Beta Was this translation helpful? Give feedback.
0 replies
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
-
已经安装了 VS2022，但是提示需要 C 语言编译器？
return fn(*args, **kwargs)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_dynamo\utils.py", line 231, in time_wrapper
r = func(*args, **kwargs)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 517, in compile_fx_inner
compiled_graph = FxGraphCache.load(
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\codecache.py", line 1044, in load
compiled_graph = compile_fx_fn(gm, example_inputs, **fx_kwargs)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 831, in fx_codegen_and_compile
compiled_fn = graph.compile_to_fn()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\graph.py", line 1751, in compile_to_fn
return self.compile_to_module().call
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_dynamo\utils.py", line 231, in time_wrapper
r = func(*args, **kwargs)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\graph.py", line 1680, in compile_to_module
self.codegen_with_cpp_wrapper() if self.cpp_wrapper else self.codegen()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\graph.py", line 1640, in codegen
self.scheduler.codegen()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_dynamo\utils.py", line 231, in time_wrapper
r = func(*args, **kwargs)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\scheduler.py", line 2741, in codegen
self.get_backend(device).codegen_node(node)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\codegen\cuda_combined_scheduling.py", line 69, in codegen_node
return self._triton_scheduling.codegen_node(node)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\codegen\simd.py", line 1148, in codegen_node
return self.codegen_node_schedule(node_schedule, buf_accesses, numel, rnumel)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\codegen\simd.py", line 1317, in codegen_node_schedule
src_code = kernel.codegen_kernel()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\codegen\triton.py", line 2159, in codegen_kernel
**self.inductor_meta_common(),
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\_inductor\codegen\triton.py", line 2047, in inductor_meta_common
"backend_hash": torch.utils._triton.triton_hash_with_backend(),
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\utils\_triton.py", line 63, in triton_hash_with_backend
backend = triton_backend()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\torch\utils\_triton.py", line 49, in triton_backend
target = driver.active.get_current_target()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\triton\runtime\driver.py", line 23, in __getattr__
self._initialize_obj()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\triton\runtime\driver.py", line 20, in _initialize_obj
self._obj = self._init_fn()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\triton\runtime\driver.py", line 9, in _create_driver
return actives[0]()
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\triton\backends\nvidia\driver.py", line 411, in __init__
self.utils = CudaUtils()  # TODO: make static
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\triton\backends\nvidia\driver.py", line 55, in __init__
mod = compile_module_from_src(Path(os.path.join(dirname, "driver.c")).read_text(), "cuda_utils")
mod = compile_module_from_src(Path(os.path.join(dirname, "driver.c")).read_text(), "cuda_utils")
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\triton\backends\nvidia\driver.py", line 32, in compile_module_from_src
so = _build(name, src_path, tmpdir, library_dir, include_dir, libraries)
File "C:\Users\Administrator\PycharmProjects\fish-speech\venv\lib\site-packages\triton\runtime\build.py", line 54, in _build
raise RuntimeError("Failed to find C compiler. Please specify via CC environment variable.")
torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised:
RuntimeError: Failed to find C compiler. Please specify via CC environment variable.
大佬们求助
Beta Was this translation helpful? Give feedback.
All reactions