System Info
PS D:\GLM\LLaMA-Factory-main> llamafactory-cli env
llamafactory version: 0.9.2.dev0

Reproduction
[INFO|configuration_utils.py:677] 2024-12-18 17:52:36,187 >> loading configuration file D:\GLM\glm-4-9b-chat\config.json
Traceback (most recent call last):
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\gradio\queueing.py", line 575, in process_events
response = await route_utils.call_process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\gradio\route_utils.py", line 322, in call_process_api
output = await app.get_blocks().process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\gradio\blocks.py", line 1935, in process_api
result = await self.call_function(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\gradio\blocks.py", line 1532, in call_function
prediction = await utils.async_iteration(iterator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\gradio\utils.py", line 671, in async_iteration
return await iterator.__anext__()
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\gradio\utils.py", line 664, in __anext__
return await anyio.to_thread.run_sync(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\anyio_backends_asyncio.py", line 2505, in run_sync_in_worker_thread
return await future
^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\anyio_backends_asyncio.py", line 1005, in run
result = context.run(func, *args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\gradio\utils.py", line 647, in run_sync_iterator_async
return next(iterator)
^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\gradio\utils.py", line 809, in gen_wrapper
response = next(iterator)
^^^^^^^^^^^^^^
File "D:\GLM\LLaMA-Factory-main\src\llamafactory\webui\chatter.py", line 104, in load_model
super().__init__(args)
File "D:\GLM\LLaMA-Factory-main\src\llamafactory\chat\chat_model.py", line 52, in __init__
self.engine: "BaseEngine" = HuggingfaceEngine(model_args, data_args, finetuning_args, generating_args)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\GLM\LLaMA-Factory-main\src\llamafactory\chat\hf_engine.py", line 54, in init
tokenizer_module = load_tokenizer(model_args)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\GLM\LLaMA-Factory-main\src\llamafactory\model\loader.py", line 69, in load_tokenizer
config = load_config(model_args)
^^^^^^^^^^^^^^^^^^^^^^^
return AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\transformers\models\auto\configuration_auto.py", line 1020, in from_pretrained
trust_remote_code = resolve_trust_remote_code(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\QUALITY-LAB\anaconda3\envs\glm\Lib\site-packages\transformers\dynamic_module_utils.py", line 678, in resolve_trust_remote_code
raise ValueError(
ValueError: Loading D:\GLM\glm-4-9b-chat requires you to execute the configuration file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option trust_remote_code=True to remove this error.

Expected behavior
No response
Others
No response
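For context on the root cause: the glm-4-9b-chat checkpoint defines its configuration and tokenizer classes in custom Python files inside the model directory, and transformers refuses to execute that code unless remote code execution is explicitly allowed. Below is a minimal sketch of the underlying transformers call that fails in the traceback and how passing trust_remote_code=True resolves it; the local model path is taken from the log above, and whether your LLaMA-Factory version forwards a matching trust-remote-code option through its model arguments is an assumption to verify against your checkout.

```python
from transformers import AutoConfig, AutoTokenizer

MODEL_PATH = r"D:\GLM\glm-4-9b-chat"  # local checkpoint path from the log above

# Without trust_remote_code=True, both calls raise the ValueError shown in
# the traceback, because the GLM-4 repo ships custom config/tokenizer code
# that transformers must be allowed to execute.
config = AutoConfig.from_pretrained(MODEL_PATH, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
print(type(config).__name__, type(tokenizer).__name__)
```

Review the Python files in the checkpoint directory before enabling this, since trust_remote_code=True runs them with your user's privileges. Recent LLaMA-Factory versions appear to expose a corresponding trust_remote_code model argument (an assumption; check ModelArguments in your version).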