[X] I have read the README and searched the existing issues.
System Info
Reproduction
Traceback (most recent call last):
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/queueing.py", line 571, in process_events
response = await route_utils.call_process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/route_utils.py", line 276, in call_process_api
output = await app.get_blocks().process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/blocks.py", line 1928, in process_api
result = await self.call_function(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/blocks.py", line 1526, in call_function
prediction = await utils.async_iteration(iterator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/utils.py", line 656, in async_iteration
return await iterator.__anext__()
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/utils.py", line 649, in __anext__
return await anyio.to_thread.run_sync(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/anyio/_backends/_asyncio.py", line 859, in run
result = context.run(func, *args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/utils.py", line 632, in run_sync_iterator_async
return next(iterator)
^^^^^^^^^^^^^^
File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/utils.py", line 815, in gen_wrapper
response = next(iterator)
^^^^^^^^^^^^^^
File "/root/autodl-tmp/LLaMA-Factory/src/llamafactory/webui/components/export.py", line 103, in save_model
export_model(args)
File "/root/autodl-tmp/LLaMA-Factory/src/llamafactory/train/tuner.py", line 76, in export_model
raise ValueError("Cannot merge adapters to a quantized model.")
ValueError: Cannot merge adapters to a quantized model.
Reminder
System Info
Reproduction
Traceback (most recent call last): File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/queueing.py", line 571, in process_events response = await route_utils.call_process_api( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/route_utils.py", line 276, in call_process_api output = await app.get_blocks().process_api( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/blocks.py", line 1928, in process_api result = await self.call_function( ^^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/blocks.py", line 1526, in call_function prediction = await utils.async_iteration(iterator) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/utils.py", line 656, in async_iteration return await iterator.anext() ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/utils.py", line 649, in anext return await anyio.to_thread.run_sync( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/anyio/to_thread.py", line 56, in run_sync return await get_async_backend().run_sync_in_worker_thread( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread return await future ^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/anyio/_backends/_asyncio.py", line 859, in run result = context.run(func, *args) ^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/utils.py", line 632, in run_sync_iterator_async return next(iterator) ^^^^^^^^^^^^^^ File "/root/miniconda3/envs/llama/lib/python3.11/site-packages/gradio/utils.py", line 815, in gen_wrapper response = next(iterator) ^^^^^^^^^^^^^^ File 
"/root/autodl-tmp/LLaMA-Factory/src/llamafactory/webui/components/export.py", line 103, in save_model export_model(args) File "/root/autodl-tmp/LLaMA-Factory/src/llamafactory/train/tuner.py", line 76, in export_model raise ValueError("Cannot merge adapters to a quantized model.") ValueError: Cannot merge adapters to a quantized model.
Expected behavior
No response
Others
No response