Closed fengcunhan closed 9 months ago
直接把多线程那部分改成单线程就可以了：

```python
for seg in (
    speech_array_indices
    if len(speech_array_indices) == 1
    else tqdm(speech_array_indices)
):
    r = self.whisper_model.transcribe(
        audio[int(seg["start"]): int(seg["end"])],
        task="transcribe",
        language=lang,
        initial_prompt=prompt,
        verbose=False if len(speech_array_indices) == 1 else None,
    )
    r["origin_timestamp"] = seg
    res.append(r)
```
Apple M1 Pro
Python3
from autocut.transcribe import Transcribe
import ssl

# HACK: disable HTTPS certificate verification globally so the Whisper
# model download succeeds on machines with a broken/missing CA bundle.
# NOTE(review): this weakens TLS for the whole process — confirm it is
# acceptable, or fix the local certificate store instead.
ssl._create_default_https_context = ssl._create_unverified_context
class Args:
    """Minimal stand-in for autocut's CLI argument namespace.

    Holds the subset of attributes that Transcribe reads at construction
    time; callers attach the remaining attributes (vad, whisper_mode,
    device, lang, prompt) after instantiation.
    """

    # Fixed from `def init(...)`: markdown swallowed the double
    # underscores in the paste — without __init__ the constructor
    # is never called and every attribute access fails.
    def __init__(self, inputs, encoding, force, bitrate, whisper_model):
        self.inputs = inputs            # list of input media file paths
        self.encoding = encoding        # text encoding for output files
        self.force = force              # overwrite existing outputs if True
        self.bitrate = bitrate          # audio bitrate string, e.g. '64k'
        self.whisper_model = whisper_model  # whisper model size, e.g. 'small'
# Fixed from `if name == "main"`: markdown stripped the dunder
# underscores — the guard must compare __name__ against "__main__".
if __name__ == "__main__":
    args = Args(
        ['/Users/fch/Downloads/worker01701132638810.mp3'],
        'utf-8',
        False,
        '64k',
        'small',
    )
    # Attributes Transcribe reads beyond the constructor arguments.
    args.vad = '-1'
    args.whisper_mode = 'whisper'
    args.device = 'cpu'
    args.lang = 'zh'
    args.prompt = ''
    Transcribe(args).run()
File "/Users/xxx/Documents/github/autocut/autocut/test_cut_mp3.py", line 21, in
Transcribe(args).run()
File "/Users/xx/Documents/github/autocut/autocut/transcribe.py", line 48, in run
transcribe_results = self._transcribe(input, audio, speech_array_indices)
File "/Users/xx/Documents/github/autocut/autocut/transcribe.py", line 97, in _transcribe
self.whisper_model.transcribe(
File "/Users/xxx/Documents/github/autocut/autocut/whisper_model.py", line 103, in transcribe
res = [i.get() for i in sub_res]
File "/Users/xxx/Documents/github/autocut/autocut/whisper_model.py", line 103, in
res = [i.get() for i in sub_res]
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/pool.py", line 774, in get
raise self._value
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/pool.py", line 540, in _handle_tasks
put(task)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
File "/Users/xxx/Documents/github/autocut/venv/lib/python3.10/site-packages/torch/multiprocessing/reductions.py", line 190, in reduce_tensor
storage = tensor._typed_storage()
File "/Users/xxx/Documents/github/autocut/venv/lib/python3.10/site-packages/torch/_tensor.py", line 242, in _typed_storage
untyped_storage = self.untyped_storage()
NotImplementedError: Cannot access storage of SparseTensorImpl