DaohanZhang / No-name

cannot use docker in jupyter #2

Open DaohanZhang opened 1 week ago

DaohanZhang commented 1 week ago

notebook: /home/zhangdaohan20h/dredge-main/notebook/wulab_DREDge_ks2.ipynb, env: kilosort4 (under zhangdaohan20h)

import numpy as np
import matplotlib.pyplot as plt

import spikeinterface.full as si  # full API import (not core only)
import spikeinterface.extractors as se
import spikeinterface.preprocessing as spre
import spikeinterface.sorters as ss
import spikeinterface.postprocessing as spost
import spikeinterface.qualitymetrics as sqm
import spikeinterface.comparison as sc
import spikeinterface.exporters as sexp
import spikeinterface.curation as scur
import spikeinterface.widgets as sw

import os
print('#')
print('#')

rec = se.read_spikeglx("/home/zhangdaohan20h/public_data/NPX_examples/Pt01", 
                        load_sync_channel=False, stream_id="imec0.ap")

rec = si.astype(rec, np.float32)
rec = si.bandpass_filter(rec)
rec = si.common_reference(rec)

rec
# optional
# rec = rec.save_to_folder("/local/scratch", n_jobs=4, chunk_duration="5s")

import docker
sorting_KS2 = ss.run_sorter(
    sorter_name="kilosort2_5",
    recording=rec,
    docker_image='docker.mrxn.net/spikeinterface/kilosort2_5-compiled-base:latest',
    n_jobs=20,
    verbose=True,
    installation_mode='dev',
)

print('finish')
#'docker.mrxn.net/spikeinterface/kilosort2_5-compiled-base:latest'

#sorting_KS2 = ss.run_sorter(sorter_name="kilosort2_5", recording=rec, docker_image=False, n_jobs = 20, verbose=True)
#sorting_KS3 = ss.run_sorter(sorter_name="kilosort3", recording=rec, docker_image=False, n_jobs = 20, verbose=True)
#
#
/home/zhangdaohan20h/public_data/NPX_examples/Pt01/Pt01_g0_t0.imec0.ap.meta
/home/zhangdaohan20h/public_data/NPX_examples/Pt01/Pt01_g0_t0.imec0.lf.meta
---------------------------------------------------------------------------
PermissionError                           Traceback (most recent call last)
File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/connectionpool.py:789, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)
    788 # Make the request on the HTTPConnection object
--> 789 response = self._make_request(
    790     conn,
    791     method,
    792     url,
    793     timeout=timeout_obj,
    794     body=body,
    795     headers=headers,
    796     chunked=chunked,
    797     retries=retries,
    798     response_conn=response_conn,
    799     preload_content=preload_content,
    800     decode_content=decode_content,
    801     **response_kw,
    802 )
    804 # Everything went great!

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/connectionpool.py:495, in HTTPConnectionPool._make_request(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)
    494 try:
--> 495     conn.request(
    496         method,
    497         url,
    498         body=body,
    499         headers=headers,
    500         chunked=chunked,
    501         preload_content=preload_content,
    502         decode_content=decode_content,
    503         enforce_content_length=enforce_content_length,
    504     )
    506 # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
    507 # legitimately able to close the connection after sending a valid response.
    508 # With this behaviour, the received response is still readable.

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/connection.py:398, in HTTPConnection.request(self, method, url, body, headers, chunked, preload_content, decode_content, enforce_content_length)
    397     self.putheader(header, value)
--> 398 self.endheaders()
    400 # If we're given a body we start sending that in chunks.

File ~/.conda/envs/kilosort4/lib/python3.9/http/client.py:1280, in HTTPConnection.endheaders(self, message_body, encode_chunked)
   1279     raise CannotSendHeader()
-> 1280 self._send_output(message_body, encode_chunked=encode_chunked)

File ~/.conda/envs/kilosort4/lib/python3.9/http/client.py:1040, in HTTPConnection._send_output(self, message_body, encode_chunked)
   1039 del self._buffer[:]
-> 1040 self.send(msg)
   1042 if message_body is not None:
   1043 
   1044     # create a consistent interface to message_body

File ~/.conda/envs/kilosort4/lib/python3.9/http/client.py:980, in HTTPConnection.send(self, data)
    979 if self.auto_open:
--> 980     self.connect()
    981 else:

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/transport/unixconn.py:26, in UnixHTTPConnection.connect(self)
     25 sock.settimeout(self.timeout)
---> 26 sock.connect(self.unix_socket)
     27 self.sock = sock

PermissionError: [Errno 13] Permission denied

During handling of the above exception, another exception occurred:

ProtocolError                             Traceback (most recent call last)
File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/requests/adapters.py:667, in HTTPAdapter.send(self, request, stream, timeout, verify, cert, proxies)
    666 try:
--> 667     resp = conn.urlopen(
    668         method=request.method,
    669         url=url,
    670         body=request.body,
    671         headers=request.headers,
    672         redirect=False,
    673         assert_same_host=False,
    674         preload_content=False,
    675         decode_content=False,
    676         retries=self.max_retries,
    677         timeout=timeout,
    678         chunked=chunked,
    679     )
    681 except (ProtocolError, OSError) as err:

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/connectionpool.py:843, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)
    841     new_e = ProtocolError("Connection aborted.", new_e)
--> 843 retries = retries.increment(
    844     method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]
    845 )
    846 retries.sleep()

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/util/retry.py:474, in Retry.increment(self, method, url, response, error, _pool, _stacktrace)
    473 if read is False or method is None or not self._is_method_retryable(method):
--> 474     raise reraise(type(error), error, _stacktrace)
    475 elif read is not None:

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/util/util.py:38, in reraise(tp, value, tb)
     37 if value.__traceback__ is not tb:
---> 38     raise value.with_traceback(tb)
     39 raise value

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/connectionpool.py:789, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)
    788 # Make the request on the HTTPConnection object
--> 789 response = self._make_request(
    790     conn,
    791     method,
    792     url,
    793     timeout=timeout_obj,
    794     body=body,
    795     headers=headers,
    796     chunked=chunked,
    797     retries=retries,
    798     response_conn=response_conn,
    799     preload_content=preload_content,
    800     decode_content=decode_content,
    801     **response_kw,
    802 )
    804 # Everything went great!

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/connectionpool.py:495, in HTTPConnectionPool._make_request(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)
    494 try:
--> 495     conn.request(
    496         method,
    497         url,
    498         body=body,
    499         headers=headers,
    500         chunked=chunked,
    501         preload_content=preload_content,
    502         decode_content=decode_content,
    503         enforce_content_length=enforce_content_length,
    504     )
    506 # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
    507 # legitimately able to close the connection after sending a valid response.
    508 # With this behaviour, the received response is still readable.

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/urllib3/connection.py:398, in HTTPConnection.request(self, method, url, body, headers, chunked, preload_content, decode_content, enforce_content_length)
    397     self.putheader(header, value)
--> 398 self.endheaders()
    400 # If we're given a body we start sending that in chunks.

File ~/.conda/envs/kilosort4/lib/python3.9/http/client.py:1280, in HTTPConnection.endheaders(self, message_body, encode_chunked)
   1279     raise CannotSendHeader()
-> 1280 self._send_output(message_body, encode_chunked=encode_chunked)

File ~/.conda/envs/kilosort4/lib/python3.9/http/client.py:1040, in HTTPConnection._send_output(self, message_body, encode_chunked)
   1039 del self._buffer[:]
-> 1040 self.send(msg)
   1042 if message_body is not None:
   1043 
   1044     # create a consistent interface to message_body

File ~/.conda/envs/kilosort4/lib/python3.9/http/client.py:980, in HTTPConnection.send(self, data)
    979 if self.auto_open:
--> 980     self.connect()
    981 else:

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/transport/unixconn.py:26, in UnixHTTPConnection.connect(self)
     25 sock.settimeout(self.timeout)
---> 26 sock.connect(self.unix_socket)
     27 self.sock = sock

ProtocolError: ('Connection aborted.', PermissionError(13, 'Permission denied'))

During handling of the above exception, another exception occurred:

ConnectionError                           Traceback (most recent call last)
File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/api/client.py:223, in APIClient._retrieve_server_version(self)
    222 try:
--> 223     return self.version(api_version=False)["ApiVersion"]
    224 except KeyError as ke:

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/api/daemon.py:181, in DaemonApiMixin.version(self, api_version)
    180 url = self._url("/version", versioned_api=api_version)
--> 181 return self._result(self._get(url), json=True)

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/utils/decorators.py:44, in update_headers.<locals>.inner(self, *args, **kwargs)
     43         kwargs['headers'].update(self._general_configs['HttpHeaders'])
---> 44 return f(self, *args, **kwargs)

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/api/client.py:246, in APIClient._get(self, url, **kwargs)
    244 @update_headers
    245 def _get(self, url, **kwargs):
--> 246     return self.get(url, **self._set_request_timeout(kwargs))

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/requests/sessions.py:602, in Session.get(self, url, **kwargs)
    601 kwargs.setdefault("allow_redirects", True)
--> 602 return self.request("GET", url, **kwargs)

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/requests/sessions.py:589, in Session.request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
    588 send_kwargs.update(settings)
--> 589 resp = self.send(prep, **send_kwargs)
    591 return resp

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/requests/sessions.py:703, in Session.send(self, request, **kwargs)
    702 # Send the request
--> 703 r = adapter.send(request, **kwargs)
    705 # Total elapsed time of the request (approximately)

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/requests/adapters.py:682, in HTTPAdapter.send(self, request, stream, timeout, verify, cert, proxies)
    681 except (ProtocolError, OSError) as err:
--> 682     raise ConnectionError(err, request=request)
    684 except MaxRetryError as e:

ConnectionError: ('Connection aborted.', PermissionError(13, 'Permission denied'))

The above exception was the direct cause of the following exception:

DockerException                           Traceback (most recent call last)
Cell In[2], line 62
     58 # optional
     59 # rec = rec.save_to_folder("/local/scratch", n_jobs=4, chunk_duration="5s")
     61 import docker
---> 62 sorting_KS2 = ss.run_sorter(sorter_name="kilosort2_5", recording=rec, docker_image='docker.mrxn.net/spikeinterface/kilosort2_5-compiled-base:latest', n_jobs = 20, verbose=True, installation_mode = 'dev')
     64 print('finish')
     65 #'docker.mrxn.net/spikeinterface/kilosort2_5-compiled-base:latest'
     66 
     67 #sorting_KS2 = ss.run_sorter(sorter_name="kilosort2_5", recording=rec, docker_image=False, n_jobs = 20, verbose=True)
     68 #sorting_KS3 = ss.run_sorter(sorter_name="kilosort3", recording=rec, docker_image=False, n_jobs = 20, verbose=True)

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/spikeinterface/sorters/runsorter.py:210, in run_sorter(sorter_name, recording, folder, remove_existing_folder, delete_output_folder, verbose, raise_error, docker_image, singularity_image, delete_container_files, with_output, output_folder, **sorter_params)
    204         if not has_spython():
    205             raise RuntimeError(
    206                 "The python `spython` package must be installed to "
    207                 "run singularity. Install with `pip install spython`"
    208             )
--> 210     return run_sorter_container(
    211         container_image=container_image,
    212         mode=mode,
    213         **common_kwargs,
    214     )
    216 return run_sorter_local(**common_kwargs)

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/spikeinterface/sorters/runsorter.py:525, in run_sorter_container(sorter_name, recording, mode, container_image, folder, remove_existing_folder, delete_output_folder, verbose, raise_error, with_output, delete_container_files, extra_requirements, installation_mode, spikeinterface_version, spikeinterface_folder_source, output_folder, **sorter_params)
    522     py_user_base_folder.mkdir(parents=True, exist_ok=True)
    523     py_user_base_unix = path_to_unix(py_user_base_folder)
--> 525 container_client = ContainerClient(mode, container_image, volumes, py_user_base_unix, extra_kwargs)
    526 if verbose:
    527     print("Starting container")

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/spikeinterface/sorters/container_tools.py:78, in ContainerClient.__init__(self, mode, container_image, volumes, py_user_base, extra_kwargs)
     75 if mode == "docker":
     76     import docker
---> 78     client = docker.from_env()
     79     if container_requires_gpu is not None:
     80         extra_kwargs.pop("container_requires_gpu")

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/client.py:94, in DockerClient.from_env(cls, **kwargs)
     92 version = kwargs.pop('version', None)
     93 use_ssh_client = kwargs.pop('use_ssh_client', False)
---> 94 return cls(
     95     timeout=timeout,
     96     max_pool_size=max_pool_size,
     97     version=version,
     98     use_ssh_client=use_ssh_client,
     99     **kwargs_from_env(**kwargs)
    100 )

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/client.py:45, in DockerClient.__init__(self, *args, **kwargs)
     44 def __init__(self, *args, **kwargs):
---> 45     self.api = APIClient(*args, **kwargs)

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/api/client.py:207, in APIClient.__init__(self, base_url, version, timeout, tls, user_agent, num_pools, credstore_env, use_ssh_client, max_pool_size)
    202 # version detection needs to be after unix adapter mounting
    203 if version is None or (isinstance(
    204                         version,
    205                         str
    206                         ) and version.lower() == 'auto'):
--> 207     self._version = self._retrieve_server_version()
    208 else:
    209     self._version = version

File ~/.conda/envs/kilosort4/lib/python3.9/site-packages/docker/api/client.py:230, in APIClient._retrieve_server_version(self)
    225     raise DockerException(
    226         'Invalid response from docker daemon: key "ApiVersion"'
    227         ' is missing.'
    228     ) from ke
    229 except Exception as e:
--> 230     raise DockerException(
    231         f'Error while fetching server API version: {e}'
    232     ) from e

DockerException: Error while fetching server API version: ('Connection aborted.', PermissionError(13, 'Permission denied'))
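The root cause is the `PermissionError: [Errno 13]` raised from `sock.connect(self.unix_socket)`: the Jupyter kernel's user cannot open the Docker daemon socket. A minimal diagnostic sketch, assuming the default socket path /var/run/docker.sock and the conventional "docker" group (both may differ on a managed cluster):

import grp
import os
import stat

SOCK = "/var/run/docker.sock"  # default daemon socket; adjust if the daemon listens elsewhere

st = os.stat(SOCK)
print("socket mode :", stat.filemode(st.st_mode))
print("socket group:", grp.getgrgid(st.st_gid).gr_name)
print("my groups   :", [grp.getgrgid(g).gr_name for g in os.getgroups()])
print("can connect :", os.access(SOCK, os.R_OK | os.W_OK))
# If the socket's group (usually "docker") is missing from "my groups", the
# docker client fails with Errno 13 exactly as in the traceback. The usual fix
# is `sudo usermod -aG docker $USER` followed by a NEW login session; note that
# os.system('newgrp docker') cannot help, because the group change only applies
# inside the spawned child shell, not to the running Python process.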
DaohanZhang commented 1 week ago

While trying to fix this issue, another problem occurred unexpectedly, in code that had run quite smoothly before I attempted the fix above.

code: /home/zhangdaohan20h/dredge-main/notebook/prepro.py, env: kilosort4

import numpy as np
import matplotlib.pyplot as plt

import spikeinterface.full as si  # full API import (not core only)
import spikeinterface.extractors as se
import spikeinterface.preprocessing as spre
import spikeinterface.sorters as ss
import spikeinterface.postprocessing as spost
import spikeinterface.qualitymetrics as sqm
import spikeinterface.comparison as sc
import spikeinterface.exporters as sexp
import spikeinterface.curation as scur
import spikeinterface.widgets as sw

import os
#os.system('newgrp docker')
#os.system('export KILOSORT2_5_PATH=/home/zhangdaohan20h/Kilosort-kilosort25/')
#os.system('export KILOSORT3_PATH=/home/zhangdaohan20h/Kilosort-kilosort3/')
#Kilosort3Sorter.set_kilosort3_path('/home/zhangdaohan20h/Kilosort-kilosort3/')
#os.system('export SPIKEINTERFACE_DEV_PATH=/home/zhangdaohan20h/spikeinterface-main/')
#os.system('export SPIKEINTERFACE_DEV_PATH=~/.conda/envs/kilosort4/lib/python3.9/site-packages/')
#from spikeinterface.sortingcomponents.motion_interpolation import interpolate_motion
# these imports will move around as things evolve
# this is where the main online registration lives now
#from dredge.dredge_lfp import register_online_lfp
# this has some helpers for plotting
#import dredge.motion_util as mu
print('#')

# AP registration lives here for now
#from dredge.dredge_ap import register
# spikeinterface peak detection + localization
#from spikeinterface.sortingcomponents.peak_detection import detect_peaks
#from spikeinterface.sortingcomponents.peak_localization import localize_peaks
print('#')

lfpraw = se.read_spikeglx("/home/zhangdaohan20h/public_data/NPX_examples/Pt01/", load_sync_channel=False, 
                          stream_id="imec0.lf")
## the folder contains Pt01_g0_t0.imec0.lf.bin & Pt01_g0_t0.imec0.lf.meta
lfpraw
# convert to floating point
lfprec = si.astype(lfpraw, np.float32)
# ensure depth order
lfprec = si.depth_order(lfprec)
# optional: remove channels outside the brain
# you could use similar logic to extract a single column
# or trim both ends of the probe, whatever you like
cutoff_um = 8000
if cutoff_um is not None:
    geom = lfprec.get_channel_locations()
    lfprec = lfprec.remove_channels(lfprec.channel_ids[geom[:, 1] > cutoff_um])

# bandpass filter
# we do an aggressive one since we plan to downsample
lfprec = si.bandpass_filter(
    lfprec,
    freq_min=0.5,
    freq_max=250,
    margin_ms=1000,
    filter_order=3,
    dtype="float32",
    add_reflect_padding=True,
)
# fancy bad-channel detection and removal from the International Brain Lab
bad_chans, labels = si.detect_bad_channels(lfprec, psd_hf_threshold=1.4, num_random_chunks=100, seed=0)
print("Found bad channels", bad_chans)
lfprec = lfprec.remove_channels(bad_chans)
# correct for ADC sample shifts
lfprec = si.phase_shift(lfprec)
# common median reference
lfprec = si.common_reference(lfprec)
# downsample to 250Hz
lfprec = si.resample(lfprec, 250, margin_ms=1000)
# spatial filters: second derivative and averaging of same-depth channels
lfprec = si.directional_derivative(lfprec, order=2, edge_order=1)
lfprec = si.average_across_direction(lfprec)

from spikeinterface.sortingcomponents.motion import estimate_motion
motion = estimate_motion(lfprec, method='dredge_lfp', rigid=False, progress_bar=True, max_disp_um=1000)
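Before interpolating, it can help to eyeball the estimate. A short sketch, assuming spikeinterface's Motion object exposes per-segment displacement / temporal_bins_s plus spatial_bins_um (attribute names as in current spikeinterface; verify against the installed version):

# quick visual check of the motion estimate for segment 0
disp = motion.displacement[0]  # shape (num_time_bins, num_spatial_bins)
fig, ax = plt.subplots(figsize=(8, 4))
im = ax.imshow(
    disp.T, aspect="auto", origin="lower",
    extent=[motion.temporal_bins_s[0][0], motion.temporal_bins_s[0][-1],
            motion.spatial_bins_um[0], motion.spatial_bins_um[-1]],
)
ax.set_xlabel("time (s)")
ax.set_ylabel("depth (um)")
fig.colorbar(im, ax=ax, label="displacement (um)")
plt.show()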

# Load the recording  
#from spikeinterface.core import BinaryFolderRecording  
#rec = si.read_zarr('recording.zarr')  
rec = se.read_spikeglx("/home/zhangdaohan20h/public_data/NPX_examples/Pt01", 
                        load_sync_channel=False, stream_id="imec0.ap")

from spikeinterface.sortingcomponents.motion import interpolate_motion
rec = si.astype(rec, np.float32)
print(motion.dim)
rec = interpolate_motion(rec, motion, border_mode='remove_channels', 
                             spatial_interpolation_method='kriging', sigma_um=20.0, p=1, 
                             num_closest=3, interpolation_time_bin_centers_s=None, 
                             interpolation_time_bin_size_s=None, dtype=None)

rec = si.bandpass_filter(rec)
rec = si.common_reference(rec)
'''
In common median reference (CMR), the "median"/"average" operators and the
"global"/"local"/"single" reference modes differ as follows:

Operator:
- "median" uses the median across channels as the reference signal. The median
  is less sensitive to outliers than the mean, so it is more robust.
- "average" uses the mean across channels as the reference signal. The mean is
  faster to compute but more easily affected by outliers than the median.

Reference mode:
- "global" uses the median/mean of all channels as the reference signal.
- "local" uses the median/mean of neighboring channels as the reference,
  which can remove local noise better.
- "single" uses the median/mean of a single channel as the reference. This is
  the simplest mode but may not remove common noise well.
'''
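For reference, here is the si.common_reference(rec) call above with its defaults written out explicitly (reference and operator are the parameter names of spikeinterface.preprocessing.common_reference):

# equivalent explicit form of the call above -- global median reference:
# rec = si.common_reference(rec, reference="global", operator="median")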
rec
# optional
# Save the recording  
rec.save(folder='recording', format='zarr', overwrite=True,
         engine='joblib', engine_kwargs={"n_jobs": 20})  # or format='binary_folder'

import docker
sorting_KS2 = ss.run_sorter(
    sorter_name="kilosort2_5",
    recording=rec,
    docker_image='docker.mrxn.net/spikeinterface/kilosort2_5-compiled-base:latest',
    n_jobs=20,
    verbose=True,
    installation_mode='dev',
)

'''
#'docker.mrxn.net/spikeinterface/kilosort2_5-compiled-base:latest'

#sorting_KS2 = ss.run_sorter(sorter_name="kilosort2_5", recording=rec, docker_image=False, n_jobs = 20, verbose=True)
#sorting_KS3 = ss.run_sorter(sorter_name="kilosort3", recording=rec, docker_image=False, n_jobs = 20, verbose=True)

print('finish')
sorting_KS2.save(folder='sorting', format='npz_folder')  
'''
from spikeinterface.core import NpzFolderSorting
sorting = NpzFolderSorting('sorting')
folder = "analyzer_folder"
from spikeinterface.core import create_sorting_analyzer, load_sorting_analyzer
analyzer = create_sorting_analyzer(
    sorting=sorting,
    recording=rec,
    overwrite=True,
    format="binary_folder",
    return_scaled=True,  # this is the default, to attempt to return scaled traces
    folder=folder,
    n_jobs=20,
)
print(analyzer)
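With the analyzer created, the usual next step is to compute extensions on it. A minimal sketch using standard spikeinterface extension names (the order matters: waveforms need random_spikes, templates need waveforms):

analyzer.compute("random_spikes")
analyzer.compute("waveforms")
analyzer.compute("templates")
analyzer.compute("noise_levels")
analyzer.compute("quality_metrics")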