Open tomyputw opened 3 weeks ago
The installed package versions are:
absl-py==2.1.0
aiohttp==3.9.5
aiosignal==1.3.1
anndata==0.10.8
array_api_compat==1.8
asttokens==2.4.1
astunparse==1.6.3
async-timeout==4.0.3
attrs==23.2.0
cachetools==5.5.0
Cell_BLAST==0.5.1
celltypist==1.6.3
certifi==2024.7.4
cffi==1.15.1
charset-normalizer==3.3.2
chex==0.1.86
clang==5.0
click==8.1.7
colorama==0.4.6
comm==0.2.2
contextlib2==21.6.0
contourpy==1.2.1
cycler==0.12.1
datasets==3.0.1
debugpy==1.8.2
decorator==5.1.1
dill==0.3.8
docker-pycreds==0.4.0
docrep==0.3.2
et-xmlfile==1.1.0
etils==1.5.2
exceptiongroup==1.2.2
executing==2.0.1
filelock==3.15.4
flatbuffers==24.3.25
flax==0.8.5
fonttools==4.53.1
frozenlist==1.4.1
fsspec==2024.6.1
gast==0.4.0
get-annotations==0.1.2
gitdb==4.0.11
GitPython==3.1.43
google-auth==2.35.0
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
grpcio==1.66.2
h5py==3.1.0
huggingface-hub==0.25.1
idna==3.7
igraph==0.11.6
importlib_metadata==8.2.0
importlib_resources==6.4.0
ipykernel==6.29.5
ipython==8.18.1
jax==0.4.30
jaxlib==0.4.30
jedi==0.19.1
Jinja2==3.1.4
joblib==1.4.2
jupyter_client==8.6.2
jupyter_core==5.7.2
kagglehub==0.3.3
keras==2.10.0
Keras-Preprocessing==1.1.2
kiwisolver==1.4.5
legacy-api-wrap==1.4
leidenalg==0.10.2
libclang==18.1.1
lightning==2.1.4
lightning-utilities==0.11.6
llvmlite==0.43.0
loompy==3.0.7
ludwig==0.10.4
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==2.1.5
matplotlib==3.9.2
matplotlib-inline==0.1.7
mdurl==0.1.2
ml-collections==0.1.1
ml-dtypes==0.4.0
mpmath==1.3.0
msgpack==1.0.8
mudata==0.2.4
multidict==6.0.5
multipledispatch==1.0.0
multiprocess==0.70.16
natsort==8.4.0
nest-asyncio==1.6.0
networkx==3.2.1
numba==0.60.0
numpy==1.26.4
numpy-groupies==0.11.2
numpyro==0.15.1
oauthlib==3.2.2
openpyxl==3.1.5
opt-einsum==3.3.0
optax==0.2.3
Optunity==1.1.1
orbax-checkpoint==0.5.23
packaging==24.1
pandas==2.2.2
parso==0.8.4
patsy==0.5.6
pillow==10.4.0
platformdirs==4.2.2
plotly==5.24.1
prompt_toolkit==3.0.47
pronto==0.12.2
protobuf==3.19.6
psutil==6.0.0
pure_eval==0.2.3
pyarrow==17.0.0
pyasn1==0.6.1
pyasn1_modules==0.4.1
Pygments==2.18.0
pynndescent==0.5.13
pynvml==11.5.3
pyparsing==3.1.2
pyro-api==0.1.2
pyro-ppl==1.9.1
python-dateutil==2.9.0.post0
pytorch-lightning==2.3.3
pytz==2024.1
pytz-deprecation-shim==0.1.0.post0
pywin32==306
PyYAML==6.0.1
pyzmq==26.0.3
requests==2.32.3
requests-oauthlib==2.0.0
rich==13.7.1
rpy2==3.5.12
rsa==4.9
scanpy==1.10.2
scikit-learn==0.24.0
scikit-misc==0.2.0rc1
scipy==1.13.1
scvi-tools==1.1.5
seaborn==0.13.2
sentry-sdk==2.15.0
session-info==1.0.0
setproctitle==1.3.3
six==1.15.0
smmap==5.0.1
stack-data==0.6.3
statsmodels==0.14.2
stdlib-list==0.10.0
sympy==1.13.1
tenacity==9.0.0
tensorboard==2.10.1
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow-addons==0.22.0
tensorflow-estimator==2.10.0
tensorflow-gpu==2.10.1
tensorflow-io-gcs-filesystem==0.31.0
tensorstore==0.1.63
termcolor==1.1.0
texttable==1.7.0
threadpoolctl==3.5.0
toolz==0.12.1
torch @ file:///E:/ctgan/torch-2.0.0%2Bcu118-cp39-cp39-win_amd64.whl
torchmetrics==1.4.0.post0
torchvision @ file:///E:/gcuda/torchvision-0.15.0%2Bcu118-cp39-cp39-win_amd64.whl
tornado==6.4.1
tqdm==4.66.4
traitlets==5.14.3
typeguard==2.13.3
typing_extensions==4.12.2
tzdata==2024.1
tzlocal==4.3.1
umap-learn==0.5.6
urllib3==2.2.2
wandb==0.18.3
wcwidth==0.2.13
Werkzeug==3.0.4
wrapt==1.12.1
xgboost==2.1.1
xxhash==3.5.0
yarl==1.9.4
zipp==3.19.2
I train the model and build the BLAST index:
blast = cb.blast.BLAST(models, train) #.build_empirical()
This works well, but when I query it, it returns an error:
test_pred = blast.query(test) [INFO] Cell BLAST: Projecting to latent space... [INFO] Cell BLAST: Doing nearest neighbor search... [INFO] Cell BLAST: Merging hits across models... [INFO] Cell BLAST: Computing posterior distribution distances...
ValueError Traceback (most recent call last) Cell In[18], line 1 ----> 1 test_pred = blast.query(test)
File e:\gcuda\env\lib\site-packages\Cell_BLAST\blast.py:875, in BLAST.query(self, query, n_neighbors, store_dataset, n_jobs, random_seed) 872 else: 873 utils.logger.info("Computing posterior distribution distances...") 874 query_posterior = np.stack( --> 875 joblib.Parallel(n_jobs=min(n_jobs, len(self)), backend="loky")( 876 joblib.delayed(model.inference)( 877 query, n_posterior=self.n_posterior, random_seed=random_seed 878 ) 879 for model in self.models 880 ), 881 axis=1, 882 ) # n_cells n_models n_posterior_samples latent_dim 883 ref_posterior = np.stack( 884 self._get_posterior(n_jobs, random_seed, idx=hitsu) 885 ) # n_cells n_models n_posterior_samples latent_dim 886 distance_metric = DISTANCE_METRIC_ACROSS_MODELS[self.distance_metric]
File e:\gcuda\env\lib\site-packages\joblib\parallel.py:1918, in Parallel.call(self, iterable) 1916 output = self._get_sequential_output(iterable) 1917 next(output) -> 1918 return output if self.return_generator else list(output) 1920 # Let's create an ID that uniquely identifies the current call. If the 1921 # call is interrupted early and that the same instance is immediately 1922 # re-used, this id will be used to prevent workers that were 1923 # concurrently finalizing a task from the previous call to run the 1924 # callback. 1925 with self._lock:
File e:\gcuda\env\lib\site-packages\joblib\parallel.py:1847, in Parallel._get_sequential_output(self, iterable) 1845 self.n_dispatched_batches += 1 1846 self.n_dispatched_tasks += 1 -> 1847 res = func(*args, **kwargs) 1848 self.n_completed_tasks += 1 1849 self.print_progress()
File e:\gcuda\env\lib\site-packages\Cell_BLAST\directi.py:530, in DIRECTi.inference(self, adata, batch_size, n_posterior, progress_bar, priority, random_seed) 528 if priority == "speed": 529 if scipy.sparse.issparse(x): --> 530 xrep = x.tocsr()[np.repeat(np.arange(x.shape[0]), n_posterior)] 531 else: 532 xrep = np.repeat(x, n_posterior, axis=0)
File e:\gcuda\env\lib\site-packages\scipy\sparse_index.py:83, in IndexMixin.getitem(self, key) 81 return self._get_arrayXint(row, col) 82 elif isinstance(col, slice): ---> 83 return self._get_arrayXslice(row, col) 84 else: # row.ndim == 2 85 if isinstance(col, INT_TYPES):
File e:\gcuda\env\lib\site-packages\scipy\sparse_csr.py:217, in _csr_base._get_arrayXslice(self, row, col) 215 col = np.arange(*col.indices(self.shape[1])) 216 return self._get_arrayXarray(row, col) --> 217 return self._major_index_fancy(row)._get_submatrix(minor=col)
File e:\gcuda\env\lib\site-packages\scipy\sparse_compressed.py:708, in _cs_matrix._major_index_fancy(self, idx) 705 np.cumsum(row_nnz, out=res_indptr[1:]) 707 nnz = res_indptr[-1] --> 708 res_indices = np.empty(nnz, dtype=idx_dtype) 709 res_data = np.empty(nnz, dtype=self.dtype) 710 csr_row_index( 711 M, 712 indices, (...) 717 res_data 718 )
ValueError: negative dimensions are not allowed