I ran "python baseline.py -b 256 -d market1501 -a resnet50 --evaluate --resume /path/of/model_best.pth.tar" for testing, using your dataset and model.
The only change I made was reducing the batch size from 256 to 16.
The following errors occurred, and I don't know how to solve them.
.
.
Extract Features: [978/1206] Time 0.048 (0.049) Data 0.002 (0.002)
Extract Features: [979/1206] Time 0.049 (0.049) Data 0.002 (0.002)
Traceback (most recent call last):
File "baseline.py", line 200, in
main(parser.parse_args())
File "baseline.py", line 117, in main
top1, mAP = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, rerank_topk=100, dataset=args.dataset)
File "/home/xiaodui/reid/FDGAN/FD-GAN-master/reid/evaluators.py", line 193, in evaluate
features, = extract_features(self.base_model, data_loader)
File "/home/xiaodui/re_id/FDGAN/FD-GAN-master/reid/evaluators.py", line 55, in extractfeatures
for i, (imgs, fnames, pids, ) in enumerate(data_loader):
File "/usr/local/lib/python3.5/dist-packages/torch/utils/data/dataloader.py", line 330, in next
idx, batch = self._get_batch()
File "/usr/local/lib/python3.5/dist-packages/torch/utils/data/dataloader.py", line 309, in _get_batch
return self.data_queue.get()
File "/usr/lib/python3.5/multiprocessing/queues.py", line 345, in get
return ForkingPickler.loads(res)
File "/usr/local/lib/python3.5/dist-packages/torch/multiprocessing/reductions.py", line 151, in rebuild_storage_fd
fd = df.detach()
File "/usr/lib/python3.5/multiprocessing/resource_sharer.py", line 58, in detach
return reduction.recv_handle(conn)
File "/usr/lib/python3.5/multiprocessing/reduction.py", line 181, in recv_handle
return recvfds(s, 1)[0]
File "/usr/lib/python3.5/multiprocessing/reduction.py", line 160, in recvfds
len(ancdata))
RuntimeError: received 0 items of ancdata
Exception ignored in: <bound method _DataLoaderIter.del of <torch.utils.data.dataloader._DataLoaderIter object at 0x7f4db66665f8>>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/torch/utils/data/dataloader.py", line 399, in del
self._shutdown_workers()
File "/usr/local/lib/python3.5/dist-packages/torch/utils/data/dataloader.py", line 378, in _shutdown_workers
self.worker_result_queue.get()
File "/usr/lib/python3.5/multiprocessing/queues.py", line 345, in get
return ForkingPickler.loads(res)
File "/usr/local/lib/python3.5/dist-packages/torch/multiprocessing/reductions.py", line 151, in rebuild_storage_fd
fd = df.detach()
File "/usr/lib/python3.5/multiprocessing/resource_sharer.py", line 57, in detach
with _resource_sharer.get_connection(self._id) as conn:
File "/usr/lib/python3.5/multiprocessing/resource_sharer.py", line 87, in get_connection
c = Client(address, authkey=process.current_process().authkey)
File "/usr/lib/python3.5/multiprocessing/connection.py", line 487, in Client
c = SocketClient(address)
File "/usr/lib/python3.5/multiprocessing/connection.py", line 614, in SocketClient
s.connect(address)
ConnectionRefusedError: [Errno 111] Connection refused
I ran "python baseline.py -b 256 -d market1501 -a resnet50 --evaluate --resume /path/of/model_best.pth.tar" for testing, using your dataset and model. The only change I made was reducing the batch size from 256 to 16. The following errors occurred, and I don't know how to solve them. . . Extract Features: [978/1206] Time 0.048 (0.049) Data 0.002 (0.002)
main(parser.parse_args())
File "baseline.py", line 117, in main
top1, mAP = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, rerank_topk=100, dataset=args.dataset)
File "/home/xiaodui/reid/FDGAN/FD-GAN-master/reid/evaluators.py", line 193, in evaluate
features, = extract_features(self.base_model, data_loader)
File "/home/xiaodui/re_id/FDGAN/FD-GAN-master/reid/evaluators.py", line 55, in extractfeatures
for i, (imgs, fnames, pids, ) in enumerate(data_loader):
File "/usr/local/lib/python3.5/dist-packages/torch/utils/data/dataloader.py", line 330, in next
idx, batch = self._get_batch()
File "/usr/local/lib/python3.5/dist-packages/torch/utils/data/dataloader.py", line 309, in _get_batch
return self.data_queue.get()
File "/usr/lib/python3.5/multiprocessing/queues.py", line 345, in get
return ForkingPickler.loads(res)
File "/usr/local/lib/python3.5/dist-packages/torch/multiprocessing/reductions.py", line 151, in rebuild_storage_fd
fd = df.detach()
File "/usr/lib/python3.5/multiprocessing/resource_sharer.py", line 58, in detach
return reduction.recv_handle(conn)
File "/usr/lib/python3.5/multiprocessing/reduction.py", line 181, in recv_handle
return recvfds(s, 1)[0]
File "/usr/lib/python3.5/multiprocessing/reduction.py", line 160, in recvfds
len(ancdata))
RuntimeError: received 0 items of ancdata
Exception ignored in: <bound method _DataLoaderIter.del of <torch.utils.data.dataloader._DataLoaderIter object at 0x7f4db66665f8>>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/torch/utils/data/dataloader.py", line 399, in del
self._shutdown_workers()
File "/usr/local/lib/python3.5/dist-packages/torch/utils/data/dataloader.py", line 378, in _shutdown_workers
self.worker_result_queue.get()
File "/usr/lib/python3.5/multiprocessing/queues.py", line 345, in get
return ForkingPickler.loads(res)
File "/usr/local/lib/python3.5/dist-packages/torch/multiprocessing/reductions.py", line 151, in rebuild_storage_fd
fd = df.detach()
File "/usr/lib/python3.5/multiprocessing/resource_sharer.py", line 57, in detach
with _resource_sharer.get_connection(self._id) as conn:
File "/usr/lib/python3.5/multiprocessing/resource_sharer.py", line 87, in get_connection
c = Client(address, authkey=process.current_process().authkey)
File "/usr/lib/python3.5/multiprocessing/connection.py", line 487, in Client
c = SocketClient(address)
File "/usr/lib/python3.5/multiprocessing/connection.py", line 614, in SocketClient
s.connect(address)
ConnectionRefusedError: [Errno 111] Connection refused
Extract Features: [979/1206] Time 0.049 (0.049) Data 0.002 (0.002)
Traceback (most recent call last): File "baseline.py", line 200, in