Closed LuckyZxy182 closed 2 years ago
Hello @LuckyZxy182, I've run your code (I had to modify the reader to use the data I had on my machine) and it worked. Which DALI version are you using? Maybe there was a bug that has been fixed since then. In any case, to confirm that there really is a problem, I'd need a self-contained reproduction (you can use data from DALI_extra). The code below works:
# Minimal self-contained DALI repro: Caffe LMDB reader -> GPU ("mixed") JPEG
# decode -> random JPEG re-compression distortion -> crop/mirror/normalize.
import nvidia.dali as dali
from nvidia.dali import fn
from nvidia.dali import types
import os

pipe = dali.Pipeline(batch_size=1, num_threads=4, device_id=0)
test_data_root = os.environ['DALI_EXTRA_PATH']  # path to a DALI_extra checkout
path = os.path.join(test_data_root, 'db', 'lmdb')
with pipe:
    jpegs, labels = fn.readers.caffe(path=path, random_shuffle=True)
    images = fn.decoders.image(jpegs, device="mixed")
    # Per-sample random JPEG quality drawn from [85, 100].
    quality = fn.random.uniform(range=[85, 100], dtype=types.INT32)
    images = fn.jpeg_compression_distortion(images, quality=quality)
    images = fn.crop_mirror_normalize(images,
                                      dtype=types.FLOAT,
                                      crop=(112, 112),
                                      # ImageNet mean/std scaled to the 0..255 range
                                      mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                      std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
                                      mirror=fn.random.coin_flip(probability=0.5),
                                      output_layout="CHW")
    labels = labels.gpu()
    labels = fn.cast(labels, dtype=types.INT64)
    pipe.set_outputs(images, labels)
pipe.build()
# Fix: the original `images, labels, pipe.run()` built and discarded a tuple;
# the pipeline outputs must be unpacked from the run() result.
images, labels = pipe.run()
Thank you for your reply! I'm running on Ubuntu 18.04, CUDA 10.2, Python 3.6.9 and PyTorch 1.6.0. The same runtime error occurs when I use the data from DALI_extra. Here is my code.
import os
import os
import time
import nvidia.dali.pipeline as pipeline
import nvidia.dali.types as types
import nvidia.dali.fn as fn
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
class C2LmdbPipe:
    """Iterable wrapper around a DALI Caffe2-LMDB training pipeline.

    Builds a sharded reader -> mixed-backend JPEG decode -> random JPEG
    re-compression -> crop/mirror/normalize pipeline and exposes it through
    a ``DALIClassificationIterator`` so it can be consumed like a PyTorch
    data loader, yielding ``{'img': ..., 'gt_label': ...}`` dicts.
    """

    def __init__(self, root, batch_size, num_workers, device_id, rank, world_size):
        dali_pipe = pipeline.Pipeline(batch_size=batch_size,
                                      num_threads=num_workers,
                                      device_id=device_id)
        with dali_pipe:
            encoded, targets = fn.readers.caffe2(path=root,
                                                 shard_id=rank,
                                                 num_shards=world_size,
                                                 random_shuffle=True,
                                                 name="Reader",
                                                 pad_last_batch=True)
            decoded = fn.decoders.image(encoded, device="mixed", output_type=types.RGB)
            # Per-sample random JPEG quality drawn from [85, 100].
            jpeg_quality = fn.random.uniform(range=[85, 100], dtype=types.INT32)
            decoded = fn.jpeg_compression_distortion(decoded, quality=jpeg_quality)
            decoded = fn.crop_mirror_normalize(
                decoded,
                dtype=types.FLOAT,
                crop=(112, 112),
                # ImageNet mean/std scaled to the 0..255 range
                mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
                mirror=fn.random.coin_flip(probability=0.5),
                output_layout="CHW")
            targets = targets.gpu()
            targets = fn.cast(targets, dtype=types.INT64)
            dali_pipe.set_outputs(decoded, targets)
        dali_pipe.build()
        self.loader = DALIClassificationIterator(dali_pipe,
                                                 reader_name="Reader",
                                                 dynamic_shape=True,
                                                 auto_reset=True,
                                                 last_batch_policy=LastBatchPolicy.PARTIAL)

    def __next__(self):
        # DALIClassificationIterator yields a list with one dict per GPU;
        # we run single-pipeline, so index 0 holds our batch.
        batch = next(self.loader)
        images = batch[0]["data"]
        targets = batch[0]["label"].squeeze(-1).long()
        return {'img': images, 'gt_label': targets}

    def __iter__(self):
        return self
if __name__ == '__main__':
    # Smoke-test the pipeline against the c2lmdb sample from DALI_extra:
    # single shard (rank 0 of 1), batch 64, 16 worker threads, GPU 0.
    test_data_root = os.environ['DALI_EXTRA_PATH']
    data_dir = os.path.join(test_data_root, 'db', 'c2lmdb')
    print(data_dir)
    train_pipe = C2LmdbPipe(data_dir, 64, 16, 0, 0, 1)
    epochs = 10
    total_start = time.time()
    for e in range(epochs):
        print("******epoch {:0>2d} start********".format(e))
        # Fix: the loop index from enumerate() was never used; iterate directly.
        # auto_reset=True makes the iterator rewind itself at each epoch end.
        for data in train_pipe:
            imgs, labels = data["img"], data["gt_label"]
    elapsed = time.time() - total_start
    print("Total elapsed ", elapsed)
RuntimeError: Critical error in pipeline:
Error when executing GPU operator CropMirrorNormalize encountered:
[/opt/dali/dali/operators/image/crop/crop_mirror_normalize.h:166] Assert on "ImageLayoutInfo::IsImage(input_layout_)" failed: Unsupported layout: '' for input 0 '__JpegCompressionDistortion_3'
Hi @LuckyZxy182,
I can reproduce the issue with DALI 1.11.1, but 1.12 works fine. Please update using this guide and retest.
@mzient @JanuszL Many thanks for your reply. It works fine with Dali 1.12.0.
Here is my code.
How can I use crop_mirror_normalize and jpeg_compression_distortion at the same time?