Evaluation of inference model performance on edge devices
First, pull the NVIDIA L4T TensorFlow base image (L4T R32.6.1, TensorFlow 2.5, Python 3) used by the project's Dockerfile:
docker pull nvcr.io/nvidia/l4t-tensorflow:r32.6.1-tf2.5-py3
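The r32.6.1 tag targets L4T R32.6.1, the release shipped with JetPack 4.6. As a sanity check before pulling, you can confirm the L4T release installed on the device; the file below is standard on Jetson boards:
# Print the L4T release string on the Jetson device
cat /etc/nv_tegra_release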
Next, download the Dockerfile and requirements.txt from the repository:
curl -O https://raw.githubusercontent.com/ddps-lab/edge-inference/main/Dockerfile
curl -O https://raw.githubusercontent.com/ddps-lab/edge-inference/main/requirements.txt
Build the image locally, or pull the prebuilt image from Docker Hub; the two commands are alternatives that yield the same image name:
docker build -t kmubigdata/edge-inference ./
docker pull kmubigdata/edge-inference:latest
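Either way, you can confirm the image is available locally before starting a container (a routine Docker check, not specific to this project):
# List the edge-inference image to verify it was built or pulled
docker images kmubigdata/edge-inference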
Finally, start a container from the image. On a device with an NVIDIA GPU, pass --gpus all (--shm-size 10G enlarges shared memory, which data loaders commonly need); on a CPU-only device, omit the GPU and shared-memory flags:
docker run --privileged --gpus all --shm-size 10G -it kmubigdata/edge-inference /bin/bash
docker run --privileged -it kmubigdata/edge-inference /bin/bash
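Once inside the container, a quick sanity check is to verify that TensorFlow sees the GPU (assuming TensorFlow 2.5, as the base image tag indicates; on the CPU-only variant the device list will simply be empty):
# Inside the container: print the TF version and any visible GPUs
python3 -c "import tensorflow as tf; print(tf.__version__, tf.config.list_physical_devices('GPU'))"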