[Draft] MLPerf v2.1 Benchmark

Environment Preparation

sudo apt-get update

### Install the NVIDIA driver for the GPU ###
### A newer NVIDIA GPU will list its minimum driver requirement ###
### See the latest NVIDIA driver list ###
sudo apt install -y nvidia-driver-525
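
A reboot may be needed before the new driver loads; nvidia-smi should then list the GPU and the driver version.

### Verify the driver installed correctly ###
nvidia-smi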

### Install curl as a download helper ###
sudo apt install -y curl

### Install Docker ###
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
### sudo docker info ###
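
A quick smoke test confirms the Docker daemon is up and can run containers.

### Verify Docker end-to-end ###
sudo docker run --rm hello-world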

### Install the NVIDIA Container Toolkit ###
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
      && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
      && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
            sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
            sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
            
sudo apt-get update
sudo apt-get install -y nvidia-container-toolkit

### Configure the Docker runtime with GPU support ###
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
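
To confirm containers can now see the GPU, run nvidia-smi inside a CUDA base image; the image tag below is only an example, any recent nvidia/cuda tag works.

### Sanity check: GPU visible from inside a container ###
sudo docker run --rm --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi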

### Install Git ###
sudo apt install -y git

### Grab the MLPerf v2.1 repo from GitHub ###
### Ref: https://github.com/mlcommons/inference/blob/master/vision/classification_and_detection/GettingStarted.ipynb ###
### Ref: https://devconnected.com/how-to-checkout-git-tags/ ###
cd ~ 
git clone --recurse-submodules https://github.com/mlcommons/inference.git --depth 1
cd $HOME/inference
# Check out the MLPerf v2.1 tag onto a new branch
git fetch --all --tags
git checkout tags/v2.1 -b MLPerfv2.1
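
If the checkout worked, git describe should report the v2.1 tag.

git describe --tags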

# Edit the environment variables inside ./run_local.sh:
# MODEL_DIR=/inference/vision/classification_and_detection
# DATA_DIR=/inference/vision/classification_and_detection/fake_imagenet
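
Instead of editing the script, the same variables can be exported in the shell; run_common.sh (sourced by run_local.sh) reads them from the environment. A minimal sketch, assuming the repo is mounted at /inference as in the docker run below:

export MODEL_DIR=/inference/vision/classification_and_detection
export DATA_DIR=/inference/vision/classification_and_detection/fake_imagenet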

### Grab MLPerf v2.1 Docker Image ###
### Ref: https://catalog.ngc.nvidia.com/orgs/nvidia/teams/mlperf/containers/mlperf-inference/tags ###

sudo docker pull nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v2.1-cuda11.6-cudnn8.4-x86_64-ubuntu20.04-public

### Run Docker with GPU support. Mount the 'inference' folder as a volume so data persists across runs ###
sudo docker run --shm-size=2g --gpus all -it --rm -v $HOME/inference:/inference nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v2.1-cuda11.6-cudnn8.4-x86_64-ubuntu20.04-public
### Inside the Docker Container ###

cd /inference/loadgen
CFLAGS="-std=c++14" python3 setup.py develop
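
If the build succeeded, the loadgen Python module should import cleanly.

python3 -c "import mlperf_loadgen"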

cd /inference/vision/classification_and_detection/
python3 setup.py develop

# Install extra Python packages
pip install onnxruntime pycocotools opencv-python
# Swap to the headless OpenCV build to avoid libGL/GUI dependency errors inside the container
pip uninstall -y opencv-python
pip install opencv-python-headless
pip install torchvision

# Build fake data for a quick test
tools/make_fake_imagenet.sh
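
The script should leave a small fake_imagenet folder with a few JPEGs and a val_map.txt.

ls fake_imagenet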

# Download the MobileNet v1 ONNX model
wget https://zenodo.org/record/3157894/files/mobilenet_v1_1.0_224.onnx


# Test that the environment setup is complete
./run_local.sh onnxruntime mobilenet cpu --accuracy
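
Once the CPU path passes, the same harness can target the GPU. A sketch, assuming the GPU build of ONNX Runtime is wanted (the CPU and GPU wheels conflict, so replace one with the other):

pip uninstall -y onnxruntime
pip install onnxruntime-gpu
./run_local.sh onnxruntime mobilenet gpu --accuracy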

TODO
