From 9f5ab67ba2c1437133c9ee3691edc2f16daa99cb Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 25 Jul 2023 04:16:02 +0200
Subject: [PATCH] Add benchmarks to Docker publish workflow (#3931)

---
 .github/workflows/ci.yaml        | 24 ++++++++----------------
 .github/workflows/docker.yaml    |  2 +-
 docker/Dockerfile                | 10 +++++++++-
 docker/Dockerfile-cpu            | 11 +++++++++--
 docker/Dockerfile-python         | 11 ++++-------
 docs/modes/benchmark.md          | 20 ++++++++++----------
 ultralytics/engine/model.py      |  6 +++++-
 ultralytics/utils/benchmarks.py  | 13 ++++++++-----
 ultralytics/utils/torch_utils.py |  2 +-
 9 files changed, 55 insertions(+), 44 deletions(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index c358334dbf..a3edbff645 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -109,25 +109,17 @@ jobs:
           pip --version
           pip list
       - name: Benchmark DetectionModel
-        shell: python
-        run: |
-          from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='path with spaces/${{ matrix.model }}.pt', imgsz=160, half=False, hard_fail=0.26)
+        shell: bash
+        run: yolo benchmark model='path with spaces/${{ matrix.model }}.pt' imgsz=160, verbose=0.26
       - name: Benchmark SegmentationModel
-        shell: python
-        run: |
-          from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='path with spaces/${{ matrix.model }}-seg.pt', imgsz=160, half=False, hard_fail=0.30)
+        shell: bash
+        run: yolo benchmark model='path with spaces/${{ matrix.model }}-seg.pt' imgsz=160, verbose=0.30
       - name: Benchmark ClassificationModel
-        shell: python
-        run: |
-          from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='path with spaces/${{ matrix.model }}-cls.pt', imgsz=160, half=False, hard_fail=0.36)
+        shell: bash
+        run: yolo benchmark model='path with spaces/${{ matrix.model }}-cls.pt' imgsz=160, verbose=0.36
       - name: Benchmark PoseModel
-        shell: python
-        run: |
-          from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='path with spaces/${{ matrix.model }}-pose.pt', imgsz=160, half=False, hard_fail=0.17)
+        shell: bash
+        run: yolo benchmark model='path with spaces/${{ matrix.model }}-pose.pt' imgsz=160, verbose=0.17
       - name: Benchmark Summary
         run: |
           cat benchmarks.log
diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml
index 4a035050e3..ad10e60e52 100644
--- a/.github/workflows/docker.yaml
+++ b/.github/workflows/docker.yaml
@@ -76,7 +76,7 @@ jobs:
       - name: Run Benchmarks
         if: matrix.platforms == 'linux/amd64' # arm64 images not supported on GitHub CI runners
         run: |
-          docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolov8n.pt imgsz=160
+          docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolov8n.pt imgsz=160 verbose=0.26
 
       - name: Push Image
         if: github.event_name == 'push' || github.event.inputs.push == true
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 144a274b63..d06ae0204c 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -30,7 +30,15 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt /u
 
 # Install pip packages
 RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache -e . albumentations comet thop pycocotools onnx onnx-simplifier onnxruntime-gpu
+RUN pip install --no-cache -e '.[export]' thop albumentations comet pycocotools
+
+# Run exports to AutoInstall packages
+RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
+# Requires <= Python 3.10, bug with paddlepaddle==2.5.0
+RUN pip install --no-cache paddlepaddle==2.4.2 x2paddle
+# Remove exported models
+RUN rm -rf tmp
 
 # Set environment variables
 ENV OMP_NUM_THREADS=1
diff --git a/docker/Dockerfile-cpu b/docker/Dockerfile-cpu
index bdc0b9ed5e..4a31fa5edf 100644
--- a/docker/Dockerfile-cpu
+++ b/docker/Dockerfile-cpu
@@ -28,8 +28,15 @@ RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
 
 # Install pip packages
 RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache -e . thop --extra-index-url https://download.pytorch.org/whl/cpu
-
+RUN pip install --no-cache -e '.[export]' thop --extra-index-url https://download.pytorch.org/whl/cpu
+
+# Run exports to AutoInstall packages
+RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
+# Requires <= Python 3.10, bug with paddlepaddle==2.5.0
+# RUN pip install --no-cache paddlepaddle==2.4.2 x2paddle
+# Remove exported models
+RUN rm -rf tmp
 
 # Usage Examples -------------------------------------------------------------------------------------------------------
 
diff --git a/docker/Dockerfile-python b/docker/Dockerfile-python
index bb3b36cf0a..2664647bc0 100644
--- a/docker/Dockerfile-python
+++ b/docker/Dockerfile-python
@@ -31,15 +31,12 @@ RUN python3 -m pip install --upgrade pip wheel
 RUN pip install --no-cache -e '.[export]' thop --extra-index-url https://download.pytorch.org/whl/cpu
 
 # Run exports to AutoInstall packages
-WORKDIR /tmp_exports
-RUN yolo export format=edgetpu imgsz=32
-RUN yolo export format=ncnn imgsz=32
+RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
 # Requires <= Python 3.10, bug with paddlepaddle==2.5.0
 RUN pip install --no-cache paddlepaddle==2.4.2 x2paddle
-
-# Reset workdir
-WORKDIR /usr/src/ultralytics
-RUN rm -rf /tmp_exports
+# Remove exported models
+RUN rm -rf tmp
 
 # Usage Examples -------------------------------------------------------------------------------------------------------
 
diff --git a/docs/modes/benchmark.md b/docs/modes/benchmark.md
index d8bab15a4d..a63aa15dc6 100644
--- a/docs/modes/benchmark.md
+++ b/docs/modes/benchmark.md
@@ -40,18 +40,18 @@ full list of export arguments.
 
 ## Arguments
 
-Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `hard_fail` provide users with the flexibility to fine-tune
+Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `verbose` provide users with the flexibility to fine-tune
 the benchmarks to their specific needs and compare the performance of different export formats with ease.
 
-| Key | Value | Description |
-|-------------|---------|----------------------------------------------------------------------------|
-| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml |
-| `data` | `None` | path to yaml referencing the benchmarking dataset (under `val` label) |
-| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) |
-| `half` | `False` | FP16 quantization |
-| `int8` | `False` | INT8 quantization |
-| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu |
-| `hard_fail` | `False` | do not continue on error (bool), or val floor threshold (float) |
+| Key | Value | Description |
+|-----------|---------|-----------------------------------------------------------------------|
+| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml |
+| `data` | `None` | path to yaml referencing the benchmarking dataset (under `val` label) |
+| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) |
+| `half` | `False` | FP16 quantization |
+| `int8` | `False` | INT8 quantization |
+| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu |
+| `verbose` | `False` | do not continue on error (bool), or val floor threshold (float) |
 
 ## Export Formats
 
diff --git a/ultralytics/engine/model.py b/ultralytics/engine/model.py
index 69f7606957..b74a207775 100644
--- a/ultralytics/engine/model.py
+++ b/ultralytics/engine/model.py
@@ -319,7 +319,11 @@ class YOLO:
         overrides.update(kwargs)
         overrides['mode'] = 'benchmark'
         overrides = {**DEFAULT_CFG_DICT, **overrides}  # fill in missing overrides keys with defaults
-        return benchmark(model=self, imgsz=overrides['imgsz'], half=overrides['half'], device=overrides['device'])
+        return benchmark(model=self,
+                         imgsz=overrides['imgsz'],
+                         half=overrides['half'],
+                         device=overrides['device'],
+                         verbose=overrides['verbose'])
 
     def export(self, **kwargs):
         """
diff --git a/ultralytics/utils/benchmarks.py b/ultralytics/utils/benchmarks.py
index eaea7176a6..7a67e4f0c3 100644
--- a/ultralytics/utils/benchmarks.py
+++ b/ultralytics/utils/benchmarks.py
@@ -26,6 +26,7 @@ ncnn | `ncnn` | yolov8n_ncnn_model/
 
 import glob
 import platform
+import sys
 import time
 from pathlib import Path
 
@@ -49,7 +50,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
               half=False,
               int8=False,
               device='cpu',
-              hard_fail=False):
     """
     Benchmark a YOLO model across different formats for speed and accuracy.
+              verbose=False):
     """
     Benchmark a YOLO model across different formats for speed and accuracy.
 
@@ -61,7 +62,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
         half (bool, optional): Use half-precision for the model if True. Default is False.
         int8 (bool, optional): Use int8-precision for the model if True. Default is False.
         device (str, optional): Device to run the benchmark on, either 'cpu' or 'cuda'. Default is 'cpu'.
-        hard_fail (bool | float | optional): If True or a float, assert benchmarks pass with given metric.
+        verbose (bool | float | optional): If True or a float, assert benchmarks pass with given metric.
             Default is False.
 
     Returns:
@@ -84,6 +85,8 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
             assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
             if i == 10:
                 assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux'
+            elif i == 11:
+                assert sys.version_info < (3, 11), 'PaddlePaddle export only supported on Python<=3.10'
             if 'cpu' in device.type:
                 assert cpu, 'inference not supported on CPU'
             if 'cuda' in device.type:
@@ -121,7 +124,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
                 metric, speed = results.results_dict[key], results.speed['inference']
                 y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
         except Exception as e:
-            if hard_fail:
+            if verbose:
                 assert type(e) is AssertionError, f'Benchmark failure for {name}: {e}'
             LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
             y.append([name, emoji, round(file_size(filename), 1), None, None])  # mAP, t_inference
@@ -136,9 +139,9 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
     with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
         f.write(s)
 
-    if hard_fail and isinstance(hard_fail, float):
+    if verbose and isinstance(verbose, float):
         metrics = df[key].array  # values to compare to floor
-        floor = hard_fail  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
+        floor = verbose  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
         assert all(x > floor for x in metrics if pd.notna(x)), f'Benchmark failure: metric(s) < floor {floor}'
 
     return df
diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
index 500b519b79..12eeecfaa4 100644
--- a/ultralytics/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -28,7 +28,7 @@ TORCHVISION_0_10 = check_version(torchvision.__version__, '0.10.0')
 TORCH_1_9 = check_version(torch.__version__, '1.9.0')
 TORCH_1_11 = check_version(torch.__version__, '1.11.0')
 TORCH_1_12 = check_version(torch.__version__, '1.12.0')
-TORCH_2_0 = check_version(torch.__version__, minimum='2.0')
+TORCH_2_0 = check_version(torch.__version__, '2.0.0')
 
 
 @contextmanager
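
Usage note: this patch renames the `hard_fail` argument of `ultralytics.utils.benchmarks.benchmark()` to `verbose` and passes it through the CI, Docker and `YOLO` model benchmark paths. A minimal sketch of the patched Python API follows (the 0.26 floor mirrors the CI detection job above and is not a new default):

    # Sketch of the renamed argument (hard_fail -> verbose), mirroring the CI benchmark step.
    from ultralytics.utils.benchmarks import benchmark

    # Benchmark YOLOv8n across all export formats at 160 px on CPU.
    # A float `verbose` acts as a metric floor: any successful format scoring at or below it
    # fails the run; a truthy `verbose` also makes unexpected per-format errors fatal
    # instead of only being logged as warnings.
    df = benchmark(model='yolov8n.pt', imgsz=160, half=False, device='cpu', verbose=0.26)
    print(df)  # pandas DataFrame with per-format file size, metric and inference speed

    # CLI equivalent used by the Docker publish workflow:
    #   yolo benchmark model=yolov8n.pt imgsz=160 verbose=0.26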