From 22ebd44f62791cfd4de2a24de15ce05a13c1447a Mon Sep 17 00:00:00 2001 From: Kirill Lalayants <91465467+lalayants@users.noreply.github.com> Date: Thu, 17 Oct 2024 03:57:01 +0300 Subject: [PATCH] `ultralytics 8.3.15` new TPU device-selection ability (#16576) Co-authored-by: UltralyticsAssistant Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Co-authored-by: Skillnoob <78843978+Skillnoob@users.noreply.github.com> Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .../guides/coral-edge-tpu-on-raspberry-pi.md | 51 ++++++++++++------- ultralytics/__init__.py | 2 +- ultralytics/nn/autobackend.py | 8 ++- ultralytics/utils/torch_utils.py | 2 +- 4 files changed, 42 insertions(+), 21 deletions(-) diff --git a/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md b/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md index 5f6fceb781..e2d2a03f45 100644 --- a/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md +++ b/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md @@ -85,7 +85,7 @@ After installing the runtime, you need to plug in your Coral Edge TPU into a USB To use the Edge TPU, you need to convert your model into a compatible format. It is recommended that you run export on Google Colab, x86_64 Linux machine, using the official [Ultralytics Docker container](docker-quickstart.md), or using [Ultralytics HUB](../hub/quickstart.md), since the Edge TPU compiler is not available on ARM. See the [Export Mode](../modes/export.md) for the available arguments. -!!! note "Exporting the model" +!!! example "Exporting the model" === "Python" @@ -105,13 +105,27 @@ To use the Edge TPU, you need to convert your model into a compatible format. It yolo export model=path/to/model.pt format=edgetpu # Export an official model or custom model ``` -The exported model will be saved in the `_saved_model/` folder with the name `_full_integer_quant_edgetpu.tflite`. 
+The exported model will be saved in the `_saved_model/` folder with the name `_full_integer_quant_edgetpu.tflite`. It is important that your model ends with the suffix `_edgetpu.tflite`, otherwise Ultralytics doesn't know that you're using an Edge TPU model. ## Running the model -After exporting your model, you can run inference with it using the following code: +Before you can actually run the model, you will need to install the correct libraries. -!!! note "Running the model" +If `tensorflow` is installed, uninstall tensorflow with the following command: + +```bash +pip uninstall tensorflow tensorflow-aarch64 +``` + +Then install/update `tflite-runtime`: + +```bash +pip install -U tflite-runtime +``` + +Now you can run inference using the following code: + +!!! example "Running the model" === "Python" ```python from ultralytics import YOLO # Load a model - model = YOLO("path/to/edgetpu_model.tflite") # Load an official model or custom model + model = YOLO("path/to/_full_integer_quant_edgetpu.tflite") # Load an official model or custom model # Run Prediction model.predict("path/to/source.png") @@ -128,27 +142,30 @@ After exporting your model, you can run inference with it using the following co === "CLI" ```bash - yolo predict model=path/to/edgetpu_model.tflite source=path/to/source.png # Load an official model or custom model + yolo predict model=path/to/_full_integer_quant_edgetpu.tflite source=path/to/source.png # Load an official model or custom model ``` Find comprehensive information on the [Predict](../modes/predict.md) page for full prediction mode details. -???+ warning "Important" +!!! note "Inference with multiple Edge TPUs" - You should run the model using `tflite-runtime` and not `tensorflow`. - If `tensorflow` is installed, uninstall tensorflow with the following command: + If you have multiple Edge TPUs you can use the following code to select a specific TPU. 
- ```bash - pip uninstall tensorflow tensorflow-aarch64 - ``` + === "Python" + + ```python + from ultralytics import YOLO - Then install/update `tflite-runtime`: + # Load a model + model = YOLO("path/to/_full_integer_quant_edgetpu.tflite") # Load an official model or custom model - ``` - pip install -U tflite-runtime - ``` + # Run Prediction + model.predict("path/to/source.png") # Inference defaults to the first TPU + + model.predict("path/to/source.png", device="tpu:0") # Select the first TPU - If you want a `tflite-runtime` wheel for `tensorflow` 2.15.0 download it from [here](https://github.com/feranick/TFlite-builds/releases) and install it using `pip` or your package manager of choice. + model.predict("path/to/source.png", device="tpu:1") # Select the second TPU + ``` ## FAQ diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py index 99cfc59deb..d83c00a02b 100644 --- a/ultralytics/__init__.py +++ b/ultralytics/__init__.py @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -__version__ = "8.3.14" +__version__ = "8.3.15" import os diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py index 7d4dbb8cb4..9a8678f184 100644 --- a/ultralytics/nn/autobackend.py +++ b/ultralytics/nn/autobackend.py @@ -336,11 +336,15 @@ class AutoBackend(nn.Module): Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f"Loading {w} for TensorFlow Lite Edge TPU inference...") + device = device[3:] if str(device).startswith("tpu") else ":0" + LOGGER.info(f"Loading {w} on device {device[1:]} for TensorFlow Lite Edge TPU inference...") delegate = {"Linux": "libedgetpu.so.1", "Darwin": "libedgetpu.1.dylib", "Windows": "edgetpu.dll"}[ platform.system() ] - interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) + interpreter = Interpreter( + model_path=w, + experimental_delegates=[load_delegate(delegate, 
options={"device": device})], + ) else: # TFLite LOGGER.info(f"Loading {w} for TensorFlow Lite inference...") interpreter = Interpreter(model_path=w) # load TFLite model diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py index 0143b933d8..0dbc728e23 100644 --- a/ultralytics/utils/torch_utils.py +++ b/ultralytics/utils/torch_utils.py @@ -163,7 +163,7 @@ def select_device(device="", batch=0, newline=False, verbose=True): Note: Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use. """ - if isinstance(device, torch.device): + if isinstance(device, torch.device) or str(device).startswith("tpu"): return device s = f"Ultralytics {__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "