diff --git a/pyproject.toml b/pyproject.toml
index 6a19203f03..40c7f6bf2b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -97,7 +97,7 @@ export = [
     "onnx>=1.12.0",  # ONNX export
     "coremltools>=7.0; platform_system != 'Windows' and python_version <= '3.11'",  # CoreML supported on macOS and Linux
     "scikit-learn>=1.3.2; platform_system != 'Windows' and python_version <= '3.11'",  # CoreML k-means quantization
-    "openvino>=2024.0.0",  # OpenVINO export
+    "openvino>=2024.0.0,<2025.0.0",  # OpenVINO export
     "tensorflow>=2.0.0",  # TF bug https://github.com/ultralytics/ultralytics/issues/5161
     "tensorflowjs>=3.9.0",  # TF.js export, automatically installs tensorflow
     "tensorstore>=0.1.63; platform_machine == 'aarch64' and python_version >= '3.9'",  # for TF Raspberry Pi exports
diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py
index 16111ea840..440a21650e 100644
--- a/ultralytics/engine/exporter.py
+++ b/ultralytics/engine/exporter.py
@@ -570,7 +570,7 @@ class Exporter:
     @try_export
     def export_openvino(self, prefix=colorstr("OpenVINO:")):
         """YOLO OpenVINO export."""
-        check_requirements("openvino>=2024.5.0")
+        check_requirements("openvino>=2024.0.0,<2025.0.0")
         import openvino as ov

         LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
@@ -592,7 +592,7 @@ class Exporter:
             if self.model.task != "classify":
                 ov_model.set_rt_info("fit_to_window_letterbox", ["model_info", "resize_type"])

-            ov.runtime.save_model(ov_model, file, compress_to_fp16=self.args.half)
+            ov.save_model(ov_model, file, compress_to_fp16=self.args.half)
             yaml_save(Path(file).parent / "metadata.yaml", self.metadata)  # add metadata.yaml

         if self.args.int8:
diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index 7629fb7ca8..89744ee46d 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -244,7 +244,7 @@ class AutoBackend(nn.Module):
         # OpenVINO
         elif xml:
             LOGGER.info(f"Loading {w} for OpenVINO inference...")
-            check_requirements("openvino>=2024.0.0")
+            check_requirements("openvino>=2024.0.0,<2025.0.0")
             import openvino as ov

             core = ov.Core()
@@ -600,7 +600,7 @@ class AutoBackend(nn.Module):
                     results[userdata] = request.results

                # Create AsyncInferQueue, set the callback and start asynchronous inference for each input image
-                async_queue = self.ov.runtime.AsyncInferQueue(self.ov_compiled_model)
+                async_queue = self.ov.AsyncInferQueue(self.ov_compiled_model)
                 async_queue.set_callback(callback)
                 for i in range(n):
                     # Start async inference with userdata=i to specify the position in results list