`ultralytics 8.3.69` New Results `to_sql()` method for SQL format (#18921)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
pull/18935/head v8.3.69
Muhammad Rizwan Munawar 3 weeks ago committed by GitHub
parent 813511a232
commit 748c380fc5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 7
      .github/workflows/ci.yml
  2. 7
      docs/en/modes/predict.md
  3. 2
      ultralytics/__init__.py
  4. 69
      ultralytics/engine/results.py
  5. 6
      ultralytics/utils/benchmarks.py

@ -98,7 +98,8 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-15, ubuntu-24.04-arm]
# Temporarily disable windows-latest due to https://github.com/ultralytics/ultralytics/actions/runs/13020330819/job/36319338854?pr=18921
os: [ubuntu-latest, macos-15, ubuntu-24.04-arm]
python-version: ["3.11"]
model: [yolo11n]
steps:
@ -151,7 +152,9 @@ jobs:
- name: Benchmark Summary
run: |
cat benchmarks.log
echo "$(cat benchmarks.log)" >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
cat benchmarks.log >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
Tests:
if: github.event_name != 'workflow_dispatch' || github.event.inputs.tests == 'true'

@ -508,7 +508,12 @@ All Ultralytics `predict()` calls will return a list of `Results` objects:
| `verbose()` | `str` | Return log string for each task. |
| `save_txt()` | `None` | Save predictions into a txt file. |
| `save_crop()` | `None` | Save cropped predictions to `save_dir/cls/file_name.jpg`. |
| `tojson()` | `str` | Convert the object to JSON format. |
| `summary()` | `List[Dict]` | A list of dictionaries, each containing summarized information for results. |
| `to_df()` | `DataFrame` | Convert the results to Pandas Dataframe. |
| `to_csv()` | `str` | Convert the result to CSV (comma separated values) format. |
| `to_xml()` | `str` | Convert the results to XML (Extensible Markup Language) format. |
| `to_json()` | `str` | Convert the results to JSON format. |
| `to_sql()` | `None` | Write the results into an SQLite database. |
For more details see the [`Results` class documentation](../reference/engine/results.md).

@ -1,6 +1,6 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
__version__ = "8.3.68"
__version__ = "8.3.69"
import os

@ -937,6 +937,75 @@ class Results(SimpleClass):
return json.dumps(self.summary(normalize=normalize, decimals=decimals), indent=2)
def to_sql(self, table_name="results", normalize=False, decimals=5, db_path="results.db"):
    """
    Converts detection results to an SQL-compatible format.

    This method serializes the detection results into a format compatible with SQL databases.
    It includes information about detected objects such as bounding boxes, class names, confidence scores,
    and optionally segmentation masks, keypoints or oriented bounding boxes.

    Args:
        table_name (str): Name of the SQL table where the data will be inserted. Defaults to "results".
        normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
            If True, coordinates will be returned as float values between 0 and 1. Defaults to False.
        decimals (int): Number of decimal places to round the bounding boxes values to. Defaults to 5.
        db_path (str): Path to the SQLite database file. Defaults to "results.db".

    Examples:
        >>> results = model("path/to/image.jpg")
        >>> results[0].to_sql()
        >>> print("SQL data written successfully.")
    """
    import json
    import sqlite3

    # Convert results to a list of dictionaries
    data = self.summary(normalize=normalize, decimals=decimals)
    if not data:
        LOGGER.warning(" No results to save to SQL. Results dict is empty")
        return

    # NOTE(review): table_name is interpolated directly into the SQL text below because SQL
    # identifiers cannot be bound as parameters — do not pass untrusted strings as table_name.
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        # Create table if it doesn't exist
        columns = (
            "id INTEGER PRIMARY KEY AUTOINCREMENT, class_name TEXT, confidence REAL, "
            "box TEXT, masks TEXT, kpts TEXT, obb TEXT"
        )
        cursor.execute(f"CREATE TABLE IF NOT EXISTS {table_name} ({columns})")

        # Insert one row per detected object
        for item in data:
            detect, obb = None, None  # reinitialize per item so a previous row's value never leaks into this one
            class_name = item.get("name")
            box = item.get("box", {})

            # Serialize the box as JSON for 'detect' or 'obb' based on key presence:
            # axis-aligned boxes carry exactly x1..y2, oriented boxes add x3/x4 corner keys.
            if all(key in box for key in ["x1", "y1", "x2", "y2"]) and not any(key in box for key in ["x3", "x4"]):
                detect = json.dumps(box)
            if all(key in box for key in ["x1", "y1", "x2", "y2", "x3", "x4"]):
                obb = json.dumps(box)

            cursor.execute(
                f"INSERT INTO {table_name} (class_name, confidence, box, masks, kpts, obb) VALUES (?, ?, ?, ?, ?, ?)",
                (
                    class_name,
                    item.get("confidence"),
                    detect,
                    json.dumps(item.get("segments", {}).get("x", [])),
                    json.dumps(item.get("keypoints", {}).get("x", [])),
                    obb,
                ),
            )

        conn.commit()  # commit all inserts together
    finally:
        conn.close()  # ensure the connection is released even if an execute raises

    LOGGER.info(f"✅ Detection results successfully written to SQL table '{table_name}' in database '{db_path}'.")
class Boxes(BaseTensor):
"""

@ -173,10 +173,10 @@ def benchmark(
df = pd.DataFrame(y, columns=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)", "FPS"])
name = model.model_name
s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df.fillna('-')}\n"
dt = time.time() - t0
legend = "Benchmarks legend: - ✅ Success - ❎ Export passed but validation failed - ❌ Export failed"
s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({dt:.2f}s)\n{legend}\n{df.fillna('-')}\n"
LOGGER.info(s)
LOGGER.info("Status legends:")
LOGGER.info("✅ - Benchmark passed | ❎ - Export passed but validation failed | ❌ - Export failed")
with open("benchmarks.log", "a", errors="ignore", encoding="utf-8") as f:
f.write(s)

Loading…
Cancel
Save