`ultralytics 8.1.25` fix `**kwargs: (dict)` warnings (#8815)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
pull/8819/head
Glenn Jocher authored 9 months ago · committed by GitHub
parent f8f62bc649
commit 2bc605f32a
12 changed files (changed lines in parentheses):

1. README.md (2)
2. docs/en/datasets/obb/dota-v2.md (4)
3. ultralytics/__init__.py (2)
4. ultralytics/cfg/default.yaml (2)
5. ultralytics/engine/model.py (18)
6. ultralytics/engine/trainer.py (2)
7. ultralytics/hub/utils.py (4)
8. ultralytics/nn/modules/transformer.py (2)
9. ultralytics/nn/modules/utils.py (2)
10. ultralytics/utils/__init__.py (2)
11. ultralytics/utils/downloads.py (2)
12. ultralytics/utils/patches.py (2)

@@ -200,7 +200,7 @@ See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with
| [YOLOv8l-obb](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8l-obb.pt) | 1024 | 80.7 | 1278.42 | 11.83 | 44.5 | 433.8 |
| [YOLOv8x-obb](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8x-obb.pt) | 1024 | 81.36 | 1759.10 | 13.23 | 69.5 | 676.7 |
-- **mAP<sup>test</sup>** values are for single-model multi-scale on [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html).
+- **mAP<sup>test</sup>** values are for single-model multiscale on [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html).
- **Speed** averaged over DOTAv1 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`
</details>
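For reference, a minimal Python-API sketch of the CLI reproduction command above; the checkpoint name and device are illustrative, and official test-split mAP still requires submitting merged results to the DOTA evaluation server.

```python
# Rough Python-API equivalent of `yolo val obb data=DOTAv1.yaml device=0 split=test`.
from ultralytics import YOLO

model = YOLO("yolov8n-obb.pt")  # any OBB checkpoint from the table above
metrics = model.val(data="DOTAv1.yaml", split="test", device=0)
```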

@@ -81,14 +81,14 @@ To train DOTA dataset, we split original DOTA images with high-resolution into i
split_trainval(
data_root='path/to/DOTAv1.0/',
save_dir='path/to/DOTAv1.0-split/',
-rates=[0.5, 1.0, 1.5], # multi-scale
+rates=[0.5, 1.0, 1.5], # multiscale
gap=500
)
# split test set, without labels.
split_test(
data_root='path/to/DOTAv1.0/',
save_dir='path/to/DOTAv1.0-split/',
-rates=[0.5, 1.0, 1.5], # multi-scale
+rates=[0.5, 1.0, 1.5], # multiscale
gap=500
)
```
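The snippet above assumes the split helpers are already imported; they are expected to come from `ultralytics.data.split_dota` (module path as documented in the DOTA dataset guide).

```python
# Import assumed by the snippet above.
from ultralytics.data.split_dota import split_test, split_trainval
```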

@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.1.24"
__version__ = "8.1.25"
from ultralytics.data.explorer.explorer import Explorer
from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld

@@ -34,7 +34,7 @@ amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, Fals
fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set)
profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
freeze: None # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
-multi_scale: False # (bool) Whether to use multi-scale during training
+multi_scale: False # (bool) Whether to use multiscale during training
# Segmentation
overlap_mask: True # (bool) masks should overlap during training (segment train only)
mask_ratio: 4 # (int) mask downsample ratio (segment train only)
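The `multi_scale` option documented above can be overridden per run; a hedged sketch (model, dataset, and epoch count are placeholders):

```python
# Enable the `multi_scale` flag from default.yaml for a single training run.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3, multi_scale=True)  # image size varies during training
```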

@@ -161,7 +161,7 @@ class Model(nn.Module):
Defaults to None.
stream (bool, optional): If True, treats the input source as a continuous stream for predictions.
Defaults to False.
-**kwargs (dict): Additional keyword arguments for configuring the prediction process.
+**kwargs (any): Additional keyword arguments for configuring the prediction process.
Returns:
(List[ultralytics.engine.results.Results]): A list of prediction results, encapsulated in the Results class.
@@ -368,7 +368,7 @@ class Model(nn.Module):
source (str | int | PIL.Image | np.ndarray): The source of the image for generating embeddings.
The source can be a file path, URL, PIL image, numpy array, etc. Defaults to None.
stream (bool): If True, predictions are streamed. Defaults to False.
-**kwargs (dict): Additional keyword arguments for configuring the embedding process.
+**kwargs (any): Additional keyword arguments for configuring the embedding process.
Returns:
(List[torch.Tensor]): A list containing the image embeddings.
@@ -406,7 +406,7 @@ class Model(nn.Module):
stream (bool, optional): Treats the input source as a continuous stream for predictions. Defaults to False.
predictor (BasePredictor, optional): An instance of a custom predictor class for making predictions.
If None, the method uses a default predictor. Defaults to None.
-**kwargs (dict): Additional keyword arguments for configuring the prediction process. These arguments allow
+**kwargs (any): Additional keyword arguments for configuring the prediction process. These arguments allow
for further customization of the prediction behavior.
Returns:
@@ -460,7 +460,7 @@ class Model(nn.Module):
source (str, optional): The input source for object tracking. It can be a file path, URL, or video stream.
stream (bool, optional): Treats the input source as a continuous video stream. Defaults to False.
persist (bool, optional): Persists the trackers between different calls to this method. Defaults to False.
-**kwargs (dict): Additional keyword arguments for configuring the tracking process. These arguments allow
+**kwargs (any): Additional keyword arguments for configuring the tracking process. These arguments allow
for further customization of the tracking behavior.
Returns:
@@ -497,7 +497,7 @@ class Model(nn.Module):
Args:
validator (BaseValidator, optional): An instance of a custom validator class for validating the model. If
None, the method uses a default validator. Defaults to None.
-**kwargs (dict): Arbitrary keyword arguments representing the validation configuration. These arguments are
+**kwargs (any): Arbitrary keyword arguments representing the validation configuration. These arguments are
used to customize various aspects of the validation process.
Returns:
@@ -531,7 +531,7 @@ class Model(nn.Module):
configurable options, users should refer to the 'configuration' section in the documentation.
Args:
-**kwargs (dict): Arbitrary keyword arguments to customize the benchmarking process. These are combined with
+**kwargs (any): Arbitrary keyword arguments to customize the benchmarking process. These are combined with
default configurations, model-specific arguments, and method defaults.
Returns:
@@ -570,7 +570,7 @@ class Model(nn.Module):
possible arguments, refer to the 'configuration' section in the documentation.
Args:
-**kwargs (dict): Arbitrary keyword arguments to customize the export process. These are combined with the
+**kwargs (any): Arbitrary keyword arguments to customize the export process. These are combined with the
model's overrides and method defaults.
Returns:
@@ -607,7 +607,7 @@ class Model(nn.Module):
Args:
trainer (BaseTrainer, optional): An instance of a custom trainer class for training the model. If None, the
method uses a default trainer. Defaults to None.
-**kwargs (dict): Arbitrary keyword arguments representing the training configuration. These arguments are
+**kwargs (any): Arbitrary keyword arguments representing the training configuration. These arguments are
used to customize various aspects of the training process.
Returns:
@@ -679,7 +679,7 @@ class Model(nn.Module):
use_ray (bool): If True, uses Ray Tune for hyperparameter tuning. Defaults to False.
iterations (int): The number of tuning iterations to perform. Defaults to 10.
*args (list): Variable length argument list for additional arguments.
-**kwargs (dict): Arbitrary keyword arguments. These are combined with the model's overrides and defaults.
+**kwargs (any): Arbitrary keyword arguments. These are combined with the model's overrides and defaults.
Returns:
(dict): A dictionary containing the results of the hyperparameter search.
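All of the `engine/model.py` edits above follow one pattern: the packed `**kwargs` parameter is now documented as `(any)` instead of `(dict)`, which is what triggered the warnings named in the commit title. A minimal illustrative sketch of the resulting docstring style (function and parameters are hypothetical, not from the codebase):

```python
def predict_like(source=None, stream=False, **kwargs):
    """
    Run a prediction-style call (illustrative only).

    Args:
        source (str, optional): Input source such as a file path or URL. Defaults to None.
        stream (bool, optional): Treat the source as a continuous stream. Defaults to False.
        **kwargs (any): Additional keyword arguments forwarded to the underlying call; documenting
            packed kwargs as `(any)` rather than `(dict)` is the convention this commit adopts.
    """
    return {"source": source, "stream": stream, **kwargs}
```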

@@ -280,7 +280,7 @@ class BaseTrainer:
# Check imgsz
gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32) # grid size (max stride)
self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1)
-self.stride = gs # for multi-scale training
+self.stride = gs # for multiscale training
# Batch size
if self.batch_size == -1 and RANK == -1: # single-GPU only, estimate best batch size
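The `self.stride = gs` line above is what multiscale training keys off: each batch can be resized to a random multiple of the model's maximum stride. A hedged, standalone sketch of that idea (not the trainer's exact code; the function name and the 0.5–1.5 scale range are assumptions):

```python
import math
import random

import torch
import torch.nn.functional as F


def multiscale_resize(imgs: torch.Tensor, imgsz: int = 640, stride: int = 32) -> torch.Tensor:
    """Resize a batch to a random size in [0.5, 1.5] * imgsz, rounded to a stride multiple (sketch)."""
    new_size = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + stride) // stride * stride
    scale = new_size / max(imgs.shape[2:])
    if scale != 1.0:
        shape = [math.ceil(x * scale / stride) * stride for x in imgs.shape[2:]]
        imgs = F.interpolate(imgs, size=shape, mode="bilinear", align_corners=False)
    return imgs


# Example: a dummy batch of four 640x640 RGB images
batch = multiscale_resize(torch.zeros(4, 3, 640, 640))
```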

@@ -84,7 +84,7 @@ def requests_with_progress(method, url, **kwargs):
Args:
method (str): The HTTP method to use (e.g. 'GET', 'POST').
url (str): The URL to send the request to.
-**kwargs (dict): Additional keyword arguments to pass to the underlying `requests.request` function.
+**kwargs (any): Additional keyword arguments to pass to the underlying `requests.request` function.
Returns:
(requests.Response): The response object from the HTTP request.
@@ -122,7 +122,7 @@ def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbos
code (int, optional): An identifier for the request, used for logging purposes. Default is -1.
verbose (bool, optional): A flag to determine whether to print out to console or not. Default is True.
progress (bool, optional): Whether to show a progress bar during the request. Default is False.
-**kwargs (dict): Keyword arguments to be passed to the requests function specified in method.
+**kwargs (any): Keyword arguments to be passed to the requests function specified in method.
Returns:
(requests.Response): The HTTP response object. If the request is executed in a separate thread, returns None.
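For context, a hedged usage example of the two HTTP helpers whose docstrings change above; the URL is a placeholder, and argument values are illustrative:

```python
from ultralytics.hub.utils import requests_with_progress, smart_request

resp = requests_with_progress("GET", "https://example.com/file.zip", progress=True)
resp = smart_request("GET", "https://example.com/file.zip", retry=3, timeout=30, thread=False)
```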

@@ -215,7 +215,7 @@ class LayerNorm2d(nn.Module):
class MSDeformAttn(nn.Module):
"""
-Multi-Scale Deformable Attention Module based on Deformable-DETR and PaddleDetection implementations.
+Multiscale Deformable Attention Module based on Deformable-DETR and PaddleDetection implementations.
https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
"""

@@ -46,7 +46,7 @@ def multi_scale_deformable_attn_pytorch(
attention_weights: torch.Tensor,
) -> torch.Tensor:
"""
-Multi-scale deformable attention.
+Multiscale deformable attention.
https://github.com/IDEA-Research/detrex/blob/main/detrex/layers/multi_scale_deform_attn.py
"""

@@ -113,7 +113,7 @@ class TQDM(tqdm_original):
Args:
*args (list): Positional arguments passed to original tqdm.
-**kwargs (dict): Keyword arguments, with custom defaults applied.
+**kwargs (any): Keyword arguments, with custom defaults applied.
"""
def __init__(self, *args, **kwargs):
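A brief usage example of the `TQDM` wrapper documented above; it behaves like stock `tqdm` with Ultralytics' defaults applied (the loop body is illustrative):

```python
from ultralytics.utils import TQDM

for _ in TQDM(range(100), desc="Processing"):
    pass  # custom bar format and verbosity defaults come from the wrapper
```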

@@ -410,7 +410,7 @@ def attempt_download_asset(file, repo="ultralytics/assets", release="v8.1.0", **
file (str | Path): The filename or file path to be downloaded.
repo (str, optional): The GitHub repository in the format 'owner/repo'. Defaults to 'ultralytics/assets'.
release (str, optional): The specific release version to be downloaded. Defaults to 'v8.1.0'.
-**kwargs (dict): Additional keyword arguments for the download process.
+**kwargs (any): Additional keyword arguments for the download process.
Returns:
(str): The path to the downloaded file.
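A hedged usage example of `attempt_download_asset`; the asset name is illustrative:

```python
from ultralytics.utils.downloads import attempt_download_asset

# Downloads the named release asset if it is not already present locally.
path = attempt_download_asset("yolov8n.pt")
print(path)
```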

@@ -68,7 +68,7 @@ def torch_save(*args, use_dill=True, **kwargs):
Args:
*args (tuple): Positional arguments to pass to torch.save.
use_dill (bool): Whether to try using dill for serialization if available. Defaults to True.
-**kwargs (dict): Keyword arguments to pass to torch.save.
+**kwargs (any): Keyword arguments to pass to torch.save.
"""
try:
assert use_dill
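The `try: assert use_dill` shown above begins a fallback pattern; a hedged, standalone sketch of that idea (not the module's exact implementation, and the function name is hypothetical):

```python
import torch


def torch_save_sketch(*args, use_dill=True, **kwargs):
    """Prefer dill (which can pickle lambdas) when requested and available, else standard pickle (sketch)."""
    try:
        assert use_dill
        import dill as pickle  # optional dependency
    except (AssertionError, ImportError):
        import pickle
    kwargs.setdefault("pickle_module", pickle)
    return torch.save(*args, **kwargs)
```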
