PyUpgrade 3.8 updates (#15941)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
pull/14872/head
Glenn Jocher 2 months ago committed by GitHub
parent ea13dc6208
commit 9ec8e9acbf
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 2
      docs/build_docs.py
  2. 2
      ultralytics/data/converter.py
  3. 2
      ultralytics/data/dataset.py
  4. 2
      ultralytics/data/split_dota.py
  5. 2
      ultralytics/engine/predictor.py
  6. 5
      ultralytics/engine/validator.py
  7. 17
      ultralytics/hub/utils.py
  8. 2
      ultralytics/models/fastsam/predict.py
  9. 2
      ultralytics/models/sam/modules/blocks.py
  10. 4
      ultralytics/nn/modules/block.py
  11. 2
      ultralytics/solutions/parking_management.py
  12. 6
      ultralytics/utils/benchmarks.py
  13. 2
      ultralytics/utils/metrics.py

@ -164,7 +164,7 @@ def update_docs_html():
# Convert plaintext links to HTML hyperlinks
files_modified = 0
for html_file in tqdm(SITE.rglob("*.html"), desc="Converting plaintext links"):
with open(html_file, "r", encoding="utf-8") as file:
with open(html_file, encoding="utf-8") as file:
content = file.read()
updated_content = convert_plaintext_links_to_html(content)
if updated_content != content:

@ -490,7 +490,7 @@ def convert_dota_to_yolo_obb(dota_root_path: str):
normalized_coords = [
coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8)
]
formatted_coords = ["{:.6g}".format(coord) for coord in normalized_coords]
formatted_coords = [f"{coord:.6g}" for coord in normalized_coords]
g.write(f"{class_idx} {' '.join(formatted_coords)}\n")
for phase in ["train", "val"]:

@ -296,7 +296,7 @@ class GroundingDataset(YOLODataset):
"""Loads annotations from a JSON file, filters, and normalizes bounding boxes for each image."""
labels = []
LOGGER.info("Loading annotation file...")
with open(self.json_file, "r") as f:
with open(self.json_file) as f:
annotations = json.load(f)
images = {f'{x["id"]:d}': x for x in annotations["images"]}
img_to_anns = defaultdict(list)

@ -193,7 +193,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir, allow_background_i
with open(Path(lb_dir) / f"{new_name}.txt", "w") as f:
for lb in label:
formatted_coords = ["{:.6g}".format(coord) for coord in lb[1:]]
formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")

@ -328,7 +328,7 @@ class BasePredictor:
frame = int(match[1]) if match else None # 0 if frame undetermined
self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
string += "%gx%g " % im.shape[2:]
string += "{:g}x{:g} ".format(*im.shape[2:])
result = self.results[i]
result.save_dir = self.save_dir.__str__() # used in other locations
string += f"{result.verbose()}{result.speed['inference']:.1f}ms"

@ -202,8 +202,9 @@ class BaseValidator:
return {k: round(float(v), 5) for k, v in results.items()} # return results as 5 decimal place floats
else:
LOGGER.info(
"Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image"
% tuple(self.speed.values())
"Speed: {:.1f}ms preprocess, {:.1f}ms inference, {:.1f}ms loss, {:.1f}ms postprocess per image".format(
*tuple(self.speed.values())
)
)
if self.args.save_json and self.jdict:
with open(str(self.save_dir / "predictions.json"), "w") as f:

@ -55,23 +55,22 @@ def request_with_credentials(url: str) -> any:
display.display(
display.Javascript(
"""
window._hub_tmp = new Promise((resolve, reject) => {
f"""
window._hub_tmp = new Promise((resolve, reject) => {{
const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
fetch("%s", {
fetch("{url}", {{
method: 'POST',
credentials: 'include'
})
}})
.then((response) => resolve(response.json()))
.then((json) => {
.then((json) => {{
clearTimeout(timeout);
}).catch((err) => {
}}).catch((err) => {{
clearTimeout(timeout);
reject(err);
});
});
}});
}});
"""
% url
)
)
return output.eval_js("_hub_tmp")

@ -100,7 +100,7 @@ class FastSAMPredictor(SegmentationPredictor):
texts = [texts]
crop_ims, filter_idx = [], []
for i, b in enumerate(result.boxes.xyxy.tolist()):
x1, y1, x2, y2 = [int(x) for x in b]
x1, y1, x2, y2 = (int(x) for x in b)
if masks[i].sum() <= 100:
filter_idx.append(i)
continue

@ -35,7 +35,7 @@ class DropPath(nn.Module):
def __init__(self, drop_prob=0.0, scale_by_keep=True):
"""Initialize DropPath module for stochastic depth regularization during training."""
super(DropPath, self).__init__()
super().__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep

@ -672,7 +672,7 @@ class CBLinear(nn.Module):
def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):
    """Initializes the CBLinear module, passing inputs unchanged.

    Args:
        c1 (int): Input channel count fed to the internal Conv2d.
        c2s (list[int]): Output channel split sizes; the conv emits sum(c2s) channels.
        k (int): Convolution kernel size.
        s (int): Convolution stride.
        p (int | None): Explicit padding, or None to let autopad() derive it from k.
        g (int): Number of convolution groups.
    """
    # Single modern zero-arg super call; the diff-view text duplicated the legacy
    # super(CBLinear, self).__init__() form, which would initialize the base twice.
    super().__init__()
    self.c2s = c2s
    self.conv = nn.Conv2d(c1, sum(c2s), k, s, autopad(k, p), groups=g, bias=True)
@ -686,7 +686,7 @@ class CBFuse(nn.Module):
def __init__(self, idx):
"""Initializes CBFuse module with layer index for selective feature fusion."""
super(CBFuse, self).__init__()
super().__init__()
self.idx = idx
def forward(self, xs):

@ -210,7 +210,7 @@ class ParkingManagement:
Args:
json_file (str): file that have all parking slot points
"""
with open(json_file, "r") as f:
with open(json_file) as f:
return json.load(f)
def process_data(self, json_data, im0, boxes, clss):

@ -198,7 +198,7 @@ class RF100Benchmark:
os.mkdir("ultralytics-benchmarks")
safe_download("https://github.com/ultralytics/assets/releases/download/v0.0.0/datasets_links.txt")
with open(ds_link_txt, "r") as file:
with open(ds_link_txt) as file:
for line in file:
try:
_, url, workspace, project, version = re.split("/+", line.strip())
@ -222,7 +222,7 @@ class RF100Benchmark:
Args:
path (str): YAML file path.
"""
with open(path, "r") as file:
with open(path) as file:
yaml_data = yaml.safe_load(file)
yaml_data["train"] = "train/images"
yaml_data["val"] = "valid/images"
@ -242,7 +242,7 @@ class RF100Benchmark:
skip_symbols = ["🚀", "", "💡", ""]
with open(yaml_path) as stream:
class_names = yaml.safe_load(stream)["names"]
with open(val_log_file, "r", encoding="utf-8") as f:
with open(val_log_file, encoding="utf-8") as f:
lines = f.readlines()
eval_lines = []
for line in lines:

@ -460,7 +460,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names={}, on_plot=N
else:
ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision)
ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean())
ax.plot(px, py.mean(1), linewidth=3, color="blue", label=f"all classes {ap[:, 0].mean():.3f} mAP@0.5")
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.set_xlim(0, 1)

Loading…
Cancel
Save