Shorten module paths with new 'nn' dir (#96)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
parent 4fb04be20b
commit 48cffa176e
18 changed files with 141 additions and 147 deletions
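The diff removes the old deep ultralytics.yolo.utils.modeling paths; per the PR title, the contents move under a new top-level 'nn' directory, so imports presumably shorten along these lines (the new path is inferred from the title and is not shown in this diff):

# Inferred example of the path shortening (new path assumed from the PR title):
# before: from ultralytics.yolo.utils.modeling.modules import Conv
# after:  from ultralytics.nn.modules import Conv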
@@ -1 +1,5 @@
__version__ = "8.0.0.dev0"

from ultralytics.yolo.engine.model import YOLO

__all__ = ["__version__", "YOLO"]  # allow simpler import
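The five added lines expose the version string and the YOLO class at the package root, allowing the simpler import the inline comment mentions:

# Enabled by the new top-level __init__.py above:
from ultralytics import YOLO, __version__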
@@ -1,7 +0,0 @@
from ultralytics.yolo import v8

from .engine.model import YOLO
from .engine.trainer import BaseTrainer
from .engine.validator import BaseValidator

__all__ = ["BaseTrainer", "BaseValidator", "YOLO"]  # allow simpler import
@@ -1,109 +0,0 @@
import contextlib
from pathlib import Path

import torch
import torchvision
from torch import nn

from ultralytics.yolo.utils.downloads import attempt_download
from ultralytics.yolo.utils.modeling.modules import *  # assumed to provide Ensemble, Detect, Segment, Conv, LOGGER, colorstr, make_divisible, etc.


def attempt_load_weights(weights, device=None, inplace=True, fuse=True):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model

        # Model compatibility updates
        if not hasattr(ckpt, 'stride'):
            ckpt.stride = torch.tensor([32.])
        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict

        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode

    # Module compatibility updates
    for m in model.modules():
        t = type(m)
        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment):
            m.inplace = inplace  # torch 1.7.0 compatibility
        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    # Return model
    if len(model) == 1:
        return model[-1]

    # Return detection ensemble
    print(f'Ensemble created with {weights}\n')
    for k in 'names', 'nc', 'yaml':
        setattr(model, k, getattr(model[0], k))
    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
    return model
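
# Editor's sketch, not part of the original file: typical calls to attempt_load_weights.
# A single path returns one model; a list returns an Ensemble (filenames hypothetical).
#   model = attempt_load_weights('yolov8n.pt', device='cpu')       # single FP32 model in eval mode
#   ensemble = attempt_load_weights(['yolov8n.pt', 'yolov8s.pt'])  # Ensemble of two models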


def parse_model(d, ch):  # model_dict, input_channels(3)
    # Parse a YOLOv5 model.yaml dictionary
    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<50}{'arguments':<30}")
    nc, gd, gw, act = d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
    if act:
        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
    no = nc + 4  # number of outputs = classes + box

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            with contextlib.suppress(NameError):
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings

        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in {
                Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus, BottleneckCSP,
                C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
            c1, c2 = ch[f], args[0]
            if c2 != no:  # if not output
                c2 = make_divisible(c2 * gw, 8)

            args = [c1, c2, *args[1:]]
            if m in {BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x}:
                args.insert(2, n)  # number of repeats
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum(ch[x] for x in f)
        # TODO: channel, gw, gd
        elif m in {Detect, Segment}:
            args.append([ch[x] for x in f])
            if m is Segment:
                args[3] = make_divisible(args[3] * gw, 8)
        else:
            c2 = ch[f]

        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        m.np = sum(x.numel() for x in m_.parameters())  # number params
        m_.i, m_.f, m_.type = i, f, t  # attach index, 'from' index, type
        LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{m.np:10.0f} {t:<50}{str(args):<30}')  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
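
# Editor's sketch, not part of the original file: the YAML-style dict parse_model
# expects, as a minimal hypothetical config (ch=[3] for a 3-channel RGB input).
#   d = {'nc': 80, 'depth_multiple': 0.33, 'width_multiple': 0.25,
#        'backbone': [[-1, 1, 'Conv', [64, 3, 2]],   # [from, repeats, module, args]
#                     [-1, 3, 'C2f', [128, True]]],
#        'head': [[[0, 1], 1, 'Detect', ['nc']]]}    # Detect reads from layers 0 and 1
#   model, save = parse_model(d, ch=[3])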


def get_model(model='s.pt', pretrained=True):
    # Load a YOLO model locally, from torchvision, or from Ultralytics assets
    if model.endswith(".pt"):
        model = model.split(".")[0]

    if Path(f"{model}.pt").is_file():  # local file
        return attempt_load_weights(f"{model}.pt", device='cpu')
    elif model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
        return torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
    else:  # Ultralytics assets
        return attempt_load_weights(f"{model}.pt", device='cpu')
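
For context, get_model resolves a name three ways; a brief sketch with hypothetical model names:

# Editor's sketch, not part of the original file:
#   get_model('yolov8n.pt')  # 1) local file ./yolov8n.pt, if present
#   get_model('resnet50')    # 2) torchvision model, ImageNet-pretrained by default
#   get_model('yolov5s.pt')  # 3) otherwise fetched from Ultralytics assets via attempt_download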