@@ -60,7 +60,7 @@
"base_uri": "https://localhost:8080/",
"height": 1000
},
"outputId": "9012b4cd-53eb-4c84-f5b7-4976d4b4e58a"
"outputId": "5006941e-44ff-4e27-f53e-31bf87221334"
},
"source": [
"# Pip install method (recommended)\n",
@@ -117,7 +117,7 @@
"# Run inference on an image with YOLOv8n\n",
"!yolo task=detect mode=predict model=yolov8n.pt conf=0.25 source='https://ultralytics.com/images/zidane.jpg'"
],
"execution_count": 2,
"execution_count": null,
"outputs": [
{
"output_type": "stream",
@@ -175,14 +175,114 @@
{
"cell_type": "code",
"metadata": {
"id": "X58w8JLpMnjH"
"id": "X58w8JLpMnjH",
"outputId": "3e8689b5-e6e6-4764-c1d9-2626f53355f2",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"source": [
"# Validate YOLOv8n on COCO128 val\n",
"!yolo task=detect mode=val model=yolov8n.pt data=coco128.yaml"
],
"execution_count": null,
"outputs": []
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Ultralytics YOLOv8.0.5 🚀 Python-3.8.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
"Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt to yolov8n.pt...\n",
"100% 6.24M/6.24M [00:01<00:00, 6.32MB/s]\n",
"\n",
"Fusing layers... \n",
"YOLOv8n summary: 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n",
"\n",
"Dataset not found ⚠️, missing paths ['/datasets/coco128/images/train2017']\n",
"Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n",
"100% 6.66M/6.66M [00:00<00:00, 71.9MB/s]\n",
"Dataset download success ✅ (0.8s), saved to \u001b[1m/datasets\u001b[0m\n",
"Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n",
"100% 755k/755k [00:00<00:00, 44.6MB/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning /datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1451.73it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /datasets/coco128/labels/train2017.cache\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 8/8 [00:05<00:00, 1.53it/s]\n",
" all 128 929 0.64 0.537 0.605 0.446\n",
" person 128 254 0.797 0.677 0.764 0.538\n",
" bicycle 128 6 0.514 0.333 0.315 0.264\n",
" car 128 46 0.813 0.217 0.273 0.168\n",
" motorcycle 128 5 0.687 0.887 0.898 0.685\n",
" airplane 128 6 0.82 0.833 0.927 0.675\n",
" bus 128 7 0.491 0.714 0.728 0.671\n",
" train 128 3 0.534 0.667 0.706 0.604\n",
" truck 128 12 1 0.332 0.473 0.297\n",
" boat 128 6 0.226 0.167 0.316 0.134\n",
" traffic light 128 14 0.734 0.2 0.202 0.139\n",
" stop sign 128 2 1 0.992 0.995 0.701\n",
" bench 128 9 0.839 0.582 0.62 0.365\n",
" bird 128 16 0.921 0.728 0.864 0.51\n",
" cat 128 4 0.875 1 0.995 0.791\n",
" dog 128 9 0.603 0.889 0.785 0.585\n",
" horse 128 2 0.597 1 0.995 0.518\n",
" elephant 128 17 0.849 0.765 0.9 0.679\n",
" bear 128 1 0.593 1 0.995 0.995\n",
" zebra 128 4 0.848 1 0.995 0.965\n",
" giraffe 128 9 0.72 1 0.951 0.722\n",
" backpack 128 6 0.589 0.333 0.376 0.232\n",
" umbrella 128 18 0.804 0.5 0.643 0.414\n",
" handbag 128 19 0.424 0.0526 0.165 0.0889\n",
" tie 128 7 0.804 0.714 0.674 0.476\n",
" suitcase 128 4 0.635 0.883 0.745 0.534\n",
" frisbee 128 5 0.675 0.8 0.759 0.688\n",
" skis 128 1 0.567 1 0.995 0.497\n",
" snowboard 128 7 0.742 0.714 0.747 0.5\n",
" sports ball 128 6 0.716 0.433 0.485 0.278\n",
" kite 128 10 0.817 0.45 0.569 0.184\n",
" baseball bat 128 4 0.551 0.25 0.353 0.175\n",
" baseball glove 128 7 0.624 0.429 0.429 0.293\n",
" skateboard 128 5 0.846 0.6 0.6 0.41\n",
" tennis racket 128 7 0.726 0.387 0.487 0.33\n",
" bottle 128 18 0.448 0.389 0.376 0.208\n",
" wine glass 128 16 0.743 0.362 0.584 0.333\n",
" cup 128 36 0.58 0.278 0.404 0.29\n",
" fork 128 6 0.527 0.167 0.246 0.184\n",
" knife 128 16 0.564 0.5 0.59 0.36\n",
" spoon 128 22 0.597 0.182 0.328 0.19\n",
" bowl 128 28 0.648 0.643 0.618 0.491\n",
" banana 128 1 0 0 0.124 0.0379\n",
" sandwich 128 2 0.249 0.5 0.308 0.308\n",
" orange 128 4 1 0.31 0.995 0.623\n",
" broccoli 128 11 0.374 0.182 0.249 0.203\n",
" carrot 128 24 0.648 0.458 0.572 0.362\n",
" hot dog 128 2 0.351 0.553 0.745 0.721\n",
" pizza 128 5 0.644 1 0.995 0.843\n",
" donut 128 14 0.657 1 0.94 0.864\n",
" cake 128 4 0.618 1 0.945 0.845\n",
" chair 128 35 0.506 0.514 0.442 0.239\n",
" couch 128 6 0.463 0.5 0.706 0.555\n",
" potted plant 128 14 0.65 0.643 0.711 0.472\n",
" bed 128 3 0.698 0.667 0.789 0.625\n",
" dining table 128 13 0.432 0.615 0.485 0.366\n",
" toilet 128 2 0.615 0.5 0.695 0.676\n",
" tv 128 2 0.373 0.62 0.745 0.696\n",
" laptop 128 3 1 0 0.451 0.361\n",
" mouse 128 2 1 0 0.0625 0.00625\n",
" remote 128 8 0.843 0.5 0.605 0.529\n",
" cell phone 128 8 0 0 0.0549 0.0393\n",
" microwave 128 3 0.435 0.667 0.806 0.718\n",
" oven 128 5 0.412 0.4 0.339 0.27\n",
" sink 128 6 0.35 0.167 0.182 0.129\n",
" refrigerator 128 5 0.589 0.4 0.604 0.452\n",
" book 128 29 0.629 0.103 0.346 0.178\n",
" clock 128 9 0.788 0.83 0.875 0.74\n",
" vase 128 2 0.376 1 0.828 0.795\n",
" scissors 128 1 1 0 0.249 0.0746\n",
" teddy bear 128 21 0.877 0.333 0.591 0.394\n",
" toothbrush 128 5 0.743 0.6 0.638 0.374\n",
"Speed: 1.1ms pre-process, 5.7ms inference, 0.0ms loss, 3.7ms post-process per image\n"
]
}
]
},
{
"cell_type": "markdown",
@@ -200,14 +300,163 @@
{
"cell_type": "code",
"metadata": {
"id": "1NcFxRcFdJ_O"
"id": "1NcFxRcFdJ_O",
"outputId": "3e6ce168-7f91-4253-d2f1-84c8254a66ee",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"source": [
"# Train YOLOv8n on COCO128 for 3 epochs\n",
"!yolo task=detect mode=train model=yolov8n.pt data=coco128.yaml epochs=3 imgsz=640"
],
"execution_count": null,
"outputs": []
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Ultralytics YOLOv8.0.5 🚀 Python-3.8.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
"\u001b[34m\u001b[1myolo/engine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=coco128.yaml, epochs=3, patience=50, batch=16, imgsz=640, save=True, cache=False, device=, workers=8, project=None, name=None, exist_ok=False, pretrained=False, optimizer=SGD, verbose=False, seed=0, deterministic=True, single_cls=False, image_weights=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, hide_labels=False, hide_conf=False, vid_stride=1, line_thickness=3, visualize=False, augment=False, agnostic_nms=False, retina_masks=False, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=17, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, fl_gamma=0.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, cfg=None, hydra={'output_subdir': None, 'run': {'dir': '.'}}, v5loader=False, save_dir=runs/detect/train\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 464 ultralytics.nn.modules.Conv [3, 16, 3, 2] \n",
" 1 -1 1 4672 ultralytics.nn.modules.Conv [16, 32, 3, 2] \n",
" 2 -1 1 7360 ultralytics.nn.modules.C2f [32, 32, 1, True] \n",
" 3 -1 1 18560 ultralytics.nn.modules.Conv [32, 64, 3, 2] \n",
" 4 -1 2 49664 ultralytics.nn.modules.C2f [64, 64, 2, True] \n",
" 5 -1 1 73984 ultralytics.nn.modules.Conv [64, 128, 3, 2] \n",
" 6 -1 2 197632 ultralytics.nn.modules.C2f [128, 128, 2, True] \n",
" 7 -1 1 295424 ultralytics.nn.modules.Conv [128, 256, 3, 2] \n",
" 8 -1 1 460288 ultralytics.nn.modules.C2f [256, 256, 1, True] \n",
" 9 -1 1 164608 ultralytics.nn.modules.SPPF [256, 256, 5] \n",
" 10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 11 [-1, 6] 1 0 ultralytics.nn.modules.Concat [1] \n",
" 12 -1 1 148224 ultralytics.nn.modules.C2f [384, 128, 1] \n",
" 13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 14 [-1, 4] 1 0 ultralytics.nn.modules.Concat [1] \n",
" 15 -1 1 37248 ultralytics.nn.modules.C2f [192, 64, 1] \n",
" 16 -1 1 36992 ultralytics.nn.modules.Conv [64, 64, 3, 2] \n",
" 17 [-1, 12] 1 0 ultralytics.nn.modules.Concat [1] \n",
" 18 -1 1 123648 ultralytics.nn.modules.C2f [192, 128, 1] \n",
" 19 -1 1 147712 ultralytics.nn.modules.Conv [128, 128, 3, 2] \n",
" 20 [-1, 9] 1 0 ultralytics.nn.modules.Concat [1] \n",
" 21 -1 1 493056 ultralytics.nn.modules.C2f [384, 256, 1] \n",
" 22 [15, 18, 21] 1 897664 ultralytics.nn.modules.Detect [80, [64, 128, 256]] \n",
"Model summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs\n",
"\n",
"Transferred 355/355 items from pretrained weights\n",
"\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 64 weight(decay=0.0005), 63 bias\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning /datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
"\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning /datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
"Image sizes 640 train, 640 val\n",
"Using 2 dataloader workers\n",
"Logging results to \u001b[1mruns/detect/train\u001b[0m\n",
"Starting training for 3 epochs...\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 1/3 4.31G 1.221 1.429 1.241 196 640: 100% 8/8 [00:09<00:00, 1.18s/it]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.95it/s]\n",
" all 128 929 0.671 0.516 0.617 0.457\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 2/3 5.31G 1.186 1.306 1.255 287 640: 100% 8/8 [00:06<00:00, 1.33it/s]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.92it/s]\n",
" all 128 929 0.668 0.582 0.637 0.473\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 3/3 5.31G 1.17 1.408 1.267 189 640: 100% 8/8 [00:06<00:00, 1.19it/s]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 4/4 [00:04<00:00, 1.16s/it]\n",
" all 128 929 0.638 0.601 0.645 0.483\n",
"\n",
"3 epochs completed in 0.011 hours.\n",
"Optimizer stripped from runs/detect/train/weights/last.pt, 6.5MB\n",
"Optimizer stripped from runs/detect/train/weights/best.pt, 6.5MB\n",
"\n",
"Validating runs/detect/train/weights/best.pt...\n",
"Ultralytics YOLOv8.0.5 🚀 Python-3.8.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15110MiB)\n",
"Fusing layers... \n",
"Model summary: 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100% 4/4 [00:05<00:00, 1.31s/it]\n",
" all 128 929 0.638 0.602 0.644 0.483\n",
" person 128 254 0.703 0.709 0.769 0.548\n",
" bicycle 128 6 0.455 0.333 0.322 0.254\n",
" car 128 46 0.773 0.217 0.291 0.184\n",
" motorcycle 128 5 0.551 0.8 0.895 0.724\n",
" airplane 128 6 0.743 0.833 0.927 0.73\n",
" bus 128 7 0.692 0.714 0.7 0.636\n",
" train 128 3 0.733 0.931 0.913 0.797\n",
" truck 128 12 0.752 0.5 0.497 0.324\n",
" boat 128 6 0.41 0.333 0.492 0.344\n",
" traffic light 128 14 0.682 0.214 0.202 0.139\n",
" stop sign 128 2 0.933 1 0.995 0.671\n",
" bench 128 9 0.752 0.556 0.603 0.416\n",
" bird 128 16 0.875 0.876 0.957 0.641\n",
" cat 128 4 0.863 1 0.995 0.76\n",
" dog 128 9 0.554 0.778 0.855 0.664\n",
" horse 128 2 0.706 1 0.995 0.561\n",
" elephant 128 17 0.761 0.882 0.929 0.722\n",
" bear 128 1 0.595 1 0.995 0.995\n",
" zebra 128 4 0.85 1 0.995 0.966\n",
" giraffe 128 9 0.891 1 0.995 0.683\n",
" backpack 128 6 0.487 0.333 0.354 0.224\n",
" umbrella 128 18 0.54 0.667 0.687 0.461\n",
" handbag 128 19 0.496 0.105 0.212 0.125\n",
" tie 128 7 0.611 0.714 0.615 0.432\n",
" suitcase 128 4 0.469 1 0.745 0.529\n",
" frisbee 128 5 0.622 0.8 0.733 0.64\n",
" skis 128 1 0.721 1 0.995 0.531\n",
" snowboard 128 7 0.687 0.714 0.751 0.51\n",
" sports ball 128 6 0.71 0.42 0.503 0.282\n",
" kite 128 10 0.81 0.5 0.59 0.197\n",
" baseball bat 128 4 0.474 0.461 0.261 0.115\n",
" baseball glove 128 7 0.67 0.429 0.43 0.317\n",
" skateboard 128 5 0.751 0.6 0.599 0.387\n",
" tennis racket 128 7 0.742 0.415 0.507 0.378\n",
" bottle 128 18 0.409 0.333 0.354 0.235\n",
" wine glass 128 16 0.562 0.5 0.597 0.356\n",
" cup 128 36 0.67 0.306 0.411 0.296\n",
" fork 128 6 0.57 0.167 0.229 0.203\n",
" knife 128 16 0.608 0.562 0.634 0.405\n",
" spoon 128 22 0.529 0.358 0.369 0.201\n",
" bowl 128 28 0.594 0.679 0.671 0.56\n",
" banana 128 1 0.0625 0.312 0.199 0.0513\n",
" sandwich 128 2 0.638 0.913 0.828 0.828\n",
" orange 128 4 0.743 0.728 0.895 0.595\n",
" broccoli 128 11 0.49 0.264 0.278 0.232\n",
" carrot 128 24 0.547 0.667 0.704 0.47\n",
" hot dog 128 2 0.578 1 0.828 0.796\n",
" pizza 128 5 0.835 1 0.995 0.84\n",
" donut 128 14 0.537 1 0.891 0.788\n",
" cake 128 4 0.807 1 0.995 0.904\n",
" chair 128 35 0.401 0.514 0.485 0.277\n",
" couch 128 6 0.795 0.649 0.746 0.504\n",
" potted plant 128 14 0.563 0.643 0.676 0.471\n",
" bed 128 3 0.777 1 0.995 0.735\n",
" dining table 128 13 0.425 0.692 0.578 0.48\n",
" toilet 128 2 0.508 0.5 0.745 0.721\n",
" tv 128 2 0.55 0.649 0.828 0.762\n",
" laptop 128 3 1 0 0.741 0.653\n",
" mouse 128 2 1 0 0.0454 0.00907\n",
" remote 128 8 0.83 0.5 0.569 0.449\n",
" cell phone 128 8 0 0 0.0819 0.0266\n",
" microwave 128 3 0.475 0.667 0.83 0.699\n",
" oven 128 5 0.5 0.4 0.348 0.275\n",
" sink 128 6 0.354 0.187 0.368 0.217\n",
" refrigerator 128 5 0.518 0.4 0.729 0.571\n",
" book 128 29 0.583 0.241 0.396 0.204\n",
" clock 128 9 0.891 0.889 0.91 0.773\n",
" vase 128 2 0.506 1 0.828 0.745\n",
" scissors 128 1 1 0 0.142 0.0426\n",
" teddy bear 128 21 0.587 0.476 0.63 0.458\n",
" toothbrush 128 5 0.784 0.736 0.898 0.544\n",
"Speed: 0.2ms pre-process, 5.1ms inference, 0.0ms loss, 3.3ms post-process per image\n",
"Saving runs/detect/train/predictions.json...\n",
"Results saved to \u001b[1mruns/detect/train\u001b[0m\n"
]
}
]
},
{
"cell_type": "markdown",
@@ -216,8 +465,8 @@
"\n",
"Export a YOLOv8 model to any supported format with the `format` argument, e.g. `format=onnx`.\n",
"\n",
"- 💡 ProTip: Export to ONNX or OpenVINO for up to 3x CPU speedup. \n",
"- 💡 ProTip: Export to TensorRT for up to 5x GPU speedup.\n",
"- 💡 ProTip: Export to [ONNX](https://onnx.ai/) or [OpenVINO](https://docs.openvino.ai/latest/index.html) for up to 3x CPU speedup. \n",
"- 💡 ProTip: Export to [TensorRT](https://developer.nvidia.com/tensorrt) for up to 5x GPU speedup.\n",
"\n",
"\n",
"| Format | `format=` | Model |\n",
@@ -250,24 +499,24 @@
"base_uri": "https://localhost:8080/"
},
"id": "CYIjW4igCjqD",
"outputId": "f0910315-d678-45b4-c283-3ccd018a5024"
"outputId": "3bb45917-f90e-4951-959d-7bcd26680f2e"
},
"execution_count": null,
"execution_count": 4,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Ultralytics YOLOv8.0.1 🚀 Python-3.8.16 torch-1.13.0+cu116 CPU\n",
"Ultralytics YOLOv8.0.5 🚀 Python-3.8.16 torch-1.13.1+cu116 CPU\n",
"Fusing layers... \n",
"YOLOv8n summary: 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n",
"\n",
"\u001b[34m\u001b[1mPyTorch:\u001b[0m starting from yolov8n.pt with output shape (1, 84, 8400) (6.2 MB)\n",
"\n",
"\u001b[34m\u001b[1mTorchScript:\u001b[0m starting export with torch 1.13.0+cu116...\n",
"\u001b[34m\u001b[1mTorchScript:\u001b[0m export success ✅ 1.8s, saved as yolov8n.torchscript (12.4 MB)\n",
"\u001b[34m\u001b[1mTorchScript:\u001b[0m starting export with torch 1.13.1+cu116...\n",
"\u001b[34m\u001b[1mTorchScript:\u001b[0m export success ✅ 1.9s, saved as yolov8n.torchscript (12.4 MB)\n",
"\n",
"Export complete (2.5s)\n",
"Export complete (2.6s)\n",
"Results saved to \u001b[1m/content\u001b[0m\n",
"Predict: yolo task=detect mode=predict model=yolov8n.torchscript -WARNING ⚠️ not yet supported for YOLOv8 exported models\n",
"Validate: yolo task=detect mode=val model=yolov8n.torchscript -WARNING ⚠️ not yet supported for YOLOv8 exported models\n",