|
917 | 917 | }
|
918 | 918 | ]
|
919 | 919 | },
|
| 920 | + { |
| 921 | + "cell_type": "markdown", |
| 922 | + "source": [ |
| 923 | + "## Deploy model on Roboflow\n", |
| 924 | + "\n", |
| 925 | + "Once you have finished training your YOLOv11 model, you’ll have a set of trained weights ready for use. These weights will be in the `/runs/segment/train/weights/best.pt` file of your project. You can upload your model weights to Roboflow Deploy to use your trained weights on our infinitely scalable infrastructure.\n", |
| 926 | + "\n", |
| 927 | + "The `.deploy()` function in the [Roboflow pip package](https://docs.roboflow.com/python) now supports uploading YOLOv11 weights." |
| 928 | + ], |
| 929 | + "metadata": { |
| 930 | + "id": "4Z2R1eJiz4ux" |
| 931 | + } |
| 932 | + }, |
| 933 | + { |
| 934 | + "cell_type": "code", |
| 935 | + "source": [ |
| 936 | + "project.version(dataset.version).deploy(model_type=\"yolov11-seg\", model_path=f\"{HOME}/runs/segment/train/\")" |
| 937 | + ], |
| 938 | + "metadata": { |
| 939 | + "id": "IjZQqXwU0UEo" |
| 940 | + }, |
| 941 | + "execution_count": null, |
| 942 | + "outputs": [] |
| 943 | + }, |
| 944 | + { |
| 945 | + "cell_type": "code", |
| 946 | + "source": [ |
| 947 | + "!pip install inference" |
| 948 | + ], |
| 949 | + "metadata": { |
| 950 | + "id": "pz765jno0Xl3" |
| 951 | + }, |
| 952 | + "execution_count": null, |
| 953 | + "outputs": [] |
| 954 | + }, |
| 955 | + { |
| 956 | + "cell_type": "code", |
| 957 | + "source": [ |
| 958 | + "import os, random, cv2\n", |
| 959 | + "import supervision as sv\n", |
| 960 | + "import IPython\n", |
| 961 | + "import inference\n", |
| 962 | + "\n", |
| 963 | + "model_id = project.id.split(\"/\")[1] + \"/\" + dataset.version\n", |
| 964 | + "model = inference.get_model(model_id, userdata.get('ROBOFLOW_API_KEY'))\n", |
| 965 | + "\n", |
| 966 | + "# Location of test set images\n", |
| 967 | + "test_set_loc = dataset.location + \"/test/images/\"\n", |
| 968 | + "test_images = os.listdir(test_set_loc)\n", |
| 969 | + "\n", |
| 970 | + "# Run inference on 4 random test images, or fewer if fewer images are available\n", |
| 971 | + "for img_name in random.sample(test_images, min(4, len(test_images))):\n", |
| 972 | + " print(\"Running inference on \" + img_name)\n", |
| 973 | + "\n", |
| 974 | + " # Load image\n", |
| 975 | + " image = cv2.imread(os.path.join(test_set_loc, img_name))\n", |
| 976 | + "\n", |
| 977 | + " # Perform inference\n", |
| 978 | + " results = model.infer(image)[0]\n", |
| 979 | + " detections = sv.Detections.from_inference(results)\n", |
| 980 | + "\n", |
| 981 | + "    # Annotate masks and labels\n", |
| 982 | + " mask_annotator = sv.MaskAnnotator()\n", |
| 983 | + " label_annotator = sv.LabelAnnotator()\n", |
| 984 | + " annotated_image = mask_annotator.annotate(scene=image, detections=detections)\n", |
| 985 | + " annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections)\n", |
| 986 | + "\n", |
| 987 | + " # Display annotated image\n", |
| 988 | + " _, ret = cv2.imencode('.jpg', annotated_image)\n", |
| 989 | + " i = IPython.display.Image(data=ret)\n", |
| 990 | + " IPython.display.display(i)\n" |
| 991 | + ], |
| 992 | + "metadata": { |
| 993 | + "id": "seFGjEE20X05" |
| 994 | + }, |
| 995 | + "execution_count": null, |
| 996 | + "outputs": [] |
| 997 | + }, |
920 | 998 | {
|
921 | 999 | "cell_type": "markdown",
|
922 | 1000 | "source": [
|
|
0 commit comments