From 856b539e54995703cc1bb123ef50a66a7855cdbb Mon Sep 17 00:00:00 2001 From: yadonglu Date: Tue, 26 Nov 2024 13:04:51 -0800 Subject: [PATCH] version 1.5 --- README.md | 2 +- demo.ipynb | 3 --- weights/convert_safetensor_to_pt.py | 2 +- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index e17d218..7d813ff 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ **OmniParser** is a comprehensive method for parsing user interface screenshots into structured and easy-to-understand elements, which significantly enhances the ability of GPT-4V to generate actions that can be accurately grounded in the corresponding regions of the interface. ## News -- [2024/11] OmniParser V1.5 is out! It features: 1) new icon detection model trained on cleaned data, and 2) improved bbox merging logic. +- [2024/11/26] We release an updated version, OmniParser V1.5 which features more fine grained/small icon detection. Examples in the demo.ipynb. - [2024/10] OmniParser was the #1 trending model on huggingface model hub (starting 10/29/2024). - [2024/10] Feel free to checkout our demo on [huggingface space](https://huggingface.co/spaces/microsoft/OmniParser)! (stay tuned for OmniParser + Claude Computer Use) - [2024/10] Both Interactive Region Detection Model and Icon functional description model are released! 
[Hugginface models](https://huggingface.co/microsoft/OmniParser) diff --git a/demo.ipynb b/demo.ipynb index 6688df4..6f34af0 100644 --- a/demo.ipynb +++ b/demo.ipynb @@ -20,9 +20,6 @@ "from PIL import Image\n", "device = 'cuda'\n", "\n", - "# som_model = get_yolo_model(model_path='weights/icon_detect/best.pt')\n", - "# som_model = get_yolo_model(model_path='/home/yadonglu/sandbox/data/yolo/runs/detect/yolo11l_som_detection_seq_10ep_b32_filter5more4/weights/best.pt')\n", - "# som_model = get_yolo_model('/home/yadonglu/sandbox/data/yolo/runs/detect/yolo11l_som_detection_seq_10ep_b24_filter5more1280/weights/best.pt')\n", "som_model = get_yolo_model(model_path='weights/icon_detect_v1_5/best.pt')\n", "\n", "som_model.to(device)\n", diff --git a/weights/convert_safetensor_to_pt.py b/weights/convert_safetensor_to_pt.py index d45869f..901aa09 100644 --- a/weights/convert_safetensor_to_pt.py +++ b/weights/convert_safetensor_to_pt.py @@ -16,7 +16,7 @@ if args.version == 'v1': torch.save({'model':model}, 'weights/icon_detect/best.pt') elif args.version == 'v1_5': print("Converting v1_5") - tensor_dict = load_file("weights/icon_detect_v1_5/model.safetensors") + tensor_dict = load_file("weights/icon_detect_v1_5/model.safetensors") model = DetectionModel('weights/icon_detect_v1_5/model.yaml') model.load_state_dict(tensor_dict) save_dict = {'model':model}