update readme

This commit is contained in:
yadonglu
2024-12-09 12:26:04 -08:00
parent 075f349ea1
commit 669efd3611
6 changed files with 1149 additions and 30 deletions

View File

@@ -28,9 +28,13 @@ pip install -r requirements.txt
Then download the model ckpts files in: https://huggingface.co/microsoft/OmniParser, and put them under weights/, default folder structure is: weights/icon_detect, weights/icon_caption_florence, weights/icon_caption_blip2. Then download the model ckpts files in: https://huggingface.co/microsoft/OmniParser, and put them under weights/, default folder structure is: weights/icon_detect, weights/icon_caption_florence, weights/icon_caption_blip2.
Finally, convert the safetensor to .pt file. For v1:
convert the safetensor to .pt file.
```python ```python
python weights/convert_safetensor_to_pt.py python weights/convert_safetensor_to_pt.py
For v1.5:
download 'model_v1_5.pt' from https://huggingface.co/microsoft/OmniParser/tree/main/icon_detect_v1_5, make a new dir: weights/icon_detect_v1_5, and put it inside the folder. No weight conversion is needed.
``` ```
## Examples: ## Examples:
@@ -39,7 +43,10 @@ We put together a few simple examples in the demo.ipynb.
## Gradio Demo ## Gradio Demo
To run gradio demo, simply run: To run gradio demo, simply run:
```python ```python
python gradio_demo.py # For v1
python gradio_demo.py --icon_detect_model weights/icon_detect/best.pt --icon_caption_model florence2
# For v1.5
python gradio_demo.py --icon_detect_model weights/icon_detect_v1_5/model_v1_5.pt --icon_caption_model florence2
``` ```
## Model Weights License ## Model Weights License

Binary file not shown.

1117
demo.ipynb

File diff suppressed because it is too large Load Diff

View File

@@ -11,10 +11,7 @@ import base64, os
from utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img from utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img
import torch import torch
from PIL import Image from PIL import Image
import argparse
yolo_model = get_yolo_model(model_path='weights/icon_detect/best.pt')
caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption_florence")
# caption_model_processor = get_caption_model_processor(model_name="blip2", model_name_or_path="weights/icon_caption_blip2")
@@ -39,9 +36,9 @@ def process(
box_threshold, box_threshold,
iou_threshold, iou_threshold,
use_paddleocr, use_paddleocr,
imgsz imgsz,
icon_process_batch_size,
) -> Optional[Image.Image]: ) -> Optional[Image.Image]:
image_save_path = 'imgs/saved_image_demo.png' image_save_path = 'imgs/saved_image_demo.png'
image_input.save(image_save_path) image_input.save(image_save_path)
image = Image.open(image_save_path) image = Image.open(image_save_path)
@@ -57,13 +54,26 @@ def process(
ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_save_path, display_img = False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold':0.9}, use_paddleocr=use_paddleocr) ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_save_path, display_img = False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold':0.9}, use_paddleocr=use_paddleocr)
text, ocr_bbox = ocr_bbox_rslt text, ocr_bbox = ocr_bbox_rslt
# print('prompt:', prompt) # print('prompt:', prompt)
dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_save_path, yolo_model, BOX_TRESHOLD = box_threshold, output_coord_in_ratio=True, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor, ocr_text=text,iou_threshold=iou_threshold, imgsz=imgsz) dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_save_path, yolo_model, BOX_TRESHOLD = box_threshold, output_coord_in_ratio=True, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor, ocr_text=text,iou_threshold=iou_threshold, imgsz=imgsz, batch_size=icon_process_batch_size)
image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img))) image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img)))
print('finish processing') print('finish processing')
parsed_content_list = '\n'.join(parsed_content_list) # parsed_content_list = '\n'.join(parsed_content_list)
parsed_content_list = '\n'.join([f'type: {x["type"]}, content: {x["content"]}, interactivity: {x["interactivity"]}' for x in parsed_content_list])
return image, str(parsed_content_list) return image, str(parsed_content_list)
parser = argparse.ArgumentParser(description='Process model paths and names.')
parser.add_argument('--icon_detect_model', type=str, default='weights/icon_detect/best.pt', help='Path to the YOLO model weights')
parser.add_argument('--icon_caption_model', type=str, default='florence2', help='Name of the caption model')
args = parser.parse_args()
icon_detect_model, icon_caption_model = args.icon_detect_model, args.icon_caption_model
yolo_model = get_yolo_model(model_path=icon_detect_model)
if icon_caption_model == 'florence2':
caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption_florence")
elif icon_caption_model == 'blip2':
caption_model_processor = get_caption_model_processor(model_name="blip2", model_name_or_path="weights/icon_caption_blip2")
with gr.Blocks() as demo: with gr.Blocks() as demo:
gr.Markdown(MARKDOWN) gr.Markdown(MARKDOWN)
@@ -78,9 +88,11 @@ with gr.Blocks() as demo:
iou_threshold_component = gr.Slider( iou_threshold_component = gr.Slider(
label='IOU Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.1) label='IOU Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.1)
use_paddleocr_component = gr.Checkbox( use_paddleocr_component = gr.Checkbox(
label='Use PaddleOCR', value=True) label='Use PaddleOCR', value=False)
imgsz_component = gr.Slider( imgsz_component = gr.Slider(
label='Icon Detect Image Size', minimum=640, maximum=1920, step=32, value=640) label='Icon Detect Image Size', minimum=640, maximum=3200, step=32, value=1920)
icon_process_batch_size_component = gr.Slider(
label='Icon Process Batch Size', minimum=1, maximum=256, step=1, value=64)
submit_button_component = gr.Button( submit_button_component = gr.Button(
value='Submit', variant='primary') value='Submit', variant='primary')
with gr.Column(): with gr.Column():
@@ -94,10 +106,16 @@ with gr.Blocks() as demo:
box_threshold_component, box_threshold_component,
iou_threshold_component, iou_threshold_component,
use_paddleocr_component, use_paddleocr_component,
imgsz_component imgsz_component,
icon_process_batch_size_component
], ],
outputs=[image_output_component, text_output_component] outputs=[image_output_component, text_output_component]
) )
# demo.launch(debug=False, show_error=True, share=True) # demo.launch(debug=False, show_error=True, share=True)
demo.launch(share=True, server_port=7861, server_name='0.0.0.0') demo.launch(share=True, server_port=7861, server_name='0.0.0.0')
# python gradio_demo.py --icon_detect_model weights/icon_detect/best.pt --icon_caption_model florence2
# python gradio_demo.py --icon_detect_model weights/icon_detect_v1_5/model_v1_5.pt --icon_caption_model florence2

Binary file not shown.

Before

Width:  |  Height:  |  Size: 149 KiB

After

Width:  |  Height:  |  Size: 786 KiB

View File

@@ -5,16 +5,13 @@ import argparse
import yaml import yaml
import os import os
# accept args to specify v1 or v1_5 # accept arg to specify the weights directory
parser = argparse.ArgumentParser(description='Specify version v1 or v1_5') parser = argparse.ArgumentParser(description='add weight directory')
parser.add_argument('--weights_dir', type=str, required=True, help='Specify the path to the safetensor file', default='weights/icon_detect_v1_5') parser.add_argument('--weights_dir', type=str, required=True, help='Specify the path to the safetensor file', default='weights/icon_detect')
args = parser.parse_args() args = parser.parse_args()
tensor_dict = load_file(os.path.join(args.weights_dir, "model.safetensors")) tensor_dict = load_file(os.path.join(args.weights_dir, "model.safetensors"))
model = DetectionModel(os.path.join(args.weights_dir, "model.yaml")) model = DetectionModel(os.path.join(args.weights_dir, "model.yaml"))
# from ultralytics import YOLO
# som_model = YOLO("yolo11m.pt")
# model = som_model.model
model.load_state_dict(tensor_dict) model.load_state_dict(tensor_dict)
save_dict = {'model':model} save_dict = {'model':model}