105 Commits
v1.5.0 ... main

Author SHA1 Message Date
Dom
7579a1fdc7 chore: add .gitignore 2026-03-05 00:37:31 +01:00
Alexey Taymanov
b0d5c9f570 Merge pull request #337 from microsoft/ataymano/sec_updates
Security updates
2025-09-09 16:51:59 -04:00
Alexey Taymanov
8186ddef43 update manage vm script 2025-09-09 20:36:23 +00:00
Alexey Taymanov
8e0f0ecb0f bind to default qemu address 2025-09-09 20:10:39 +00:00
Alexey Taymanov
a19bed9d75 Merge pull request #327 from ataymano/ataymano/omnitool_fix
Omnitool security updates
2025-08-20 15:14:46 -04:00
Alexey Taymanov
46545c5842 Merge pull request #326 from ataymano/ataymano/gradio_demo_fix
Gradio_demo update
2025-08-20 15:14:00 -04:00
ataymano@microsoft.com
0e7ee8e23f 0.0.0.0 -> 127.0.0.1 2025-08-20 12:09:12 -04:00
ataymano@microsoft.com
e233590c6e ip address fix 2025-08-20 11:22:09 -04:00
yadong-lu
5171b09248 support local data logging 2025-03-26 13:33:44 -07:00
yadong-lu
0e0368988e add streamlit interface 2025-03-17 13:18:16 -07:00
yadong-lu
23aa125276 add file viewer; allow file uploads; write plan to json 2025-03-12 18:33:21 -07:00
yadong-lu
09ca3a8f1f simplify omnitool output 2025-03-12 18:31:53 -07:00
yadong-lu
0ecb489d35 update readme 2025-03-10 20:30:38 -07:00
yadong-lu
e9b8341eed update readme 2025-03-10 20:09:48 -07:00
yadong-lu
095cd19ce1 add orchestrated version of agent in omnitool 2025-03-10 20:06:13 -07:00
yadonglu
f5fcdf40e6 remove unnecessary img save 2025-03-10 09:12:20 -07:00
Thomas Dhome-Casanova
547cb8f743 Common setup errors section in readme 2025-02-22 19:20:16 -08:00
Thomas Dhome-Casanova
8d773df827 Windows paddle library install fix 2025-02-22 19:19:30 -08:00
Thomas Dhome-Casanova
fd525911d1 Remove slow mirrors for GIMP 2025-02-22 19:10:41 -08:00
Thomas Dhome-Casanova
574a9c0d03 Minimal omnibox setup addition to readme 2025-02-22 18:51:58 -08:00
Thomas Dhome-Casanova
3c4e77ec37 OmniBox setup additional details + debugging info 2025-02-22 18:46:00 -08:00
yadonglu
e40a461492 update readme install 2025-02-17 21:58:46 -08:00
Thomas Dhome-Casanova
e299874a4a hf cli allow pattern supported 2025-02-15 10:47:57 -08:00
Thomas Dhome-Casanova
92b8252c00 convert rgba to rgb if passed into omniparserserver 2025-02-14 22:49:17 -08:00
yadonglu
741a30f5bd numpy version fix 2025-02-13 17:03:16 -08:00
yadonglu
5bcb4952fa add vid reference 2025-02-13 10:38:20 -08:00
yadonglu
a4c6c92ce7 minor readme 2025-02-12 23:56:22 -08:00
yadonglu
5807f4f558 add vid 2025-02-12 23:53:59 -08:00
yadonglu
fa1e600ec0 add video 2025-02-12 23:45:41 -08:00
yadonglu
fd9db1a545 v2 pre-release; merge demo 2025-02-12 17:04:33 -08:00
yadonglu
f612ddb489 Merge remote-tracking branch 'refs/remotes/origin/demo' into demo 2025-02-12 16:11:31 -08:00
yadonglu
9956befe20 default click/enter for type 2025-02-12 16:11:20 -08:00
Thomas Dhome-Casanova
58373a2ec6 omniparser v2 hf download commands 2025-02-12 11:43:39 -08:00
yadonglu
d60460d62c add exception during parsing; prompt change for scroll 2025-02-11 10:30:30 -08:00
Thomas Dhome Casanova (from Dev Box)
82fa7ffedc update assets 2025-02-10 18:12:17 -08:00
yadonglu
0f91a82fa2 update/test demo.ipynb 2025-02-07 14:53:33 -08:00
yadonglu
bd938052d0 font size adjust 2025-02-07 14:52:41 -08:00
yadonglu
7c73184d04 Merge remote-tracking branch 'origin/demo' into demo 2025-02-07 14:50:36 -08:00
yadonglu
ad33072f77 add acknowledge 2025-02-07 14:46:05 -08:00
yadong-lu
5d3a97db4f Merge pull request #116 from ThomasDh-C/demo
Init gradio demo of computer use
2025-02-06 17:59:22 -06:00
yadonglu
5ba2a7f144 risk miti 2025-02-05 23:37:56 -08:00
yadonglu
dbb86b2f4c app logo file name fix; Acknowledgment; risk mitigation 2025-02-05 23:37:19 -08:00
Thomas Dhome-Casanova
cb92e432b3 Tweaking of information order 2025-02-05 11:11:08 -08:00
Thomas Dhome-Casanova
2327acbab2 add highlights to readme for OmniTool 2025-02-05 11:04:59 -08:00
Thomas Dhome-Casanova
ba5c5162c6 extra small header + padding fix 2025-02-04 18:11:12 -08:00
Thomas Dhome-Casanova
1b7f66c9ee header small 2025-02-04 18:03:41 -08:00
Thomas Dhome-Casanova
5d2d58bba8 Simplify init message 2025-02-04 17:44:40 -08:00
Thomas Dhome-Casanova
75585a6528 make header bar thin for gradio 2025-02-04 17:40:30 -08:00
Thomas Dhome-Casanova
bc4931baba fix header for gradio app 2025-02-04 13:45:47 -08:00
Thomas Dhome-Casanova
07effd1a68 readme + gradio updates 2025-02-04 11:57:52 -08:00
Thomas Dhome-Casanova
61999cef39 readme updates 2025-02-04 11:54:34 -08:00
Thomas Dhome-Casanova
fe84a35292 Naming conventions 2025-02-04 11:43:36 -08:00
Thomas Dhome-Casanova
31d7b1d096 o1 (has vision) and o3-mini (no vision) 2025-02-03 23:52:04 -08:00
yadonglu
8725445881 small fixes 2025-02-03 17:05:17 -08:00
yadonglu
3e51ba0da6 add stop button 2025-02-03 14:28:52 -08:00
Thomas Dhome-Casanova
c724a44080 One folder up 2025-02-03 00:19:40 -05:00
Thomas Dhome-Casanova
89b0e9a807 OmniParser+X Computer Use Demo screenshot 2025-02-03 00:18:36 -05:00
Thomas Dhome-Casanova
633083e2d4 Strawberry demo 2025-02-02 21:16:13 -08:00
Thomas Dhome-Casanova
2b621c5c0f fixes 2025-02-02 16:09:54 -08:00
Thomas Dhome-Casanova
ba7ed0ac06 qwen2.5vl 2025-02-01 17:29:34 -08:00
yadonglu
3d981833e6 fix omniparser server; fix yolo dependency 2025-02-01 14:49:04 -08:00
yadonglu
80f6be73e1 fix readme download model weight folder structure 2025-02-01 14:25:55 -08:00
Thomas Dhome-Casanova
0a4a9f4d23 improve typing perf 2025-02-01 12:09:27 -08:00
Thomas Dhome-Casanova
be506b2d09 R1 fixes 2025-01-31 20:02:58 -08:00
Thomas Dhome Casanova (from Dev Box)
e268184f8d init r1 2025-01-31 18:37:14 -08:00
Thomas Dhome-Casanova
ed7b34621b Add validation on submit 2025-01-30 08:10:38 -08:00
Thomas Dhome-Casanova
5f699faf16 readme init 2025-01-30 00:14:07 -08:00
Thomas Dhome-Casanova
e8882d8484 init windowshost 2025-01-29 23:36:38 -08:00
Thomas Dhome-Casanova
b2d6bc5c3e code cleanup 2025-01-29 23:01:14 -08:00
Thomas Dhome-Casanova
f6029344c5 further clean colored text 2025-01-29 22:45:26 -08:00
Thomas Dhome-Casanova
7800a24b27 Rename folder + remove coloring on omniparseragent printouts 2025-01-29 22:44:23 -08:00
Thomas Dhome-Casanova
746507b9d9 Rename folder names 2025-01-29 22:39:25 -08:00
Thomas Dhome-Casanova
17d02bc8c0 Fix gradio imports 2025-01-29 22:23:35 -08:00
Thomas Dhome-Casanova
41464ccf1c Fix import path issues 2025-01-30 06:18:24 +00:00
Thomas Dhome Casanova (from Dev Box)
124d9f6fb6 Clean up folder structure 2025-01-29 21:54:35 -08:00
Thomas Dhome-Casanova
53900f8411 correct launch command 2025-01-29 21:19:09 +00:00
yadonglu
7ea2239e10 add drop down for omni output in the chat; add args for app.py; 2025-01-28 21:33:58 -08:00
Thomas Dhome-Casanova
16570a9bf3 remove unused function 2025-01-27 22:52:55 -08:00
Thomas Dhome-Casanova
cda4795270 add omniparse is ready endpoint 2025-01-27 22:40:37 -08:00
Thomas Dhome-Casanova
72040b9ded move back to check_ocr_box 2025-01-27 22:38:05 -08:00
Thomas Dhome-Casanova
9cb2263545 align main utils with demo utils 2025-01-27 22:08:54 -08:00
Thomas Dhome-Casanova
5cf55e116f time delay for vm to process action 2025-01-27 13:56:53 -08:00
Thomas Dhome-Casanova
869323bfd9 Clean create gradio 2025-01-27 18:25:19 +00:00
Thomas Dhome Casanova (from Dev Box)
651bbe2924 if run on localhost want it to auto start local server 2025-01-22 23:14:12 -08:00
Thomas Dhome Casanova (from Dev Box)
04000ee008 trash clears history 2025-01-22 22:01:47 -08:00
Thomas Dhome Casanova (from Dev Box)
8778970aff remove selected screen as only 1 screen 2025-01-22 21:45:32 -08:00
Thomas Dhome Casanova (from Dev Box)
b1cd705f1b add in omniparser_url box 2025-01-22 21:38:59 -08:00
Thomas Dhome Casanova (from Dev Box)
bb018460d7 clean oai 2025-01-22 21:25:40 -08:00
Thomas Dhome Casanova (from Dev Box)
93d9e2e12f cleanup gradio app.py 2025-01-22 21:24:09 -08:00
Thomas Dhome Casanova (from Dev Box)
9db016b52f unused file cleanup 2025-01-22 21:14:21 -08:00
Thomas Dhome Casanova (from Dev Box)
c29ac5064a remove custom prompt functionality 2025-01-22 20:59:44 -08:00
yadonglu
6e389fe851 remove debug file 2025-01-22 19:25:01 -08:00
yadonglu
ce199d52ab support remote access using fastapi; add double click/scroll; enable local run omniparser 2025-01-22 19:24:34 -08:00
Thomas Dhome Casanova (from Dev Box)
e0a845d35c remove final use of pyautogui 2025-01-20 19:31:26 -08:00
Thomas Dhome Casanova (from Dev Box)
6cb310d124 remove need to write to disk 2025-01-20 18:29:46 -08:00
Thomas Dhome Casanova (from Dev Box)
85f5fc0385 init gradio demo 2025-01-20 15:07:05 -08:00
Thomas Dhome Casanova (from Dev Box)
9b2c7dae24 clean demo folder 2025-01-20 14:58:51 -08:00
yadonglu
ebc3912727 merge 2025-01-04 20:14:49 -08:00
yadonglu
b9d3cb715b docker demo, migration, speedup inference using cv2 2025-01-04 20:06:33 -08:00
yadonglu
36b0cbea71 add docker file, accelerate inference using cv2 2025-01-02 12:02:08 -08:00
yadonglu
d0c163cd02 minor fix to gradio demo 2024-12-13 11:55:51 -08:00
yadonglu
c9ed5cb426 minor 2024-12-11 14:08:13 -08:00
yadonglu
b1996356ca demo remote request 2024-12-06 16:10:58 -08:00
yadonglu
a3215fd4b6 Merge branch 'master' of https://github.com/microsoft/OmniParser into demo 2024-12-05 11:47:04 -08:00
yadonglu
1b5e5f41de minor 2024-12-05 11:46:55 -08:00
74 changed files with 9255 additions and 2220 deletions

.gitignore (vendored), 82 lines changed

@@ -1,8 +1,76 @@
weights/icon_caption_blip2
weights/icon_caption_florence
weights/icon_detect/
weights/icon_detect_v1_5/
weights/icon_detect_v1_5_2/
.gradio
# === Python ===
__pycache__/
debug.ipynb
*.py[cod]
*.pyo
*.egg-info/
*.egg
dist/
build/
*.whl
# === Virtual environments ===
.venv/
venv/
venv_*/
env/
# === ML Models & Data ===
*.pt
*.pth
*.onnx
*.bin
*.safetensors
*.h5
*.hdf5
*.pkl
*.pickle
*.npy
*.npz
*.faiss
models/
*.tar.gz
*.zip
# === Documents & Media ===
*.pdf
*.docx
*.xlsx
*.csv
*.png
*.jpg
*.jpeg
*.gif
*.mp3
*.wav
*.mp4
# === IDE ===
.idea/
.vscode/
*.swp
*.swo
*~
# === OS ===
.DS_Store
Thumbs.db
.~lock.*
# === Secrets ===
.env
*.env
credentials.json
token.pickle
# === Logs & Cache ===
*.log
logs/
.pytest_cache/
.mypy_cache/
.ruff_cache/
htmlcov/
.coverage
# === Backups ===
*_backup_*
backups/
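
With this many patterns it is easy to lose track of which rule ignores a given path. A minimal sketch, assuming a local checkout with git on PATH, that asks git itself which pattern matched:

```python
import subprocess

def explain_ignore(path: str) -> str:
    """Ask git which .gitignore rule (if any) matches `path`."""
    # `git check-ignore -v` prints "source:line:pattern<TAB>path" for ignored
    # paths and exits with a non-zero status for paths that are not ignored.
    result = subprocess.run(
        ["git", "check-ignore", "-v", path],
        capture_output=True,
        text=True,
    )
    return result.stdout.strip() if result.returncode == 0 else f"{path} is not ignored"

print(explain_ignore("weights/icon_detect/model.pt"))  # expect a *.pt match
print(explain_ignore("gradio_demo.py"))                # expect "not ignored"
```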

LICENSE, 790 lines changed

@@ -1,395 +1,395 @@
Attribution 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution 4.0 International Public License ("Public License"). To the
extent this Public License may be interpreted as a contract, You are
granted the Licensed Rights in consideration of Your acceptance of
these terms and conditions, and the Licensor grants You such rights in
consideration of benefits the Licensor receives from making the
Licensed Material available under these terms and conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
d. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
e. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
f. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
g. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
h. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
i. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
j. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
k. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
4. If You Share Adapted Material You produce, the Adapter's
License You apply must not prevent recipients of the Adapted
Material from complying with this Public License.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material; and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.

README.md

@@ -3,15 +3,20 @@
<p align="center">
<img src="imgs/logo.png" alt="Logo">
</p>
<!-- <a href="https://trendshift.io/repositories/12975" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12975" alt="microsoft%2FOmniParser | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a> -->
[![arXiv](https://img.shields.io/badge/Paper-green)](https://arxiv.org/abs/2408.00203)
[![License](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
📢 [[Project Page](https://microsoft.github.io/OmniParser/)] [[Blog Post](https://www.microsoft.com/en-us/research/articles/omniparser-for-pure-vision-based-gui-agent/)] [[Models](https://huggingface.co/microsoft/OmniParser)] [huggingface space](https://huggingface.co/spaces/microsoft/OmniParser)
📢 [[Project Page](https://microsoft.github.io/OmniParser/)] [[V2 Blog Post](https://www.microsoft.com/en-us/research/articles/omniparser-v2-turning-any-llm-into-a-computer-use-agent/)] [[Models V2](https://huggingface.co/microsoft/OmniParser-v2.0)] [[Models V1.5](https://huggingface.co/microsoft/OmniParser)] [[HuggingFace Space Demo](https://huggingface.co/spaces/microsoft/OmniParser-v2)]
**OmniParser** is a comprehensive method for parsing user interface screenshots into structured and easy-to-understand elements, which significantly enhances the ability of GPT-4V to generate actions that can be accurately grounded in the corresponding regions of the interface.
## News
- [2025/3] We support local logging of trajectories so that you can use OmniParser+OmniTool to build a training data pipeline for your favorite agent in your domain. [Documentation WIP]
- [2025/3] We are gradually adding multi-agent orchestration and improving the user interface in OmniTool for a better experience.
- [2025/2] We release OmniParser V2 [checkpoints](https://huggingface.co/microsoft/OmniParser-v2.0). [Watch Video](https://1drv.ms/v/c/650b027c18d5a573/EWXbVESKWo9Buu6OYCwg06wBeoM97C6EOTG6RjvWLEN1Qg?e=alnHGC)
- [2025/2] We introduce OmniTool: control a Windows 11 VM with OmniParser + your vision model of choice. OmniTool supports the following large language models out of the box: OpenAI (4o/o1/o3-mini), DeepSeek (R1), Qwen (2.5VL), and Anthropic Computer Use. [Watch Video](https://1drv.ms/v/c/650b027c18d5a573/EehZ7RzY69ZHn-MeQHrnnR4BCj3by-cLLpUVlxMjF4O65Q?e=8LxMgX)
- [2025/1] V2 is coming. We achieve a new state-of-the-art result of 39.5% on the new grounding benchmark [Screen Spot Pro](https://github.com/likaixin2000/ScreenSpot-Pro-GUI-Grounding/tree/main) with OmniParser v2 (to be released soon)! Read more details [here](https://github.com/microsoft/OmniParser/tree/master/docs/Evaluation.md).
- [2024/11] We release an updated version, OmniParser V1.5, which features 1) more fine-grained/small icon detection and 2) prediction of whether each screen element is interactable. See examples in demo.ipynb.
- [2024/10] OmniParser was the #1 trending model on huggingface model hub (starting 10/29/2024).
@@ -20,13 +25,22 @@
- [2024/09] OmniParser achieves the best performance on [Windows Agent Arena](https://microsoft.github.io/WindowsAgentArena/)!
## Install
Install environment:
First clone the repo, then install the environment:
```bash
cd OmniParser
conda create -n "omni" python==3.12
conda activate omni
pip install -r requirements.txt
```
Ensure you have the V2 weights downloaded into the weights folder (and that the caption weights folder is named icon_caption_florence). If not, download them with:
```
# download the model checkpoints to local directory OmniParser/weights/
for f in icon_detect/{train_args.yaml,model.pt,model.yaml} icon_caption/{config.json,generation_config.json,model.safetensors}; do huggingface-cli download microsoft/OmniParser-v2.0 "$f" --local-dir weights; done
mv weights/icon_caption weights/icon_caption_florence
```
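
The same download can be scripted with the huggingface_hub Python API. A minimal sketch, assuming only that huggingface_hub is installed (it ships with the huggingface-cli used above):

```python
import shutil
from huggingface_hub import hf_hub_download

FILES = [
    "icon_detect/train_args.yaml",
    "icon_detect/model.pt",
    "icon_detect/model.yaml",
    "icon_caption/config.json",
    "icon_caption/generation_config.json",
    "icon_caption/model.safetensors",
]
for f in FILES:
    # Mirrors: huggingface-cli download microsoft/OmniParser-v2.0 <f> --local-dir weights
    hf_hub_download(repo_id="microsoft/OmniParser-v2.0", filename=f, local_dir="weights")

# The repo stores caption weights under icon_caption; the code expects icon_caption_florence.
shutil.move("weights/icon_caption", "weights/icon_caption_florence")
```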
<!-- ## [deprecated]
Then download the model ckpts files in: https://huggingface.co/microsoft/OmniParser, and put them under weights/, default folder structure is: weights/icon_detect, weights/icon_caption_florence, weights/icon_caption_blip2.
For v1:
@@ -36,7 +50,7 @@ python weights/convert_safetensor_to_pt.py
For v1.5:
download 'model_v1_5.pt' from https://huggingface.co/microsoft/OmniParser/tree/main/icon_detect_v1_5, make a new dir: weights/icon_detect_v1_5, and put it inside the folder. No weight conversion is needed.
```
``` -->
## Examples:
We put together a few simple examples in the demo.ipynb.
@@ -44,10 +58,7 @@ We put together a few simple examples in the demo.ipynb.
## Gradio Demo
To run the Gradio demo, simply run:
```bash
# For v1
python gradio_demo.py --icon_detect_model weights/icon_detect/best.pt --icon_caption_model florence2
# For v1.5
python gradio_demo.py --icon_detect_model weights/icon_detect_v1_5/model_v1_5.pt --icon_caption_model florence2
python gradio_demo.py
```
## Model Weights License

SECURITY.md

@@ -1,41 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.9 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report).
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->

demo.ipynb, 1625 lines changed (diff suppressed because one or more lines are too long)

gradio_demo.py

@@ -8,12 +8,13 @@ import io
import base64, os
from utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img
from util.utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img
import torch
from PIL import Image
import argparse
yolo_model = get_yolo_model(model_path='weights/icon_detect/model.pt')
caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption_florence")
# caption_model_processor = get_caption_model_processor(model_name="blip2", model_name_or_path="weights/icon_caption_blip2")
MARKDOWN = """
# OmniParser for Pure Vision Based General GUI Agent 🔥
@@ -36,13 +37,10 @@ def process(
box_threshold,
iou_threshold,
use_paddleocr,
imgsz,
icon_process_batch_size,
imgsz
) -> Optional[Image.Image]:
image_save_path = 'imgs/saved_image_demo.png'
image_input.save(image_save_path)
image = Image.open(image_save_path)
box_overlay_ratio = image.size[0] / 3200
box_overlay_ratio = image_input.size[0] / 3200
draw_bbox_config = {
'text_scale': 0.8 * box_overlay_ratio,
'text_thickness': max(int(2 * box_overlay_ratio), 1),
@@ -51,30 +49,15 @@ def process(
}
# import pdb; pdb.set_trace()
ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_save_path, display_img = False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold':0.9}, use_paddleocr=use_paddleocr)
ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_input, display_img = False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold':0.9}, use_paddleocr=use_paddleocr)
text, ocr_bbox = ocr_bbox_rslt
# print('prompt:', prompt)
dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_save_path, yolo_model, BOX_TRESHOLD = box_threshold, output_coord_in_ratio=True, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor, ocr_text=text,iou_threshold=iou_threshold, imgsz=imgsz, batch_size=icon_process_batch_size)
dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_input, yolo_model, BOX_TRESHOLD = box_threshold, output_coord_in_ratio=True, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor, ocr_text=text,iou_threshold=iou_threshold, imgsz=imgsz,)
image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img)))
print('finish processing')
# parsed_content_list = '\n'.join(parsed_content_list)
parsed_content_list = '\n'.join([f'type: {x['type']}, content: {x["content"]}, interactivity: {x["interactivity"]}' for x in parsed_content_list])
parsed_content_list = '\n'.join([f'icon {i}: ' + str(v) for i,v in enumerate(parsed_content_list)])
# parsed_content_list = str(parsed_content_list)
return image, str(parsed_content_list)
parser = argparse.ArgumentParser(description='Process model paths and names.')
parser.add_argument('--icon_detect_model', type=str, required=True, default='weights/icon_detect/best.pt', help='Path to the YOLO model weights')
parser.add_argument('--icon_caption_model', type=str, required=True, default='florence2', help='Name of the caption model')
args = parser.parse_args()
icon_detect_model, icon_caption_model = args.icon_detect_model, args.icon_caption_model
yolo_model = get_yolo_model(model_path=icon_detect_model)
if icon_caption_model == 'florence2':
caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption_florence")
elif icon_caption_model == 'blip2':
caption_model_processor = get_caption_model_processor(model_name="blip2", model_name_or_path="weights/icon_caption_blip2")
with gr.Blocks() as demo:
gr.Markdown(MARKDOWN)
with gr.Row():
@@ -88,11 +71,9 @@ with gr.Blocks() as demo:
iou_threshold_component = gr.Slider(
label='IOU Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.1)
use_paddleocr_component = gr.Checkbox(
label='Use PaddleOCR', value=False)
label='Use PaddleOCR', value=True)
imgsz_component = gr.Slider(
label='Icon Detect Image Size', minimum=640, maximum=3200, step=32, value=1920)
icon_process_batch_size_component = gr.Slider(
label='Icon Process Batch Size', minimum=1, maximum=256, step=1, value=64)
label='Icon Detect Image Size', minimum=640, maximum=1920, step=32, value=640)
submit_button_component = gr.Button(
value='Submit', variant='primary')
with gr.Column():
@@ -106,16 +87,10 @@ with gr.Blocks() as demo:
box_threshold_component,
iou_threshold_component,
use_paddleocr_component,
imgsz_component,
icon_process_batch_size_component
imgsz_component
],
outputs=[image_output_component, text_output_component]
)
# demo.launch(debug=False, show_error=True, share=True)
demo.launch(share=True, server_port=7861, server_name='0.0.0.0')
# python gradio_demo.py --icon_detect_model weights/icon_detect/best.pt --icon_caption_model florence2
# python gradio_demo.py --icon_detect_model weights/icon_detect_v1_5/model_v1_5.pt --icon_caption_model florence2
demo.launch(share=True, server_port=7861, server_name='127.0.0.1')
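
The hunk above is the security-relevant change: the demo now binds to the loopback interface instead of all interfaces. As a minimal sketch (not part of this repo), the bind address could be made configurable while keeping the safer default:

```python
import os

# Hypothetical helper, not in this repo: default to loopback and only
# widen exposure when the operator explicitly opts in via environment.
def launch_kwargs() -> dict:
    return {
        "server_name": os.environ.get("GRADIO_SERVER_NAME", "127.0.0.1"),
        "server_port": int(os.environ.get("GRADIO_SERVER_PORT", "7861")),
        "share": os.environ.get("GRADIO_SHARE", "0") == "1",
    }

# demo.launch(**launch_kwargs())  # `demo` is the gr.Blocks object above
```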

BIN
imgs/demo_image.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 560 KiB

BIN
imgs/demo_image_som.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 720 KiB

BIN
imgs/gradioicon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 33 KiB

BIN
imgs/header_bar.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 251 KiB

BIN
imgs/header_bar_thin.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 86 KiB

BIN
imgs/mobile.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 MiB

BIN
imgs/omniboxicon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.7 KiB

BIN
imgs/omniparsericon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 786 KiB

After

Width:  |  Height:  |  Size: 147 KiB

BIN
imgs/som_overlaid_omni.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 279 KiB

View File

@@ -1,60 +0,0 @@
from utils import get_som_labeled_img, check_ocr_box, get_caption_model_processor, get_dino_model, get_yolo_model
import torch
from ultralytics import YOLO
from PIL import Image
from typing import Dict, Tuple, List
import io
import base64
config = {
'som_model_path': 'finetuned_icon_detect.pt',
'device': 'cpu',
'caption_model_path': 'Salesforce/blip2-opt-2.7b',
'draw_bbox_config': {
'text_scale': 0.8,
'text_thickness': 2,
'text_padding': 3,
'thickness': 3,
},
'BOX_TRESHOLD': 0.05
}
class Omniparser(object):
def __init__(self, config: Dict):
self.config = config
self.som_model = get_yolo_model(model_path=config['som_model_path'])
# self.caption_model_processor = get_caption_model_processor(config['caption_model_path'], device=config['device'])
# self.caption_model_processor['model'].to(torch.float32)
def parse(self, image_path: str):
print('Parsing image:', image_path)
ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_path, display_img = False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold':0.9})
text, ocr_bbox = ocr_bbox_rslt
draw_bbox_config = self.config['draw_bbox_config']
BOX_TRESHOLD = self.config['BOX_TRESHOLD']
dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_path, self.som_model, BOX_TRESHOLD = BOX_TRESHOLD, output_coord_in_ratio=False, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=None, ocr_text=text,use_local_semantics=False)
image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img)))
# format the output
return_list = [{'from': 'omniparser', 'shape': {'x':coord[0], 'y':coord[1], 'width':coord[2], 'height':coord[3]},
'text': parsed_content_list[i].split(': ')[1], 'type':'text'} for i, (k, coord) in enumerate(label_coordinates.items()) if i < len(parsed_content_list)]
return_list.extend(
[{'from': 'omniparser', 'shape': {'x':coord[0], 'y':coord[1], 'width':coord[2], 'height':coord[3]},
'text': 'None', 'type':'icon'} for i, (k, coord) in enumerate(label_coordinates.items()) if i >= len(parsed_content_list)]
)
return [image, return_list]
parser = Omniparser(config)
image_path = 'examples/pc_1.png'
# time the parser
import time
s = time.time()
image, parsed_content_list = parser.parse(image_path)
device = config['device']
print(f'Time taken for Omniparser on {device}:', time.time() - s)
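
For reference, each element in the `return_list` built by the deleted `parse` method had the following shape (values below are illustrative, not real output):

```python
# Illustrative element from `parse` (all values invented):
element = {
    "from": "omniparser",
    "shape": {"x": 102, "y": 240, "width": 64, "height": 28},
    "text": "Submit",  # OCR text for text boxes; the string 'None' for icon boxes
    "type": "text",    # 'text' for OCR-derived boxes, 'icon' for detected icons
}
```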

1
omnitool/gradio/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
tmp/

View File

@@ -0,0 +1,162 @@
"""
Agentic sampling loop that calls the Anthropic API and a local implementation of Anthropic-defined computer use tools.
"""
import asyncio
import platform
from collections.abc import Callable
from datetime import datetime
from enum import StrEnum
from typing import Any, cast
from anthropic import Anthropic, AnthropicBedrock, AnthropicVertex, APIResponse
from anthropic.types import (
ToolResultBlockParam,
)
from anthropic.types.beta import (
BetaContentBlock,
BetaContentBlockParam,
BetaImageBlockParam,
BetaMessage,
BetaMessageParam,
BetaTextBlockParam,
BetaToolResultBlockParam,
)
from anthropic.types import TextBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
from tools import ComputerTool, ToolCollection, ToolResult
from PIL import Image
from io import BytesIO
import gradio as gr
from typing import Dict
BETA_FLAG = "computer-use-2024-10-22"
class APIProvider(StrEnum):
ANTHROPIC = "anthropic"
BEDROCK = "bedrock"
VERTEX = "vertex"
SYSTEM_PROMPT = f"""<SYSTEM_CAPABILITY>
* You are utilizing a Windows system with internet access.
* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
</SYSTEM_CAPABILITY>
"""
class AnthropicActor:
def __init__(
self,
model: str,
provider: APIProvider,
api_key: str,
api_response_callback: Callable[[APIResponse[BetaMessage]], None],
max_tokens: int = 4096,
only_n_most_recent_images: int | None = None,
print_usage: bool = True,
):
self.model = model
self.provider = provider
self.api_key = api_key
self.api_response_callback = api_response_callback
self.max_tokens = max_tokens
self.only_n_most_recent_images = only_n_most_recent_images
self.tool_collection = ToolCollection(ComputerTool())
self.system = SYSTEM_PROMPT
self.total_token_usage = 0
self.total_cost = 0
self.print_usage = print_usage
# Instantiate the appropriate API client based on the provider
if provider == APIProvider.ANTHROPIC:
self.client = Anthropic(api_key=api_key)
elif provider == APIProvider.VERTEX:
self.client = AnthropicVertex()
elif provider == APIProvider.BEDROCK:
self.client = AnthropicBedrock()
def __call__(
self,
*,
messages: list[BetaMessageParam]
):
"""
Generate a response given history messages.
"""
if self.only_n_most_recent_images:
_maybe_filter_to_n_most_recent_images(messages, self.only_n_most_recent_images)
# Call the API synchronously
raw_response = self.client.beta.messages.with_raw_response.create(
max_tokens=self.max_tokens,
messages=messages,
model=self.model,
system=self.system,
tools=self.tool_collection.to_params(),
betas=["computer-use-2024-10-22"],
)
self.api_response_callback(cast(APIResponse[BetaMessage], raw_response))
response = raw_response.parse()
print(f"AnthropicActor response: {response}")
self.total_token_usage += response.usage.input_tokens + response.usage.output_tokens
self.total_cost += (response.usage.input_tokens * 3 / 1000000 + response.usage.output_tokens * 15 / 1000000)
if self.print_usage:
print(f"Claude total token usage so far: {self.total_token_usage}, total cost so far: $USD{self.total_cost}")
return response
def _maybe_filter_to_n_most_recent_images(
messages: list[BetaMessageParam],
images_to_keep: int,
min_removal_threshold: int = 10,
):
"""
With the assumption that images are screenshots that are of diminishing value as
the conversation progresses, remove all but the final `images_to_keep` tool_result
images in place, removing in chunks of min_removal_threshold to reduce how often we
break the implicit prompt cache.
"""
if images_to_keep is None:
return messages
tool_result_blocks = cast(
list[ToolResultBlockParam],
[
item
for message in messages
for item in (
message["content"] if isinstance(message["content"], list) else []
)
if isinstance(item, dict) and item.get("type") == "tool_result"
],
)
total_images = sum(
1
for tool_result in tool_result_blocks
for content in tool_result.get("content", [])
if isinstance(content, dict) and content.get("type") == "image"
)
images_to_remove = total_images - images_to_keep
# for better cache behavior, we want to remove in chunks
images_to_remove -= images_to_remove % min_removal_threshold
for tool_result in tool_result_blocks:
if isinstance(tool_result.get("content"), list):
new_content = []
for content in tool_result.get("content", []):
if isinstance(content, dict) and content.get("type") == "image":
if images_to_remove > 0:
images_to_remove -= 1
continue
new_content.append(content)
tool_result["content"] = new_content

View File

@@ -0,0 +1,59 @@
from groq import Groq
import os
from .utils import is_image_path
def run_groq_interleaved(messages: list, system: str, model_name: str, api_key: str, max_tokens=256, temperature=0.6):
"""
Run a chat completion through Groq's API, ignoring any images in the messages.
"""
api_key = api_key or os.environ.get("GROQ_API_KEY")
if not api_key:
raise ValueError("GROQ_API_KEY is not set")
client = Groq(api_key=api_key)
# avoid using system messages for R1
final_messages = [{"role": "user", "content": system}]
if isinstance(messages, list):
for item in messages:
if isinstance(item, dict):
# For dict items, concatenate all text content, ignoring images
text_contents = []
for cnt in item["content"]:
if isinstance(cnt, str):
if not is_image_path(cnt): # Skip image paths
text_contents.append(cnt)
else:
text_contents.append(str(cnt))
if text_contents: # Only add if there's text content
message = {"role": "user", "content": " ".join(text_contents)}
final_messages.append(message)
else: # str
message = {"role": "user", "content": item}
final_messages.append(message)
elif isinstance(messages, str):
final_messages.append({"role": "user", "content": messages})
try:
completion = client.chat.completions.create(
model="deepseek-r1-distill-llama-70b",
messages=final_messages,
temperature=0.6,
max_completion_tokens=max_tokens,
top_p=0.95,
stream=False,
reasoning_format="raw"
)
response = completion.choices[0].message.content
final_answer = response.split('</think>\n')[-1] if '</think>' in response else response
final_answer = final_answer.replace("<output>", "").replace("</output>", "")
token_usage = completion.usage.total_tokens
return final_answer, token_usage
except Exception as e:
print(f"Error in interleaved Groq: {e}")
return str(e), 0
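
A hedged usage sketch for `run_groq_interleaved` (requires the `groq` package and a valid `GROQ_API_KEY`; note the function currently pins the R1 distill model regardless of `model_name`):

```python
answer, tokens = run_groq_interleaved(
    messages=["Summarize the current screen in one sentence."],
    system="You are a GUI agent planner.",
    model_name="deepseek-r1-distill-llama-70b",  # informational only, see above
    api_key="",  # empty string falls back to the GROQ_API_KEY env var
)
print(answer, tokens)
```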

View File

@@ -0,0 +1,62 @@
import os
import logging
import base64
import requests
from .utils import is_image_path, encode_image
def run_oai_interleaved(messages: list, system: str, model_name: str, api_key: str, max_tokens=256, temperature=0, provider_base_url: str = "https://api.openai.com/v1"):
headers = {"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"}
final_messages = [{"role": "system", "content": system}]
if isinstance(messages, list):
for item in messages:
contents = []
if isinstance(item, dict):
for cnt in item["content"]:
if isinstance(cnt, str):
if is_image_path(cnt) and 'o3-mini' not in model_name:
# o3-mini does not support images
base64_image = encode_image(cnt)
content = {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
else:
content = {"type": "text", "text": cnt}
else:
# in this case it is a text block from anthropic
content = {"type": "text", "text": str(cnt)}
contents.append(content)
message = {"role": 'user', "content": contents}
else: # str
contents.append({"type": "text", "text": item})
message = {"role": "user", "content": contents}
final_messages.append(message)
elif isinstance(messages, str):
final_messages = [{"role": "user", "content": messages}]
payload = {
"model": model_name,
"messages": final_messages,
}
if 'o1' in model_name or 'o3-mini' in model_name:
payload['reasoning_effort'] = 'low'
payload['max_completion_tokens'] = max_tokens
else:
payload['max_tokens'] = max_tokens
response = requests.post(
f"{provider_base_url}/chat/completions", headers=headers, json=payload
)
try:
text = response.json()['choices'][0]['message']['content']
token_usage = int(response.json()['usage']['total_tokens'])
return text, token_usage
except Exception as e:
print(f"Error in interleaved openAI: {e}. This may due to your invalid API key. Please check the response: {response.json()} ")
return response.json()
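
One detail worth noting above: reasoning models ('o1'/'o3-mini') take `max_completion_tokens` plus a `reasoning_effort` hint, while other models take the classic `max_tokens` field. The branch in isolation (a sketch with assumed values):

```python
def build_payload(model_name: str, final_messages: list, max_tokens: int) -> dict:
    # Mirrors the reasoning-model special case in run_oai_interleaved.
    payload = {"model": model_name, "messages": final_messages}
    if "o1" in model_name or "o3-mini" in model_name:
        payload["reasoning_effort"] = "low"
        payload["max_completion_tokens"] = max_tokens
    else:
        payload["max_tokens"] = max_tokens
    return payload

assert "max_completion_tokens" in build_payload("o3-mini", [], 256)
assert "max_tokens" in build_payload("gpt-4o-2024-11-20", [], 256)
```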

View File

@@ -0,0 +1,44 @@
import requests
import base64
from pathlib import Path
from tools.screen_capture import get_screenshot
from agent.llm_utils.utils import encode_image
OUTPUT_DIR = "./tmp/outputs"
class OmniParserClient:
def __init__(self,
url: str) -> None:
self.url = url
def __call__(self,):
screenshot, screenshot_path = get_screenshot()
screenshot_path = str(screenshot_path)
image_base64 = encode_image(screenshot_path)
response = requests.post(self.url, json={"base64_image": image_base64})
response_json = response.json()
print('omniparser latency:', response_json['latency'])
som_image_data = base64.b64decode(response_json['som_image_base64'])
screenshot_path_uuid = Path(screenshot_path).stem.replace("screenshot_", "")
som_screenshot_path = f"{OUTPUT_DIR}/screenshot_som_{screenshot_path_uuid}.png"
with open(som_screenshot_path, "wb") as f:
f.write(som_image_data)
response_json['width'] = screenshot.size[0]
response_json['height'] = screenshot.size[1]
response_json['original_screenshot_base64'] = image_base64
response_json['screenshot_uuid'] = screenshot_path_uuid
response_json = self.reformat_messages(response_json)
return response_json
def reformat_messages(self, response_json: dict):
screen_info = ""
for idx, element in enumerate(response_json["parsed_content_list"]):
element['idx'] = idx
if element['type'] == 'text':
screen_info += f'ID: {idx}, Text: {element["content"]}\n'
elif element['type'] == 'icon':
screen_info += f'ID: {idx}, Icon: {element["content"]}\n'
response_json['screen_info'] = screen_info
return response_json
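
The client boils down to one HTTP round trip. A hedged sketch of calling the OmniParser server directly (assumes a server URL and a screenshot file on disk; the exact endpoint is whatever `OmniParserClient` was constructed with):

```python
import base64
import requests

OMNIPARSER_URL = "http://localhost:8000"  # assumed; pass the real URL you use

with open("screenshot.png", "rb") as f:  # assumed local screenshot
    image_base64 = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post(OMNIPARSER_URL, json={"base64_image": image_base64})
resp.raise_for_status()
data = resp.json()
print(data["latency"], len(data["parsed_content_list"]))
```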

View File

@@ -0,0 +1,13 @@
import base64
def is_image_path(text):
image_extensions = (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".tif")
return text.endswith(image_extensions)
def encode_image(image_path):
"""Encode image file to base64."""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
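
Quick behavior check for the two helpers above (no assumptions beyond the code shown):

```python
assert is_image_path("tmp/outputs/screenshot_som_123.png")
assert not is_image_path("notes.txt")
# encode_image("screenshot.png") returns a base64 string for an existing file
```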

View File

@@ -0,0 +1,353 @@
import json
from collections.abc import Callable
from typing import cast, Callable
import uuid
from PIL import Image, ImageDraw
import base64
from io import BytesIO
from anthropic import APIResponse
from anthropic.types import ToolResultBlockParam
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock, BetaMessageParam, BetaUsage
from agent.llm_utils.oaiclient import run_oai_interleaved
from agent.llm_utils.groqclient import run_groq_interleaved
from agent.llm_utils.utils import is_image_path
import time
import re
OUTPUT_DIR = "./tmp/outputs"
def extract_data(input_string, data_type):
# Regular expression to extract a fenced '```{data_type}' block, matching to the end of the string if there is no closing fence
pattern = f"```{data_type}" + r"(.*?)(```|$)"
# Extract content
# re.DOTALL allows '.' to match newlines as well
matches = re.findall(pattern, input_string, re.DOTALL)
# Return the first match if exists, trimming whitespace and ignoring potential closing backticks
return matches[0][0].strip() if matches else input_string
class VLMAgent:
def __init__(
self,
model: str,
provider: str,
api_key: str,
output_callback: Callable,
api_response_callback: Callable,
max_tokens: int = 4096,
only_n_most_recent_images: int | None = None,
print_usage: bool = True,
):
if model == "omniparser + gpt-4o":
self.model = "gpt-4o-2024-11-20"
elif model == "omniparser + R1":
self.model = "deepseek-r1-distill-llama-70b"
elif model == "omniparser + qwen2.5vl":
self.model = "qwen2.5-vl-72b-instruct"
elif model == "omniparser + o1":
self.model = "o1"
elif model == "omniparser + o3-mini":
self.model = "o3-mini"
else:
raise ValueError(f"Model {model} not supported")
self.provider = provider
self.api_key = api_key
self.api_response_callback = api_response_callback
self.max_tokens = max_tokens
self.only_n_most_recent_images = only_n_most_recent_images
self.output_callback = output_callback
self.print_usage = print_usage
self.total_token_usage = 0
self.total_cost = 0
self.step_count = 0
self.system = ''
def __call__(self, messages: list, parsed_screen: dict):
self.step_count += 1
image_base64 = parsed_screen['original_screenshot_base64']
latency_omniparser = parsed_screen['latency']
self.output_callback(f'-- Step {self.step_count}: --', sender="bot")
screen_info = str(parsed_screen['screen_info'])
screenshot_uuid = parsed_screen['screenshot_uuid']
screen_width, screen_height = parsed_screen['width'], parsed_screen['height']
boxids_and_labels = parsed_screen["screen_info"]
system = self._get_system_prompt(boxids_and_labels)
# drop looping actions msg, byte image etc
planner_messages = messages
_remove_som_images(planner_messages)
_maybe_filter_to_n_most_recent_images(planner_messages, self.only_n_most_recent_images)
if isinstance(planner_messages[-1], dict):
if not isinstance(planner_messages[-1]["content"], list):
planner_messages[-1]["content"] = [planner_messages[-1]["content"]]
planner_messages[-1]["content"].append(f"{OUTPUT_DIR}/screenshot_{screenshot_uuid}.png")
planner_messages[-1]["content"].append(f"{OUTPUT_DIR}/screenshot_som_{screenshot_uuid}.png")
start = time.time()
if "gpt" in self.model or "o1" in self.model or "o3-mini" in self.model:
vlm_response, token_usage = run_oai_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
provider_base_url="https://api.openai.com/v1",
temperature=0,
)
print(f"oai token usage: {token_usage}")
self.total_token_usage += token_usage
if 'gpt' in self.model:
self.total_cost += (token_usage * 2.5 / 1000000) # https://openai.com/api/pricing/
elif 'o1' in self.model:
self.total_cost += (token_usage * 15 / 1000000) # https://openai.com/api/pricing/
elif 'o3-mini' in self.model:
self.total_cost += (token_usage * 1.1 / 1000000) # https://openai.com/api/pricing/
elif "r1" in self.model:
vlm_response, token_usage = run_groq_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
)
print(f"groq token usage: {token_usage}")
self.total_token_usage += token_usage
self.total_cost += (token_usage * 0.99 / 1000000)
elif "qwen" in self.model:
vlm_response, token_usage = run_oai_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=min(2048, self.max_tokens),
provider_base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
temperature=0,
)
print(f"qwen token usage: {token_usage}")
self.total_token_usage += token_usage
self.total_cost += (token_usage * 2.2 / 1000000) # https://help.aliyun.com/zh/model-studio/getting-started/models?spm=a2c4g.11186623.0.0.74b04823CGnPv7#fe96cfb1a422a
else:
raise ValueError(f"Model {self.model} not supported")
latency_vlm = time.time() - start
self.output_callback(f"LLM: {latency_vlm:.2f}s, OmniParser: {latency_omniparser:.2f}s", sender="bot")
print(f"{vlm_response}")
if self.print_usage:
print(f"Total token so far: {self.total_token_usage}. Total cost so far: $USD{self.total_cost:.5f}")
vlm_response_json = extract_data(vlm_response, "json")
vlm_response_json = json.loads(vlm_response_json)
img_to_show_base64 = parsed_screen["som_image_base64"]
if "Box ID" in vlm_response_json:
try:
bbox = parsed_screen["parsed_content_list"][int(vlm_response_json["Box ID"])]["bbox"]
vlm_response_json["box_centroid_coordinate"] = [int((bbox[0] + bbox[2]) / 2 * screen_width), int((bbox[1] + bbox[3]) / 2 * screen_height)]
img_to_show_data = base64.b64decode(img_to_show_base64)
img_to_show = Image.open(BytesIO(img_to_show_data))
draw = ImageDraw.Draw(img_to_show)
x, y = vlm_response_json["box_centroid_coordinate"]
radius = 10
draw.ellipse((x - radius, y - radius, x + radius, y + radius), fill='red')
draw.ellipse((x - radius*3, y - radius*3, x + radius*3, y + radius*3), fill=None, outline='red', width=2)
buffered = BytesIO()
img_to_show.save(buffered, format="PNG")
img_to_show_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
except Exception:
print(f"Error parsing: {vlm_response_json}")
self.output_callback(f'<img src="data:image/png;base64,{img_to_show_base64}">', sender="bot")
self.output_callback(
f'<details>'
f'  <summary>Parsed Screen elements by OmniParser</summary>'
f' <pre>{screen_info}</pre>'
f'</details>',
sender="bot"
)
vlm_plan_str = ""
for key, value in vlm_response_json.items():
if key == "Reasoning":
vlm_plan_str += f'{value}'
else:
vlm_plan_str += f'\n{key}: {value}'
# construct the response so that the Anthropic executor can execute the tool
response_content = [BetaTextBlock(text=vlm_plan_str, type='text')]
if 'box_centroid_coordinate' in vlm_response_json:
move_cursor_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': 'mouse_move', 'coordinate': vlm_response_json["box_centroid_coordinate"]},
name='computer', type='tool_use')
response_content.append(move_cursor_block)
if vlm_response_json["Next Action"] == "None":
print("Task paused/completed.")
elif vlm_response_json["Next Action"] == "type":
sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': vlm_response_json["Next Action"], 'text': vlm_response_json["value"]},
name='computer', type='tool_use')
response_content.append(sim_content_block)
else:
sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': vlm_response_json["Next Action"]},
name='computer', type='tool_use')
response_content.append(sim_content_block)
response_message = BetaMessage(id=f'toolu_{uuid.uuid4()}', content=response_content, model='', role='assistant', type='message', stop_reason='tool_use', usage=BetaUsage(input_tokens=0, output_tokens=0))
return response_message, vlm_response_json
def _api_response_callback(self, response: APIResponse):
self.api_response_callback(response)
def _get_system_prompt(self, screen_info: str = ""):
main_section = f"""
You are using a Windows device.
You are able to use a mouse and keyboard to interact with the computer based on the given task and screenshot.
You can only interact with the desktop GUI (no terminal or application menu access).
You may be given a history of plans and actions; these are the responses from previous loops.
You should carefully consider your plan based on the task, the screenshot, and the history of actions.
Here is the list of all detected bounding boxes by IDs on the screen and their description:{screen_info}
Your available "Next Action" only include:
- type: types a string of text.
- left_click: move mouse to box id and left clicks.
- right_click: move mouse to box id and right clicks.
- double_click: move mouse to box id and double clicks.
- hover: move mouse to box id.
- scroll_up: scrolls the screen up to view previous content.
- scroll_down: scrolls the screen down, when the desired button is not visible, or you need to see more content.
- wait: waits for 1 second for the device to load or respond.
Based on the visual information from the screenshot image and the detected bounding boxes, please determine the next action, the Box ID you should operate on (if action is one of 'type', 'hover', 'scroll_up', 'scroll_down', 'wait', there should be no Box ID field), and the value (if the action is 'type') in order to complete the task.
Output format:
```json
{{
"Reasoning": str, # describe what is in the current screen, taking into account the history, then describe your step-by-step thoughts on how to achieve the task, choose one action from available actions at a time.
"Next Action": "action_type, action description" | "None" # one action at a time, describe it in short and precisely.
"Box ID": n,
"value": "xxx" # only provide value field if the action is type, else don't include value key
}}
```
One Example:
```json
{{
"Reasoning": "The current screen shows google result of amazon, in previous action I have searched amazon on google. Then I need to click on the first search results to go to amazon.com.",
"Next Action": "left_click",
"Box ID": m
}}
```
Another Example:
```json
{{
"Reasoning": "The current screen shows the front page of amazon. There is no previous action. Therefore I need to type "Apple watch" in the search bar.",
"Next Action": "type",
"Box ID": n,
"value": "Apple watch"
}}
```
Another Example:
```json
{{
"Reasoning": "The current screen does not show 'submit' button, I need to scroll down to see if the button is available.",
"Next Action": "scroll_down",
}}
```
IMPORTANT NOTES:
1. You should only give a single action at a time.
"""
thinking_model = "r1" in self.model
if not thinking_model:
main_section += """
2. You should give an analysis to the current screen, and reflect on what has been done by looking at the history, then describe your step-by-step thoughts on how to achieve the task.
"""
else:
main_section += """
2. In <think> XML tags give an analysis to the current screen, and reflect on what has been done by looking at the history, then describe your step-by-step thoughts on how to achieve the task. In <output> XML tags put the next action prediction JSON.
"""
main_section += """
3. Attach the next action prediction in the "Next Action".
4. You should not include other actions, such as keyboard shortcuts.
5. When the task is completed, don't complete additional actions. You should say "Next Action": "None" in the json field.
6. If the task involves buying multiple products or navigating through multiple pages, you should break it into subgoals and complete each subgoal one by one in the order of the instructions.
7. Avoid choosing the same action/element multiple times in a row; if that happens, reflect on what may have gone wrong, and predict a different action.
8. If you are prompted with a login page or a captcha page, or you think the next action needs the user's permission, you should say "Next Action": "None" in the json field.
"""
return main_section
def _remove_som_images(messages):
for msg in messages:
msg_content = msg["content"]
if isinstance(msg_content, list):
msg["content"] = [
cnt for cnt in msg_content
if not (isinstance(cnt, str) and 'som' in cnt and is_image_path(cnt))
]
def _maybe_filter_to_n_most_recent_images(
messages: list[BetaMessageParam],
images_to_keep: int,
min_removal_threshold: int = 10,
):
"""
With the assumption that images are screenshots that are of diminishing value as
the conversation progresses, remove all but the final `images_to_keep` tool_result
images in place
"""
if images_to_keep is None:
return messages
total_images = 0
for msg in messages:
for cnt in msg.get("content", []):
if isinstance(cnt, str) and is_image_path(cnt):
total_images += 1
elif isinstance(cnt, dict) and cnt.get("type") == "tool_result":
for content in cnt.get("content", []):
if isinstance(content, dict) and content.get("type") == "image":
total_images += 1
images_to_remove = total_images - images_to_keep
for msg in messages:
msg_content = msg["content"]
if isinstance(msg_content, list):
new_content = []
for cnt in msg_content:
# Remove images from SOM or screenshot as needed
if isinstance(cnt, str) and is_image_path(cnt):
if images_to_remove > 0:
images_to_remove -= 1
continue
# The VLM shouldn't use the Anthropic screenshot tool, so these shouldn't appear; remove them if they do
elif isinstance(cnt, dict) and cnt.get("type") == "tool_result":
new_tool_result_content = []
for tool_result_entry in cnt.get("content", []):
if isinstance(tool_result_entry, dict) and tool_result_entry.get("type") == "image":
if images_to_remove > 0:
images_to_remove -= 1
continue
new_tool_result_content.append(tool_result_entry)
cnt["content"] = new_tool_result_content
# Append fixed content to current message's content list
new_content.append(cnt)
msg["content"] = new_content

View File

@@ -0,0 +1,498 @@
import json
from collections.abc import Callable
from typing import cast, Callable
import uuid
from PIL import Image, ImageDraw
import base64
from io import BytesIO
import copy
from pathlib import Path
from datetime import datetime
from anthropic import APIResponse
from anthropic.types import ToolResultBlockParam
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock, BetaMessageParam, BetaUsage
from agent.llm_utils.oaiclient import run_oai_interleaved
from agent.llm_utils.groqclient import run_groq_interleaved
from agent.llm_utils.utils import is_image_path
import time
import re
import os
OUTPUT_DIR = "./tmp/outputs"
ORCHESTRATOR_LEDGER_PROMPT = """
Recall we are working on the following request:
{task}
To make progress on the request, please answer the following questions, including necessary reasoning:
- Is the request fully satisfied? (True if complete, or False if the original request has yet to be SUCCESSFULLY and FULLY addressed)
- Are we in a loop where we are repeating the same requests and / or getting the same responses as before? Loops can span multiple turns, and can include repeated actions like scrolling up or down more than a handful of times.
- Are we making forward progress? (True if just starting, or recent messages are adding value. False if recent messages show evidence of being stuck in a loop or if there is evidence of significant barriers to success such as the inability to read from a required file)
- What instruction or question would you give in order to complete the task?
Please output an answer in pure JSON format according to the following schema. The JSON object must be parsable as-is. DO NOT OUTPUT ANYTHING OTHER THAN JSON, AND DO NOT DEVIATE FROM THIS SCHEMA:
{{
"is_request_satisfied": {{
"reason": string,
"answer": boolean
}},
"is_in_loop": {{
"reason": string,
"answer": boolean
}},
"is_progress_being_made": {{
"reason": string,
"answer": boolean
}},
"instruction_or_question": {{
"reason": string,
"answer": string
}}
}}
"""
def extract_data(input_string, data_type):
# Regular expression to extract a fenced '```{data_type}' block, matching to the end of the string if there is no closing fence
pattern = f"```{data_type}" + r"(.*?)(```|$)"
# Extract content
# re.DOTALL allows '.' to match newlines as well
matches = re.findall(pattern, input_string, re.DOTALL)
# Return the first match if exists, trimming whitespace and ignoring potential closing backticks
return matches[0][0].strip() if matches else input_string
class VLMOrchestratedAgent:
def __init__(
self,
model: str,
provider: str,
api_key: str,
output_callback: Callable,
api_response_callback: Callable,
max_tokens: int = 4096,
only_n_most_recent_images: int | None = None,
print_usage: bool = True,
save_folder: str = None,
):
if model == "omniparser + gpt-4o" or model == "omniparser + gpt-4o-orchestrated":
self.model = "gpt-4o-2024-11-20"
elif model == "omniparser + R1" or model == "omniparser + R1-orchestrated":
self.model = "deepseek-r1-distill-llama-70b"
elif model == "omniparser + qwen2.5vl" or model == "omniparser + qwen2.5vl-orchestrated":
self.model = "qwen2.5-vl-72b-instruct"
elif model == "omniparser + o1" or model == "omniparser + o1-orchestrated":
self.model = "o1"
elif model == "omniparser + o3-mini" or model == "omniparser + o3-mini-orchestrated":
self.model = "o3-mini"
else:
raise ValueError(f"Model {model} not supported")
self.provider = provider
self.api_key = api_key
self.api_response_callback = api_response_callback
self.max_tokens = max_tokens
self.only_n_most_recent_images = only_n_most_recent_images
self.output_callback = output_callback
self.save_folder = save_folder
self.print_usage = print_usage
self.total_token_usage = 0
self.total_cost = 0
self.step_count = 0
self.plan, self.ledger = None, None
self.system = ''
def __call__(self, messages: list, parsed_screen: dict):
if self.step_count == 0:
plan = self._initialize_task(messages)
self.output_callback(f'-- Plan: {plan} --', )
# update messages with the plan
messages.append({"role": "assistant", "content": plan})
else:
updated_ledger = self._update_ledger(messages)
self.output_callback(
f'<details>'
f' <summary><strong>Task Progress Ledger (click to expand)</strong></summary>'
f' <div style="padding: 10px; background-color: #f8f9fa; border-radius: 5px; margin-top: 5px;">'
f' <pre>{updated_ledger}</pre>'
f' </div>'
f'</details>',
)
# update messages with the ledger
messages.append({"role": "assistant", "content": updated_ledger})
self.ledger = updated_ledger
self.step_count += 1
# save the image to the output folder
with open(f"{self.save_folder}/screenshot_{self.step_count}.png", "wb") as f:
f.write(base64.b64decode(parsed_screen['original_screenshot_base64']))
with open(f"{self.save_folder}/som_screenshot_{self.step_count}.png", "wb") as f:
f.write(base64.b64decode(parsed_screen['som_image_base64']))
latency_omniparser = parsed_screen['latency']
screen_info = str(parsed_screen['screen_info'])
screenshot_uuid = parsed_screen['screenshot_uuid']
screen_width, screen_height = parsed_screen['width'], parsed_screen['height']
boxids_and_labels = parsed_screen["screen_info"]
system = self._get_system_prompt(boxids_and_labels)
# drop looping actions msg, byte image etc
planner_messages = messages
_remove_som_images(planner_messages)
_maybe_filter_to_n_most_recent_images(planner_messages, self.only_n_most_recent_images)
if isinstance(planner_messages[-1], dict):
if not isinstance(planner_messages[-1]["content"], list):
planner_messages[-1]["content"] = [planner_messages[-1]["content"]]
planner_messages[-1]["content"].append(f"{OUTPUT_DIR}/screenshot_{screenshot_uuid}.png")
planner_messages[-1]["content"].append(f"{OUTPUT_DIR}/screenshot_som_{screenshot_uuid}.png")
start = time.time()
if "gpt" in self.model or "o1" in self.model or "o3-mini" in self.model:
vlm_response, token_usage = run_oai_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
provider_base_url="https://api.openai.com/v1",
temperature=0,
)
print(f"oai token usage: {token_usage}")
self.total_token_usage += token_usage
if 'gpt' in self.model:
self.total_cost += (token_usage * 2.5 / 1000000) # https://openai.com/api/pricing/
elif 'o1' in self.model:
self.total_cost += (token_usage * 15 / 1000000) # https://openai.com/api/pricing/
elif 'o3-mini' in self.model:
self.total_cost += (token_usage * 1.1 / 1000000) # https://openai.com/api/pricing/
elif "r1" in self.model:
vlm_response, token_usage = run_groq_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
)
print(f"groq token usage: {token_usage}")
self.total_token_usage += token_usage
self.total_cost += (token_usage * 0.99 / 1000000)
elif "qwen" in self.model:
vlm_response, token_usage = run_oai_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=min(2048, self.max_tokens),
provider_base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
temperature=0,
)
print(f"qwen token usage: {token_usage}")
self.total_token_usage += token_usage
self.total_cost += (token_usage * 2.2 / 1000000) # https://help.aliyun.com/zh/model-studio/getting-started/models?spm=a2c4g.11186623.0.0.74b04823CGnPv7#fe96cfb1a422a
else:
raise ValueError(f"Model {self.model} not supported")
latency_vlm = time.time() - start
# Update step counter with both latencies
self.output_callback(f'<i>Step {self.step_count} | OmniParser: {latency_omniparser:.2f}s | LLM: {latency_vlm:.2f}s</i>', )
print(f"{vlm_response}")
if self.print_usage:
print(f"Total token so far: {self.total_token_usage}. Total cost so far: $USD{self.total_cost:.5f}")
vlm_response_json = extract_data(vlm_response, "json")
vlm_response_json = json.loads(vlm_response_json)
img_to_show_base64 = parsed_screen["som_image_base64"]
if "Box ID" in vlm_response_json:
try:
bbox = parsed_screen["parsed_content_list"][int(vlm_response_json["Box ID"])]["bbox"]
vlm_response_json["box_centroid_coordinate"] = [int((bbox[0] + bbox[2]) / 2 * screen_width), int((bbox[1] + bbox[3]) / 2 * screen_height)]
img_to_show_data = base64.b64decode(img_to_show_base64)
img_to_show = Image.open(BytesIO(img_to_show_data))
draw = ImageDraw.Draw(img_to_show)
x, y = vlm_response_json["box_centroid_coordinate"]
radius = 10
draw.ellipse((x - radius, y - radius, x + radius, y + radius), fill='red')
draw.ellipse((x - radius*3, y - radius*3, x + radius*3, y + radius*3), fill=None, outline='red', width=2)
buffered = BytesIO()
img_to_show.save(buffered, format="PNG")
img_to_show_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
except Exception:
print(f"Error parsing: {vlm_response_json}")
self.output_callback(f'<img src="data:image/png;base64,{img_to_show_base64}">', )
# Display screen info in a collapsible dropdown
self.output_callback(
f'<details>'
f' <summary><strong>Parsed Screen Elements (click to expand)</strong></summary>'
f' <div style="padding: 10px; background-color: #f8f9fa; border-radius: 5px; margin-top: 5px;">'
f' <pre>{screen_info}</pre>'
f' </div>'
f'</details>',
)
vlm_plan_str = ""
for key, value in vlm_response_json.items():
if key == "Reasoning":
vlm_plan_str += f'{value}'
else:
vlm_plan_str += f'\n{key}: {value}'
# construct the response so that the Anthropic executor can execute the tool
response_content = [BetaTextBlock(text=vlm_plan_str, type='text')]
if 'box_centroid_coordinate' in vlm_response_json:
move_cursor_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': 'mouse_move', 'coordinate': vlm_response_json["box_centroid_coordinate"]},
name='computer', type='tool_use')
response_content.append(move_cursor_block)
if vlm_response_json["Next Action"] == "None":
print("Task paused/completed.")
elif vlm_response_json["Next Action"] == "type":
sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': vlm_response_json["Next Action"], 'text': vlm_response_json["value"]},
name='computer', type='tool_use')
response_content.append(sim_content_block)
else:
sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': vlm_response_json["Next Action"]},
name='computer', type='tool_use')
response_content.append(sim_content_block)
response_message = BetaMessage(id=f'toolu_{uuid.uuid4()}', content=response_content, model='', role='assistant', type='message', stop_reason='tool_use', usage=BetaUsage(input_tokens=0, output_tokens=0))
# save the intermediate step trajectory to the save folder
step_trajectory = {
"screenshot_path": f"{self.save_folder}/screenshot_{self.step_count}.png",
"som_screenshot_path": f"{self.save_folder}/som_screenshot_{self.step_count}.png",
"screen_info": screen_info,
"latency_omniparser": latency_omniparser,
"latency_vlm": latency_vlm,
"vlm_response_json": vlm_response_json,
'ledger': self.ledger,
}
with open(f"{self.save_folder}/trajectory.json", "a") as f:
f.write(json.dumps(step_trajectory))
f.write("\n")
return response_message, vlm_response_json
def _api_response_callback(self, response: APIResponse):
self.api_response_callback(response)
def _get_system_prompt(self, screen_info: str = ""):
main_section = f"""
You are using a Windows device.
You are able to use a mouse and keyboard to interact with the computer based on the given task and screenshot.
You can only interact with the desktop GUI (no terminal or application menu access).
You may be given a history of plans and actions; these are the responses from previous loops.
You should carefully consider your plan based on the task, the screenshot, and the history of actions.
Here is the list of all detected bounding boxes by IDs on the screen and their description:{screen_info}
Your available "Next Action" only include:
- type: types a string of text.
- left_click: move mouse to box id and left clicks.
- right_click: move mouse to box id and right clicks.
- double_click: move mouse to box id and double clicks.
- hover: move mouse to box id.
- scroll_up: scrolls the screen up to view previous content.
- scroll_down: scrolls the screen down, when the desired button is not visible, or you need to see more content.
- wait: waits for 1 second for the device to load or respond.
Based on the visual information from the screenshot image and the detected bounding boxes, please determine the next action, the Box ID you should operate on (if action is one of 'type', 'hover', 'scroll_up', 'scroll_down', 'wait', there should be no Box ID field), and the value (if the action is 'type') in order to complete the task.
Output format:
```json
{{
"Reasoning": str, # describe what is in the current screen, taking into account the history, then describe your step-by-step thoughts on how to achieve the task, choose one action from available actions at a time.
"Next Action": "action_type, action description" | "None" # one action at a time, describe it in short and precisely.
"Box ID": n,
"value": "xxx" # only provide value field if the action is type, else don't include value key
}}
```
One Example:
```json
{{
"Reasoning": "The current screen shows google result of amazon, in previous action I have searched amazon on google. Then I need to click on the first search results to go to amazon.com.",
"Next Action": "left_click",
"Box ID": m
}}
```
Another Example:
```json
{{
"Reasoning": "The current screen shows the front page of amazon. There is no previous action. Therefore I need to type "Apple watch" in the search bar.",
"Next Action": "type",
"Box ID": n,
"value": "Apple watch"
}}
```
Another Example:
```json
{{
"Reasoning": "The current screen does not show 'submit' button, I need to scroll down to see if the button is available.",
"Next Action": "scroll_down",
}}
```
IMPORTANT NOTES:
1. You should only give a single action at a time.
"""
thinking_model = "r1" in self.model
if not thinking_model:
main_section += """
2. You should give an analysis to the current screen, and reflect on what has been done by looking at the history, then describe your step-by-step thoughts on how to achieve the task.
"""
else:
main_section += """
2. In <think> XML tags give an analysis to the current screen, and reflect on what has been done by looking at the history, then describe your step-by-step thoughts on how to achieve the task. In <output> XML tags put the next action prediction JSON.
"""
main_section += """
3. Attach the next action prediction in the "Next Action".
4. You should not include other actions, such as keyboard shortcuts.
5. When the task is completed, don't complete additional actions. You should say "Next Action": "None" in the json field.
6. If the task involves buying multiple products or navigating through multiple pages, you should break it into subgoals and complete each subgoal one by one in the order of the instructions.
7. Avoid choosing the same action/element multiple times in a row; if that happens, reflect on what may have gone wrong, and predict a different action.
8. If you are prompted with a login page or a captcha page, or you think the next action needs the user's permission, you should say "Next Action": "None" in the json field.
"""
return main_section
def _initialize_task(self, messages: list):
self._task = messages[0]["content"]
# make a plan
plan_prompt = self._get_plan_prompt(self._task)
input_message = copy.deepcopy(messages)
input_message.append({"role": "user", "content": plan_prompt})
vlm_response, token_usage = run_oai_interleaved(
messages=input_message,
system="",
model_name=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
provider_base_url="https://api.openai.com/v1",
temperature=0,
)
plan = extract_data(vlm_response, "json")
# The plan is saved under a fixed filename (overwritten on each run)
plan_filename = "plan.json"
plan_path = os.path.join(self.save_folder, plan_filename)
# Save the plan to a file
try:
with open(plan_path, "w") as f:
f.write(plan)
print(f"Plan successfully saved to {plan_path}")
except Exception as e:
print(f"Error saving plan to {plan_path}: {str(e)}")
return plan
def _update_ledger(self, messages):
# ask the model to re-evaluate progress against the task and return the updated ledger
update_ledger_prompt = ORCHESTRATOR_LEDGER_PROMPT.format(task=self._task)
input_message = copy.deepcopy(messages)
input_message.append({"role": "user", "content": update_ledger_prompt})
vlm_response, token_usage = run_oai_interleaved(
messages=input_message,
system="",
model_name=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
provider_base_url="https://api.openai.com/v1",
temperature=0,
)
updated_ledger = extract_data(vlm_response, "json")
return updated_ledger
def _get_plan_prompt(self, task):
plan_prompt = f"""
please devise a short bullet-point plan for addressing the original user task: {task}
You should write your plan in a json dict, e.g:```json
{{
'step 1': xxx,
'step 2': xxxx,
...
}}```
Now start your answer directly.
"""
return plan_prompt
def _remove_som_images(messages):
for msg in messages:
msg_content = msg["content"]
if isinstance(msg_content, list):
msg["content"] = [
cnt for cnt in msg_content
if not (isinstance(cnt, str) and 'som' in cnt and is_image_path(cnt))
]
def _maybe_filter_to_n_most_recent_images(
messages: list[BetaMessageParam],
images_to_keep: int,
min_removal_threshold: int = 10,
):
"""
With the assumption that images are screenshots that are of diminishing value as
the conversation progresses, remove all but the final `images_to_keep` tool_result
images in place
"""
if images_to_keep is None:
return messages
total_images = 0
for msg in messages:
for cnt in msg.get("content", []):
if isinstance(cnt, str) and is_image_path(cnt):
total_images += 1
elif isinstance(cnt, dict) and cnt.get("type") == "tool_result":
for content in cnt.get("content", []):
if isinstance(content, dict) and content.get("type") == "image":
total_images += 1
images_to_remove = total_images - images_to_keep
for msg in messages:
msg_content = msg["content"]
if isinstance(msg_content, list):
new_content = []
for cnt in msg_content:
# Remove images from SOM or screenshot as needed
if isinstance(cnt, str) and is_image_path(cnt):
if images_to_remove > 0:
images_to_remove -= 1
continue
# The VLM shouldn't use the Anthropic screenshot tool, so these shouldn't appear; remove them if they do
elif isinstance(cnt, dict) and cnt.get("type") == "tool_result":
new_tool_result_content = []
for tool_result_entry in cnt.get("content", []):
if isinstance(tool_result_entry, dict) and tool_result_entry.get("type") == "image":
if images_to_remove > 0:
images_to_remove -= 1
continue
new_tool_result_content.append(tool_result_entry)
cnt["content"] = new_tool_result_content
# Append fixed content to current message's content list
new_content.append(cnt)
msg["content"] = new_content

426
omnitool/gradio/app.py Normal file
View File

@@ -0,0 +1,426 @@
"""
python app.py --windows_host_url localhost:8006 --omniparser_server_url localhost:8000
"""
import os
from datetime import datetime
from enum import StrEnum
from functools import partial
from pathlib import Path
from typing import cast
import argparse
import gradio as gr
from anthropic import APIResponse
from anthropic.types import TextBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
from anthropic.types.tool_use_block import ToolUseBlock
from loop import (
APIProvider,
sampling_loop_sync,
)
from tools import ToolResult
import requests
from requests.exceptions import RequestException
import base64
CONFIG_DIR = Path("~/.anthropic").expanduser()
API_KEY_FILE = CONFIG_DIR / "api_key"
INTRO_TEXT = '''
OmniParser lets you turn any vision-language model into an AI agent. We currently support **OpenAI (4o/o1/o3-mini), DeepSeek (R1), Qwen (2.5VL) or Anthropic Computer Use (Sonnet).**
Type a message and press submit to start OmniTool. Press stop to pause, and press the trash icon in the chat to clear the message history.
'''
def parse_arguments():
parser = argparse.ArgumentParser(description="Gradio App")
parser.add_argument("--windows_host_url", type=str, default='localhost:8006')
parser.add_argument("--omniparser_server_url", type=str, default="localhost:8000")
return parser.parse_args()
args = parse_arguments()
class Sender(StrEnum):
USER = "user"
BOT = "assistant"
TOOL = "tool"
def setup_state(state):
if "messages" not in state:
state["messages"] = []
if "model" not in state:
state["model"] = "omniparser + gpt-4o"
if "provider" not in state:
state["provider"] = "openai"
if "openai_api_key" not in state: # Fetch API keys from environment variables
state["openai_api_key"] = os.getenv("OPENAI_API_KEY", "")
if "anthropic_api_key" not in state:
state["anthropic_api_key"] = os.getenv("ANTHROPIC_API_KEY", "")
if "api_key" not in state:
state["api_key"] = ""
if "auth_validated" not in state:
state["auth_validated"] = False
if "responses" not in state:
state["responses"] = {}
if "tools" not in state:
state["tools"] = {}
if "only_n_most_recent_images" not in state:
state["only_n_most_recent_images"] = 2
if 'chatbot_messages' not in state:
state['chatbot_messages'] = []
if 'stop' not in state:
state['stop'] = False
async def main(state):
"""Render loop for Gradio"""
setup_state(state)
return "Setup completed"
def validate_auth(provider: APIProvider, api_key: str | None):
if provider == APIProvider.ANTHROPIC:
if not api_key:
return "Enter your Anthropic API key to continue."
if provider == APIProvider.BEDROCK:
import boto3
if not boto3.Session().get_credentials():
return "You must have AWS credentials set up to use the Bedrock API."
if provider == APIProvider.VERTEX:
import google.auth
from google.auth.exceptions import DefaultCredentialsError
if not os.environ.get("CLOUD_ML_REGION"):
return "Set the CLOUD_ML_REGION environment variable to use the Vertex API."
try:
google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
except DefaultCredentialsError:
return "Your google cloud credentials are not set up correctly."
def load_from_storage(filename: str) -> str | None:
"""Load data from a file in the storage directory."""
try:
file_path = CONFIG_DIR / filename
if file_path.exists():
data = file_path.read_text().strip()
if data:
return data
except Exception as e:
print(f"Debug: Error loading {filename}: {e}")
return None
def save_to_storage(filename: str, data: str) -> None:
"""Save data to a file in the storage directory."""
try:
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
file_path = CONFIG_DIR / filename
file_path.write_text(data)
# Ensure only user can read/write the file
file_path.chmod(0o600)
except Exception as e:
print(f"Debug: Error saving {filename}: {e}")
def _api_response_callback(response: APIResponse[BetaMessage], response_state: dict):
response_id = datetime.now().isoformat()
response_state[response_id] = response
def _tool_output_callback(tool_output: ToolResult, tool_id: str, tool_state: dict):
tool_state[tool_id] = tool_output
def chatbot_output_callback(message, chatbot_state, hide_images=False, sender="bot"):
def _render_message(message: str | BetaTextBlock | BetaToolUseBlock | ToolResult, hide_images=False):
print(f"_render_message: {str(message)[:100]}")
if isinstance(message, str):
return message
is_tool_result = not isinstance(message, str) and (
isinstance(message, ToolResult)
or message.__class__.__name__ == "ToolResult"
)
if not message or (
is_tool_result
and hide_images
and not hasattr(message, "error")
and not hasattr(message, "output")
): # return None if hide_images is True
return
# render tool result
if is_tool_result:
message = cast(ToolResult, message)
if message.output:
return message.output
if message.error:
return f"Error: {message.error}"
if message.base64_image and not hide_images:
# somehow can't display via gr.Image
# image_data = base64.b64decode(message.base64_image)
# return gr.Image(value=Image.open(io.BytesIO(image_data)))
return f'<img src="data:image/png;base64,{message.base64_image}">'
elif isinstance(message, BetaTextBlock) or isinstance(message, TextBlock):
return f"Analysis: {message.text}"
elif isinstance(message, BetaToolUseBlock) or isinstance(message, ToolUseBlock):
# return f"Tool Use: {message.name}\nInput: {message.input}"
return f"Next I will perform the following action: {message.input}"
else:
return message
def _truncate_string(s, max_length=500):
"""Truncate long strings for concise printing."""
if isinstance(s, str) and len(s) > max_length:
return s[:max_length] + "..."
return s
# processing Anthropic messages
message = _render_message(message, hide_images)
if sender == "bot":
chatbot_state.append((None, message))
else:
chatbot_state.append((message, None))
# Create a concise version of the chatbot state for printing
concise_state = [(_truncate_string(user_msg), _truncate_string(bot_msg))
for user_msg, bot_msg in chatbot_state]
# print(f"chatbot_output_callback chatbot_state: {concise_state} (truncated)")
def valid_params(user_input, state):
"""Validate all requirements and return a list of error messages."""
errors = []
for server_name, url in [('Windows Host', 'localhost:5000'), ('OmniParser Server', args.omniparser_server_url)]:
try:
url = f'http://{url}/probe'
response = requests.get(url, timeout=3)
if response.status_code != 200:
errors.append(f"{server_name} is not responding")
except RequestException:
errors.append(f"{server_name} is not responding")
if not state["api_key"].strip():
errors.append("LLM API Key is not set")
if not user_input:
errors.append("no computer use request provided")
return errors
def process_input(user_input, state):
# Reset the stop flag
if state["stop"]:
state["stop"] = False
errors = valid_params(user_input, state)
if errors:
raise gr.Error("Validation errors: " + ", ".join(errors))
# Append the user message to state["messages"]
state["messages"].append(
{
"role": Sender.USER,
"content": [TextBlock(type="text", text=user_input)],
}
)
# Append the user's message to chatbot_messages with None for the assistant's reply
state['chatbot_messages'].append((user_input, None))
yield state['chatbot_messages'] # Yield to update the chatbot UI with the user's message
print("state")
print(state)
# Run sampling_loop_sync with the chatbot_output_callback
for loop_msg in sampling_loop_sync(
model=state["model"],
provider=state["provider"],
messages=state["messages"],
output_callback=partial(chatbot_output_callback, chatbot_state=state['chatbot_messages'], hide_images=False),
tool_output_callback=partial(_tool_output_callback, tool_state=state["tools"]),
api_response_callback=partial(_api_response_callback, response_state=state["responses"]),
api_key=state["api_key"],
only_n_most_recent_images=state["only_n_most_recent_images"],
max_tokens=16384,
omniparser_url=args.omniparser_server_url
):
if loop_msg is None or state.get("stop"):
yield state['chatbot_messages']
print("End of task. Close the loop.")
break
yield state['chatbot_messages'] # Yield the updated chatbot_messages to update the chatbot UI
def stop_app(state):
state["stop"] = True
return "App stopped"
def get_header_image_base64():
try:
# Get the absolute path to the image relative to this script
script_dir = Path(__file__).parent
image_path = script_dir.parent.parent / "imgs" / "header_bar_thin.png"
with open(image_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
return f'data:image/png;base64,{encoded_string}'
except Exception as e:
print(f"Failed to load header image: {e}")
return None
with gr.Blocks(theme=gr.themes.Default()) as demo:
gr.HTML("""
<style>
.no-padding {
padding: 0 !important;
}
.no-padding > div {
padding: 0 !important;
}
.markdown-text p {
font-size: 18px; /* Adjust the font size as needed */
}
</style>
""")
state = gr.State({})
setup_state(state.value)
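# gr.State holds per-session state; this initial dict is copied for each new session.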
header_image = get_header_image_base64()
if header_image:
gr.HTML(f'<img src="{header_image}" alt="OmniTool Header" width="100%">', elem_classes="no-padding")
gr.HTML('<h1 style="text-align: center; font-weight: normal;">Omni<span style="font-weight: bold;">Tool</span></h1>')
else:
gr.Markdown("# OmniTool")
if not os.getenv("HIDE_WARNING", False):
gr.Markdown(INTRO_TEXT, elem_classes="markdown-text")
with gr.Accordion("Settings", open=True):
with gr.Row():
with gr.Column():
model = gr.Dropdown(
label="Model",
choices=["omniparser + gpt-4o", "omniparser + o1", "omniparser + o3-mini", "omniparser + R1", "omniparser + qwen2.5vl", "claude-3-5-sonnet-20241022", "omniparser + gpt-4o-orchestrated", "omniparser + o1-orchestrated", "omniparser + o3-mini-orchestrated", "omniparser + R1-orchestrated", "omniparser + qwen2.5vl-orchestrated"],
value="omniparser + gpt-4o",
interactive=True,
)
with gr.Column():
only_n_images = gr.Slider(
label="N most recent screenshots",
minimum=0,
maximum=10,
step=1,
value=2,
interactive=True
)
with gr.Row():
with gr.Column(1):
provider = gr.Dropdown(
label="API Provider",
choices=[option.value for option in APIProvider],
value="openai",
interactive=False,
)
with gr.Column(2):
api_key = gr.Textbox(
label="API Key",
type="password",
value=state.value.get("api_key", ""),
placeholder="Paste your API key here",
interactive=True,
)
with gr.Row():
with gr.Column(scale=8):
chat_input = gr.Textbox(show_label=False, placeholder="Type a message to send to Omniparser + X ...", container=False)
with gr.Column(scale=1, min_width=50):
submit_button = gr.Button(value="Send", variant="primary")
with gr.Column(scale=1, min_width=50):
stop_button = gr.Button(value="Stop", variant="secondary")
with gr.Row():
with gr.Column(scale=2):
chatbot = gr.Chatbot(label="Chatbot History", autoscroll=True, height=580)
with gr.Column(scale=3):
iframe = gr.HTML(
f'<iframe src="http://{args.windows_host_url}/vnc.html?view_only=1&autoconnect=1&resize=scale" width="100%" height="580" allow="fullscreen"></iframe>',
container=False,
elem_classes="no-padding"
)
def update_model(model_selection, state):
state["model"] = model_selection
print(f"Model updated to: {state['model']}")
if model_selection == "claude-3-5-sonnet-20241022":
provider_choices = [option.value for option in APIProvider if option.value != "openai"]
elif model_selection in set(["omniparser + gpt-4o", "omniparser + o1", "omniparser + o3-mini", "omniparser + gpt-4o-orchestrated", "omniparser + o1-orchestrated", "omniparser + o3-mini-orchestrated"]):
provider_choices = ["openai"]
elif model_selection == "omniparser + R1":
provider_choices = ["groq"]
elif model_selection == "omniparser + qwen2.5vl":
provider_choices = ["dashscope"]
else:
provider_choices = [option.value for option in APIProvider]
default_provider_value = provider_choices[0]
provider_interactive = len(provider_choices) > 1
api_key_placeholder = f"{default_provider_value.title()} API Key"
# Update state
state["provider"] = default_provider_value
state["api_key"] = state.get(f"{default_provider_value}_api_key", "")
# Calls to update other components UI
provider_update = gr.update(
choices=provider_choices,
value=default_provider_value,
interactive=provider_interactive
)
api_key_update = gr.update(
placeholder=api_key_placeholder,
value=state["api_key"]
)
return provider_update, api_key_update
def update_only_n_images(only_n_images_value, state):
state["only_n_most_recent_images"] = only_n_images_value
def update_provider(provider_value, state):
# Update state
state["provider"] = provider_value
state["api_key"] = state.get(f"{provider_value}_api_key", "")
# Calls to update other components UI
api_key_update = gr.update(
placeholder=f"{provider_value.title()} API Key",
value=state["api_key"]
)
return api_key_update
def update_api_key(api_key_value, state):
state["api_key"] = api_key_value
state[f'{state["provider"]}_api_key'] = api_key_value
def clear_chat(state):
# Reset message-related state
state["messages"] = []
state["responses"] = {}
state["tools"] = {}
state['chatbot_messages'] = []
return state['chatbot_messages']
model.change(fn=update_model, inputs=[model, state], outputs=[provider, api_key])
only_n_images.change(fn=update_only_n_images, inputs=[only_n_images, state], outputs=None)
provider.change(fn=update_provider, inputs=[provider, state], outputs=api_key)
api_key.change(fn=update_api_key, inputs=[api_key, state], outputs=None)
chatbot.clear(fn=clear_chat, inputs=[state], outputs=[chatbot])
submit_button.click(process_input, [chat_input, state], chatbot)
stop_button.click(stop_app, [state], None)
if __name__ == "__main__":
demo.launch(server_name="127.0.0.1", server_port=7888)

omnitool/gradio/app_new.py

@@ -0,0 +1,760 @@
"""
The app contains:
- a new UI for the OmniParser AI Agent.
-
python app_new.py --windows_host_url localhost:8006 --omniparser_server_url localhost:8000
"""
import os
import io
import shutil
import mimetypes
from datetime import datetime
from enum import StrEnum
from functools import partial
from pathlib import Path
from typing import cast, List, Optional
import argparse
import gradio as gr
from anthropic import APIResponse
from anthropic.types import TextBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
from anthropic.types.tool_use_block import ToolUseBlock
from loop import (
APIProvider,
sampling_loop_sync,
)
from tools import ToolResult
import requests
from requests.exceptions import RequestException
import base64
CONFIG_DIR = Path("~/.anthropic").expanduser()
API_KEY_FILE = CONFIG_DIR / "api_key"
INTRO_TEXT = '''
<div style="text-align: center; margin-bottom: 10px;">
<h2>OmniParser AI Agent</h2>
<p>Turn any vision-language model into an AI agent. We currently support <b>OpenAI (4o/o1/o3-mini), DeepSeek (R1), Qwen (2.5VL), and Anthropic Computer Use (Sonnet)</b>.</p>
<p>Type a message and press send to start OmniTool. Press stop to pause, and press the trash icon in the chat to clear the message history.</p>
<p>You can also upload files for analysis using the file upload section.</p>
</div>
'''
def parse_arguments():
parser = argparse.ArgumentParser(description="Gradio App")
parser.add_argument("--windows_host_url", type=str, default='localhost:8006')
parser.add_argument("--omniparser_server_url", type=str, default="localhost:8000")
parser.add_argument("--run_folder", type=str, default="./tmp/outputs")
return parser.parse_args()
args = parse_arguments()
# Create a timestamped run folder under the output directory given by --run_folder
RUN_FOLDER = Path(os.path.join(args.run_folder, datetime.now().strftime('%Y%m%d_%H%M')))
RUN_FOLDER.mkdir(parents=True, exist_ok=True)
class Sender(StrEnum):
USER = "user"
BOT = "assistant"
TOOL = "tool"
def load_existing_files():
"""Load all existing files from the uploads folder"""
files = []
if RUN_FOLDER.exists():
for file_path in RUN_FOLDER.iterdir():
if file_path.is_file():
files.append(str(file_path))
return files
def setup_state(state):
if "messages" not in state:
state["messages"] = []
if "model" not in state:
state["model"] = "omniparser + gpt-4o-orchestrated"
if "provider" not in state:
state["provider"] = "openai"
if "openai_api_key" not in state: # Fetch API keys from environment variables
state["openai_api_key"] = os.getenv("OPENAI_API_KEY", "")
if "anthropic_api_key" not in state:
state["anthropic_api_key"] = os.getenv("ANTHROPIC_API_KEY", "")
if "api_key" not in state:
state["api_key"] = ""
if "auth_validated" not in state:
state["auth_validated"] = False
if "responses" not in state:
state["responses"] = {}
if "tools" not in state:
state["tools"] = {}
if "only_n_most_recent_images" not in state:
state["only_n_most_recent_images"] = 2
if 'chatbot_messages' not in state:
state['chatbot_messages'] = []
if 'stop' not in state:
state['stop'] = False
if 'uploaded_files' not in state:
state['uploaded_files'] = [] # Start with an empty list instead of loading existing files
async def main(state):
"""Render loop for Gradio"""
setup_state(state)
return "Setup completed"
def validate_auth(provider: APIProvider, api_key: str | None):
if provider == APIProvider.ANTHROPIC:
if not api_key:
return "Enter your Anthropic API key to continue."
if provider == APIProvider.BEDROCK:
import boto3
if not boto3.Session().get_credentials():
return "You must have AWS credentials set up to use the Bedrock API."
if provider == APIProvider.VERTEX:
import google.auth
from google.auth.exceptions import DefaultCredentialsError
if not os.environ.get("CLOUD_ML_REGION"):
return "Set the CLOUD_ML_REGION environment variable to use the Vertex API."
try:
google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
except DefaultCredentialsError:
return "Your google cloud credentials are not set up correctly."
def load_from_storage(filename: str) -> str | None:
"""Load data from a file in the storage directory."""
try:
file_path = CONFIG_DIR / filename
if file_path.exists():
data = file_path.read_text().strip()
if data:
return data
except Exception as e:
print(f"Debug: Error loading {filename}: {e}")
return None
def save_to_storage(filename: str, data: str) -> None:
"""Save data to a file in the storage directory."""
try:
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
file_path = CONFIG_DIR / filename
file_path.write_text(data)
# Ensure only user can read/write the file
file_path.chmod(0o600)
except Exception as e:
print(f"Debug: Error saving {filename}: {e}")
def _api_response_callback(response: APIResponse[BetaMessage], response_state: dict):
response_id = datetime.now().isoformat()
response_state[response_id] = response
def _tool_output_callback(tool_output: ToolResult, tool_id: str, tool_state: dict):
tool_state[tool_id] = tool_output
def chatbot_output_callback(message, chatbot_state, hide_images=False, sender="bot"):
def _render_message(message: str | BetaTextBlock | BetaToolUseBlock | ToolResult, hide_images=False):
print(f"_render_message: {str(message)[:100]}")
if isinstance(message, str):
return message
is_tool_result = not isinstance(message, str) and (
isinstance(message, ToolResult)
or message.__class__.__name__ == "ToolResult"
)
if not message or (
is_tool_result
and hide_images
and not hasattr(message, "error")
and not hasattr(message, "output")
): # return None if hide_images is True
return
# render tool result
if is_tool_result:
message = cast(ToolResult, message)
if message.output:
return message.output
if message.error:
return f"Error: {message.error}"
if message.base64_image and not hide_images:
# gr.Image rendering does not work here, so embed the screenshot as an inline <img> tag instead
# image_data = base64.b64decode(message.base64_image)
# return gr.Image(value=Image.open(io.BytesIO(image_data)))
return f'<img src="data:image/png;base64,{message.base64_image}">'
elif isinstance(message, BetaTextBlock) or isinstance(message, TextBlock):
# Format reasoning text in a collapsible dropdown
return f"Next step Reasoning: {message.text}"
# reasoning_text = message.text
# return f'''
# <details>
# <summary>Current Step Reasoning (click to expand):</summary>
# <div style="padding: 10px; background-color: #f8f9fa; border-radius: 5px; margin-top: 5px;">
# <pre>{reasoning_text}</pre>
# </div>
# </details>
# '''
elif isinstance(message, BetaToolUseBlock) or isinstance(message, ToolUseBlock):
# return f"Next I will perform the following action: {message.input}"
return None
else:
return message
def _truncate_string(s, max_length=500):
"""Truncate long strings for concise printing."""
if isinstance(s, str) and len(s) > max_length:
return s[:max_length] + "..."
return s
# processing Anthropic messages
message = _render_message(message, hide_images)
if sender == "bot":
chatbot_state.append((None, message))
else:
chatbot_state.append((message, None))
# Create a concise version of the chatbot state for printing
concise_state = [(_truncate_string(user_msg), _truncate_string(bot_msg))
for user_msg, bot_msg in chatbot_state]
# print(f"chatbot_output_callback chatbot_state: {concise_state} (truncated)")
def valid_params(user_input, state):
"""Validate all requirements and return a list of error messages."""
errors = []
for server_name, url in [('Windows Host', 'localhost:5000'), ('OmniParser Server', args.omniparser_server_url)]:
try:
url = f'http://{url}/probe'
response = requests.get(url, timeout=3)
if response.status_code != 200:
errors.append(f"{server_name} is not responding")
except RequestException:
errors.append(f"{server_name} is not responding")
if not state["api_key"].strip():
errors.append("LLM API Key is not set")
if not user_input:
errors.append("no computer use request provided")
return errors
def process_input(user_input, state):
# Reset the stop flag
if state["stop"]:
state["stop"] = False
errors = valid_params(user_input, state)
if errors:
raise gr.Error("Validation errors: " + ", ".join(errors))
# Append the user message to state["messages"]
state["messages"].append(
{
"role": Sender.USER,
"content": [TextBlock(type="text", text=user_input)],
}
)
# Append the user's message to chatbot_messages with None for the assistant's reply
state['chatbot_messages'].append((user_input, None))
yield state['chatbot_messages'], gr.update() # Yield to update the chatbot UI with the user's message
print("state")
print(state)
# Run sampling_loop_sync with the chatbot_output_callback
for loop_msg in sampling_loop_sync(
model=state["model"],
provider=state["provider"],
messages=state["messages"],
output_callback=partial(chatbot_output_callback, chatbot_state=state['chatbot_messages'], hide_images=False),
tool_output_callback=partial(_tool_output_callback, tool_state=state["tools"]),
api_response_callback=partial(_api_response_callback, response_state=state["responses"]),
api_key=state["api_key"],
only_n_most_recent_images=state["only_n_most_recent_images"],
max_tokens=16384,
omniparser_url=args.omniparser_server_url,
save_folder=str(RUN_FOLDER)
):
if loop_msg is None or state.get("stop"):
# Detect and add new files to the state
file_choices_update = detect_new_files(state)
yield state['chatbot_messages'], file_choices_update
print("End of task. Close the loop.")
break
yield state['chatbot_messages'], gr.update() # Yield the updated chatbot_messages to update the chatbot UI
# Final detection of new files
file_choices_update = detect_new_files(state)
yield state['chatbot_messages'], file_choices_update
def stop_app(state):
state["stop"] = True
return "App stopped"
def get_header_image_base64():
try:
# Get the absolute path to the image relative to this script
script_dir = Path(__file__).parent
image_path = script_dir.parent.parent / "imgs" / "header_bar_thin.png"
with open(image_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
return f'data:image/png;base64,{encoded_string}'
except Exception as e:
print(f"Failed to load header image: {e}")
return None
def get_file_viewer_html(file_path=None):
"""Generate HTML to view a file based on its type"""
if not file_path:
# Return the VNC viewer iframe
return f'<iframe src="http://{args.windows_host_url}/vnc.html?view_only=1&autoconnect=1&resize=scale" width="100%" height="580" allow="fullscreen"></iframe>'
file_path = Path(file_path)
if not file_path.exists():
return f'<div class="error-message">File not found: {file_path.name}</div>'
# Determine the file type
mime_type, _ = mimetypes.guess_type(file_path)
file_type = mime_type.split('/')[0] if mime_type else 'unknown'
file_extension = file_path.suffix.lower()
# Handle different file types
if file_type == 'image':
# For images, display them directly
with open(file_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
return f'<div class="file-viewer"><h3>{file_path.name}</h3><img src="data:{mime_type};base64,{encoded_string}" style="max-width:100%; max-height:500px;"></div>'
elif file_extension in ['.txt', '.py', '.js', '.html', '.css', '.json', '.md', '.csv'] or file_type == 'text':
# For text files, display the content with syntax highlighting for code
try:
content = file_path.read_text(errors='replace') # Use 'replace' to handle encoding issues
# Escape HTML characters
content = content.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
# Add syntax highlighting class based on file extension
highlight_class = ""
if file_extension == '.py':
highlight_class = "language-python"
elif file_extension == '.js':
highlight_class = "language-javascript"
elif file_extension == '.html':
highlight_class = "language-html"
elif file_extension == '.css':
highlight_class = "language-css"
elif file_extension == '.json':
highlight_class = "language-json"
return f'''
<div class="file-viewer">
<h3>{file_path.name}</h3>
<pre class="{highlight_class}" style="background-color: #f5f5f5; padding: 10px; border-radius: 5px; overflow: auto; max-height: 500px; white-space: pre-wrap;"><code>{content}</code></pre>
<script>
// Add basic syntax highlighting with CSS
if (document.querySelector('.language-python')) {{
const keywords = ['def', 'class', 'import', 'from', 'return', 'if', 'else', 'elif', 'for', 'while', 'try', 'except', 'with', 'as', 'in', 'not', 'and', 'or', 'True', 'False', 'None'];
const code = document.querySelector('.language-python code');
let html = code.innerHTML;
keywords.forEach(keyword => {{
const regex = new RegExp('\\\\b' + keyword + '\\\\b', 'g');
html = html.replace(regex, `<span style="color: #0000FF; font-weight: bold;">$&</span>`);
}});
// Highlight strings
html = html.replace(/(["'])(?:(?=(\\\\?))\2.)*?\1/g, '<span style="color: #008000;">$&</span>');
// Highlight comments
html = html.replace(/(#.*)$/gm, '<span style="color: #808080;">$1</span>');
code.innerHTML = html;
}}
</script>
</div>
'''
except UnicodeDecodeError:
return f'<div class="error-message">Cannot display binary file: {file_path.name}</div>'
elif file_type == 'video':
# For videos, use video tag
with open(file_path, "rb") as video_file:
encoded_string = base64.b64encode(video_file.read()).decode()
return f'''
<div class="file-viewer">
<h3>{file_path.name}</h3>
<video controls style="max-width:100%; max-height:500px;">
<source src="data:{mime_type};base64,{encoded_string}" type="{mime_type}">
Your browser does not support the video tag.
</video>
</div>
'''
elif file_type == 'audio':
# For audio, use audio tag
with open(file_path, "rb") as audio_file:
encoded_string = base64.b64encode(audio_file.read()).decode()
return f'''
<div class="file-viewer">
<h3>{file_path.name}</h3>
<audio controls>
<source src="data:{mime_type};base64,{encoded_string}" type="{mime_type}">
Your browser does not support the audio tag.
</audio>
</div>
'''
elif file_extension == '.pdf':
# For PDFs, embed them using an iframe with base64 data
try:
with open(file_path, "rb") as pdf_file:
encoded_string = base64.b64encode(pdf_file.read()).decode()
return f'''
<div class="file-viewer">
<h3>{file_path.name}</h3>
<iframe src="data:application/pdf;base64,{encoded_string}" width="100%" height="500px" style="border: none;"></iframe>
</div>
'''
except Exception as e:
return f'<div class="error-message">Error displaying PDF: {str(e)}</div>'
else:
# For other file types, show info but can't display
size_kb = file_path.stat().st_size / 1024
return f'<div class="file-viewer"><h3>{file_path.name}</h3><p>File type: {mime_type or "Unknown"}</p><p>Size: {size_kb:.2f} KB</p><p>This file type cannot be displayed in the browser.</p></div>'
def handle_file_upload(files, state):
"""Handle file uploads and store them in the upload directory"""
if not files:
return gr.update(choices=[])
file_choices = []
for file in files:
# Get the file name and create a path in the upload directory
file_name = Path(file.name).name
file_path = RUN_FOLDER / file_name
# Save the file
shutil.copy(file.name, file_path)
# Add to the list of uploaded files
file_path_str = str(file_path)
file_choices.append((file_name, file_path_str))
# Add to state
if file_path_str not in state['uploaded_files']:
state['uploaded_files'].append(file_path_str)
# Update the view file dropdown with all uploaded files
all_file_choices = [(Path(path).name, path) for path in state['uploaded_files']]
return gr.update(choices=all_file_choices)
def toggle_view(view_mode, file_path=None, state=None):
"""Toggle between OmniTool Computer view and file viewer"""
# If switching to File Viewer mode, detect and add new files to the state
file_choices_update = gr.update()
if view_mode == "File Viewer" and state is not None:
file_choices_update = detect_new_files(state)
# Return the appropriate view
if view_mode == "OmniTool Computer":
return get_file_viewer_html(), file_choices_update # This returns the VNC iframe
else: # File Viewer mode
if file_path:
return get_file_viewer_html(file_path), file_choices_update
else:
return get_file_viewer_html(), file_choices_update # Default to VNC if no file selected
def detect_new_files(state):
"""Detect new files in the uploads folder and add them to the state"""
new_files_count = 0
if RUN_FOLDER.exists():
current_files = set(state['uploaded_files'])
for file_path in RUN_FOLDER.iterdir():
if file_path.is_file():
file_path_str = str(file_path)
if file_path_str not in current_files:
# This is a new file not yet in the state
state['uploaded_files'].append(file_path_str)
new_files_count += 1
print(f"Added new file to state: {file_path_str}")
# Return updated file choices
file_choices = [(Path(path).name, path) for path in state['uploaded_files']]
print(f"Detected {new_files_count} new files. Total files in state: {len(state['uploaded_files'])}")
return gr.update(choices=file_choices)
def refresh_files(state):
"""Refresh the list of files from the current session and detect new files"""
return detect_new_files(state)
def auto_refresh_files(state):
"""Automatically refresh the list of files from the current session and detect new files"""
return detect_new_files(state)
with gr.Blocks(theme=gr.themes.Default()) as demo:
gr.HTML("""
<style>
.no-padding {
padding: 0 !important;
}
.no-padding > div {
padding: 0 !important;
}
.markdown-text p {
font-size: 18px; /* Adjust the font size as needed */
}
</style>
""")
state = gr.State({})
setup_state(state.value)
header_image = get_header_image_base64()
if header_image:
gr.HTML(f'<img src="{header_image}" alt="OmniTool Header" width="100%">', elem_classes="no-padding")
gr.HTML('<h1 style="text-align: center; font-weight: normal; margin-bottom: 20px;">Omni<span style="font-weight: bold;">Tool</span></h1>')
else:
gr.Markdown("# OmniTool", elem_classes="text-center")
if not os.getenv("HIDE_WARNING", False):
gr.HTML(INTRO_TEXT, elem_classes="markdown-text")
with gr.Accordion("Settings", open=True, elem_classes="accordion-header"):
with gr.Row():
with gr.Column():
model = gr.Dropdown(
label="Model",
choices=["omniparser + gpt-4o", "omniparser + o1", "omniparser + o3-mini", "omniparser + R1", "omniparser + qwen2.5vl", "claude-3-5-sonnet-20241022", "omniparser + gpt-4o-orchestrated", "omniparser + o1-orchestrated", "omniparser + o3-mini-orchestrated", "omniparser + R1-orchestrated", "omniparser + qwen2.5vl-orchestrated"],
value="omniparser + gpt-4o-orchestrated",
interactive=True,
container=True
)
with gr.Column():
only_n_images = gr.Slider(
label="N most recent screenshots",
minimum=0,
maximum=10,
step=1,
value=2,
interactive=True
)
with gr.Row():
with gr.Column(1):
provider = gr.Dropdown(
label="API Provider",
choices=[option.value for option in APIProvider],
value="openai",
interactive=False,
container=True
)
with gr.Column(2):
api_key = gr.Textbox(
label="API Key",
type="password",
value=state.value.get("api_key", ""),
placeholder="Paste your API key here",
interactive=True,
container=True
)
# File Upload Section
with gr.Accordion("File Upload & Management", open=True, elem_classes="accordion-header"):
with gr.Row():
with gr.Column():
file_upload = gr.File(
label="Upload Files",
file_count="multiple",
type="filepath",
elem_classes="file-upload-area"
)
with gr.Column():
with gr.Row():
upload_button = gr.Button("Upload Files", variant="primary", elem_classes="primary-button")
refresh_button = gr.Button("Refresh Files", variant="secondary", elem_classes="secondary-button")
with gr.Row():
# Initialize file choices as an empty list
view_file_dropdown = gr.Dropdown(
label="View File",
choices=[],
interactive=True,
container=True
)
view_toggle = gr.Radio(
label="Display Mode",
choices=["OmniTool Computer", "File Viewer"],
value="OmniTool Computer",
interactive=True
)
with gr.Row():
with gr.Column(scale=8):
chat_input = gr.Textbox(
show_label=False,
placeholder="Type a message to send to Omniparser + X ...",
container=False
)
with gr.Column(scale=1, min_width=50):
submit_button = gr.Button(value="Send", variant="primary", elem_classes="primary-button")
with gr.Column(scale=1, min_width=50):
stop_button = gr.Button(value="Stop", variant="secondary", elem_classes="secondary-button")
with gr.Row():
with gr.Column(scale=2):
chatbot = gr.Chatbot(
label="Chatbot History",
autoscroll=True,
height=580,
avatar_images=("👤", "🤖")
)
with gr.Column(scale=3):
display_area = gr.HTML(
get_file_viewer_html(),
elem_classes="no-padding"
)
def update_model(model_selection, state):
state["model"] = model_selection
print(f"Model updated to: {state['model']}")
if model_selection == "claude-3-5-sonnet-20241022":
provider_choices = [option.value for option in APIProvider if option.value != "openai"]
elif model_selection in set(["omniparser + gpt-4o", "omniparser + o1", "omniparser + o3-mini", "omniparser + gpt-4o-orchestrated", "omniparser + o1-orchestrated", "omniparser + o3-mini-orchestrated"]):
provider_choices = ["openai"]
elif model_selection == "omniparser + R1":
provider_choices = ["groq"]
elif model_selection == "omniparser + qwen2.5vl":
provider_choices = ["dashscope"]
else:
provider_choices = [option.value for option in APIProvider]
default_provider_value = provider_choices[0]
provider_interactive = len(provider_choices) > 1
api_key_placeholder = f"{default_provider_value.title()} API Key"
# Update state
state["provider"] = default_provider_value
state["api_key"] = state.get(f"{default_provider_value}_api_key", "")
# Calls to update other components UI
provider_update = gr.update(
choices=provider_choices,
value=default_provider_value,
interactive=provider_interactive
)
api_key_update = gr.update(
placeholder=api_key_placeholder,
value=state["api_key"]
)
return provider_update, api_key_update
def update_only_n_images(only_n_images_value, state):
state["only_n_most_recent_images"] = only_n_images_value
def update_provider(provider_value, state):
# Update state
state["provider"] = provider_value
state["api_key"] = state.get(f"{provider_value}_api_key", "")
# Calls to update other components UI
api_key_update = gr.update(
placeholder=f"{provider_value.title()} API Key",
value=state["api_key"]
)
return api_key_update
def update_api_key(api_key_value, state):
state["api_key"] = api_key_value
state[f'{state["provider"]}_api_key'] = api_key_value
def clear_chat(state):
# Reset message-related state
state["messages"] = []
state["responses"] = {}
state["tools"] = {}
state['chatbot_messages'] = []
return state['chatbot_messages']
def view_file(file_path, view_mode):
"""Generate HTML to view the selected file if in File Viewer mode"""
if view_mode == "File Viewer" and file_path:
return get_file_viewer_html(file_path)
elif view_mode == "OmniTool Computer":
return get_file_viewer_html() # Return VNC viewer
else:
return display_area.value # Keep current display
def update_view_file_dropdown(uploaded_files):
"""Update the view file dropdown when uploaded files change"""
if not uploaded_files:
return gr.update(choices=[])
file_choices = [(Path(path).name, path) for path in uploaded_files]
return gr.update(choices=file_choices)
def reset_view():
"""Reset the view to the VNC viewer"""
return get_file_viewer_html()
model.change(fn=update_model, inputs=[model, state], outputs=[provider, api_key])
only_n_images.change(fn=update_only_n_images, inputs=[only_n_images, state], outputs=None)
provider.change(fn=update_provider, inputs=[provider, state], outputs=api_key)
api_key.change(fn=update_api_key, inputs=[api_key, state], outputs=None)
chatbot.clear(fn=clear_chat, inputs=[state], outputs=[chatbot])
# File upload event handlers
upload_button.click(
fn=handle_file_upload,
inputs=[file_upload, state],
outputs=[view_file_dropdown]
)
# File viewing handlers
view_file_dropdown.change(
fn=view_file,
inputs=[view_file_dropdown, view_toggle],
outputs=[display_area]
)
submit_button.click(process_input, [chat_input, state], [chatbot, view_file_dropdown])
stop_button.click(stop_app, [state], None)
# Toggle view handler
view_toggle.change(
fn=toggle_view,
inputs=[view_toggle, view_file_dropdown, state],
outputs=[display_area, view_file_dropdown]
)
# Refresh files handler
refresh_button.click(fn=refresh_files, inputs=[state], outputs=[view_file_dropdown])
# Add JavaScript for auto-refresh instead of using demo.load()
js_refresh = """
function() {
// Auto-refresh files every 5 seconds
const refreshInterval = setInterval(function() {
// Find and click the refresh button
const refreshButtons = document.querySelectorAll('button');
for (const button of refreshButtons) {
if (button.textContent.includes('Refresh Files')) {
button.click();
break;
}
}
}, 5000);
// Return a cleanup function
return () => clearInterval(refreshInterval);
}
"""
# Add the JavaScript to the page
gr.HTML("<script>(" + js_refresh + ")();</script>")
if __name__ == "__main__":
demo.launch(server_name="127.0.0.1", server_port=7888)


@@ -0,0 +1,470 @@
"""
Streamlit implementation of the OmniTool frontend.
Usage: streamlit run app_streamlit.py -- --windows_host_url localhost:8006 --omniparser_server_url localhost:8000
"""
import os
import io
import shutil
import mimetypes
import argparse
import base64
from datetime import datetime
from pathlib import Path
from typing import cast
from enum import StrEnum
import streamlit as st
from anthropic import APIResponse
from anthropic.types import TextBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
from anthropic.types.tool_use_block import ToolUseBlock
import requests
from requests.exceptions import RequestException
from loop import (
APIProvider,
sampling_loop_sync,
)
from tools import ToolResult
# Constants and configurations
CONFIG_DIR = Path("~/.anthropic").expanduser()
API_KEY_FILE = CONFIG_DIR / "api_key"
UPLOAD_FOLDER = Path("./uploads").absolute()
UPLOAD_FOLDER.mkdir(parents=True, exist_ok=True)
class Sender(StrEnum):
USER = "user"
BOT = "assistant"
TOOL = "tool"
def parse_arguments():
parser = argparse.ArgumentParser(description="Streamlit App")
parser.add_argument("--windows_host_url", type=str, default='localhost:8006')
parser.add_argument("--omniparser_server_url", type=str, default="localhost:8000")
parser.add_argument("--upload_folder", type=str, default="./uploads")
return parser.parse_known_args()[0]
def initialize_session_state():
"""Initialize session state variables"""
if "messages" not in st.session_state:
st.session_state.messages = []
if "model" not in st.session_state:
st.session_state.model = "omniparser + gpt-4o-orchestrated"
if "provider" not in st.session_state:
st.session_state.provider = "openai"
if "api_key" not in st.session_state:
st.session_state.api_key = os.getenv("OPENAI_API_KEY", "")
if "anthropic_api_key" not in st.session_state:
st.session_state.anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", "")
if "only_n_most_recent_images" not in st.session_state:
st.session_state.only_n_most_recent_images = 2
if "responses" not in st.session_state:
st.session_state.responses = {}
if "tools" not in st.session_state:
st.session_state.tools = {}
if "uploaded_files" not in st.session_state:
st.session_state.uploaded_files = []
if "selected_file" not in st.session_state:
st.session_state.selected_file = "None"
if "stop" not in st.session_state:
st.session_state.stop = False
def get_file_viewer_html(file_path=None, windows_host_url=None):
"""Generate HTML to view a file based on its type"""
if not file_path:
# Return the VNC viewer iframe
return f'<iframe src="http://{windows_host_url}/vnc.html?view_only=1&autoconnect=1&resize=scale" width="100%" height="580" allow="fullscreen"></iframe>'
file_path = Path(file_path)
if not file_path.exists():
return f'<div class="error-message">File not found: {file_path.name}</div>'
mime_type, _ = mimetypes.guess_type(file_path)
file_type = mime_type.split('/')[0] if mime_type else 'unknown'
file_extension = file_path.suffix.lower()
if file_type == 'image':
with open(file_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
return f'<div class="file-viewer"><h3>{file_path.name}</h3><img src="data:{mime_type};base64,{encoded_string}" style="max-width:100%; max-height:500px;"></div>'
elif file_extension in ['.txt', '.py', '.js', '.html', '.css', '.json', '.md', '.csv'] or file_type == 'text':
try:
content = file_path.read_text(errors='replace')
content = content.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
return f'<div class="file-viewer"><h3>{file_path.name}</h3><pre style="background-color: #f5f5f5; padding: 10px; border-radius: 5px; overflow: auto; max-height: 500px; white-space: pre-wrap;"><code>{content}</code></pre></div>'
except UnicodeDecodeError:
return f'<div class="error-message">Cannot display binary file: {file_path.name}</div>'
else:
size_kb = file_path.stat().st_size / 1024
return f'<div class="file-viewer"><h3>{file_path.name}</h3><p>File type: {mime_type or "Unknown"}</p><p>Size: {size_kb:.2f} KB</p><p>This file type cannot be displayed in the browser.</p></div>'
def handle_file_upload(uploaded_files):
"""Handle file uploads and store them in the upload directory"""
if uploaded_files:
for file in uploaded_files:
file_path = UPLOAD_FOLDER / file.name
with open(file_path, "wb") as f:
f.write(file.getvalue())
if str(file_path) not in st.session_state.uploaded_files:
st.session_state.uploaded_files.append(str(file_path))
def _api_response_callback(response: APIResponse[BetaMessage]):
response_id = datetime.now().isoformat()
st.session_state.responses[response_id] = response
def _tool_output_callback(tool_output: ToolResult, tool_id: str):
st.session_state.tools[tool_id] = tool_output
def chatbot_output_callback(message, hide_images=False):
def _render_message(message: str | BetaTextBlock | BetaToolUseBlock | ToolResult, hide_images=False):
if isinstance(message, str):
return message
is_tool_result = not isinstance(message, str) and (
isinstance(message, ToolResult)
or message.__class__.__name__ == "ToolResult"
)
if is_tool_result:
message = cast(ToolResult, message)
if message.output:
return message.output
if message.error:
return f"Error: {message.error}"
if message.base64_image and not hide_images:
return f'<img src="data:image/png;base64,{message.base64_image}">'
elif isinstance(message, (BetaTextBlock, TextBlock)):
return f"Next step Reasoning: {message.text}"
elif isinstance(message, (BetaToolUseBlock, ToolUseBlock)):
return None
return message
rendered_message = _render_message(message, hide_images)
if rendered_message:
st.session_state.messages.append({"role": "assistant", "content": rendered_message})
def main():
args = parse_arguments()
initialize_session_state()
# Page configuration
st.set_page_config(
page_title="OmniTool",
page_icon="🤖",
layout="wide"
)
# Custom CSS
st.markdown("""
<style>
.stApp {
max-width: 100%;
padding: 1rem;
}
.chat-container {
height: calc(100vh - 200px);
overflow-y: auto;
position: relative;
}
.viewer-container {
height: calc(100vh - 200px);
overflow-y: auto;
}
.chat-input-container {
display: flex;
align-items: flex-end;
}
.icon-button {
border: none;
background: none;
cursor: pointer;
padding: 0.5rem;
border-radius: 50%;
transition: background-color 0.3s;
}
.icon-button:hover {
background-color: #f0f0f0;
}
.stButton button {
border-radius: 50%;
width: 40px;
height: 40px;
padding: 0;
display: flex;
align-items: center;
justify-content: center;
box-shadow: 0 1px 3px rgba(0,0,0,0.1);
}
/* Custom button styles */
.send-btn {
background-color: #000 !important;
color: white !important;
}
.stop-btn {
background-color: #f8f9fa !important;
color: #d9534f !important;
border: 1px solid #d9534f !important;
}
.upload-btn {
background-color: #f8f9fa !important;
color: #0275d8 !important;
border: 1px solid #0275d8 !important;
}
/* Hide the default button styling */
div[data-testid="stHorizontalBlock"] button[kind="secondary"] {
background-color: transparent;
border: none;
}
/* Share button positioning */
.share-button-container {
position: absolute;
top: 0;
right: 0;
z-index: 100;
}
/* Chat header with title and share button */
.chat-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 10px;
}
/* Input placeholder styling */
.stTextInput input::placeholder {
color: #6c757d;
font-style: italic;
}
</style>
""", unsafe_allow_html=True)
# Header
st.title("OmniTool")
# Sidebar with settings
with st.sidebar:
st.header("Settings")
# Model selection
model = st.selectbox(
"Model",
["omniparser + gpt-4o", "omniparser + o1", "omniparser + o3-mini",
"omniparser + R1", "omniparser + qwen2.5vl", "claude-3-5-sonnet-20241022",
"omniparser + gpt-4o-orchestrated", "omniparser + o1-orchestrated",
"omniparser + o3-mini-orchestrated", "omniparser + R1-orchestrated",
"omniparser + qwen2.5vl-orchestrated"],
index=6
)
st.session_state.model = model
# API settings
api_key = st.text_input("API Key", value=st.session_state.api_key, type="password")
st.session_state.api_key = api_key
# Image settings
n_images = st.slider("N most recent screenshots", 0, 10, 2)
st.session_state.only_n_most_recent_images = n_images
# File viewer selection
file_options = ["None"]
if st.session_state.uploaded_files:
file_options.extend([Path(f).name for f in st.session_state.uploaded_files])
selected_file = st.selectbox(
"View File",
options=file_options,
format_func=lambda x: x
)
st.session_state.selected_file = selected_file
view_mode = st.radio("Display Mode", ["OmniTool Computer", "File Viewer"])
# Main content area with two columns
col1, col2 = st.columns([2, 3])
# Chat interface (left column)
with col1:
# Chat header with title and share button
col_header_1, col_header_2 = st.columns([3, 1])
with col_header_1:
st.markdown("### Chat")
with col_header_2:
share_button = st.button("📤 Share", key="share_btn", help="Share conversation")
# Apply custom styling with HTML
st.markdown("""
<style>
button[data-testid="share_btn"] {
background-color: #f8f9fa !important;
color: #0275d8 !important;
border: 1px solid #0275d8 !important;
border-radius: 4px !important;
width: auto !important;
height: auto !important;
padding: 2px 8px !important;
font-size: 0.8rem !important;
}
</style>
""", unsafe_allow_html=True)
# Share functionality
if share_button:
# Create a shareable text of the conversation
conversation_text = ""
for message in st.session_state.messages:
if message["role"] == "user":
conversation_text += f"User: {message['content']}\n\n"
else:
conversation_text += f"Assistant: {message['content']}\n\n"
# Create a download link
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
st.download_button(
label="Download Conversation",
data=conversation_text,
file_name=f"omnitool_conversation_{timestamp}.txt",
mime="text/plain",
key="download_conversation"
)
# Display chat messages
chat_container = st.container(height=450)
with chat_container:
for message in st.session_state.messages:
if message["role"] == "user":
st.markdown(f"**You:** {message['content']}")
else:
st.markdown(f"**Assistant:** {message['content']}", unsafe_allow_html=True)
# Chat input and buttons
user_input = st.text_input(
"Type your message:",
key="user_input",
label_visibility="collapsed",
placeholder="Send message to OmniTool..."
)
# Button row with icons
col1_1, col1_2, col1_3, col1_4 = st.columns([6, 1, 1, 1])
with col1_2:
# Send button with icon - using arrow up icon
send_button = st.button("⬆️", help="Send message", key="send_btn")
# Apply custom styling with HTML
st.markdown("""
<style>
button[data-testid="send_btn"] {
background-color: black !important;
color: white !important;
}
</style>
""", unsafe_allow_html=True)
with col1_3:
# Stop button with icon
stop_button = st.button("🛑", help="Stop processing", key="stop_btn")
# Apply custom styling with HTML
st.markdown("""
<style>
button[data-testid="stop_btn"] {
background-color: #f8f9fa !important;
color: #d9534f !important;
border: 1px solid #d9534f !important;
}
</style>
""", unsafe_allow_html=True)
with col1_4:
# File upload button with icon
upload_button = st.button("📎", help="Upload files", key="upload_btn")
# Apply custom styling with HTML
st.markdown("""
<style>
button[data-testid="upload_btn"] {
background-color: #f8f9fa !important;
color: #0275d8 !important;
border: 1px solid #0275d8 !important;
}
</style>
""", unsafe_allow_html=True)
# File upload area (hidden by default, shown when upload button is clicked)
if upload_button:
uploaded_files = st.file_uploader("Upload Files", accept_multiple_files=True, label_visibility="collapsed")
if uploaded_files:
handle_file_upload(uploaded_files)
st.success(f"Uploaded {len(uploaded_files)} file(s)")
# Update file options
file_options = ["None"]
if st.session_state.uploaded_files:
file_options.extend([Path(f).name for f in st.session_state.uploaded_files])
st.rerun()
# Process send button click
if send_button and user_input:
# Add user message to state
st.session_state.messages.append({"role": "user", "content": user_input})
# Process the message through sampling_loop_sync
for loop_msg in sampling_loop_sync(
model=st.session_state.model,
provider=st.session_state.provider,
messages=[{"role": "user", "content": [TextBlock(type="text", text=msg["content"])]} for msg in st.session_state.messages],
output_callback=chatbot_output_callback,
tool_output_callback=_tool_output_callback,
api_response_callback=_api_response_callback,
api_key=st.session_state.api_key,
only_n_most_recent_images=st.session_state.only_n_most_recent_images,
max_tokens=16384,
omniparser_url=args.omniparser_server_url,
save_folder=str(UPLOAD_FOLDER)
):
if loop_msg is None or st.session_state.stop:
break
st.rerun()
# Process stop button click
if stop_button:
st.session_state.stop = True
st.info("Processing stopped")
# Viewer interface (right column)
with col2:
st.markdown("### Display")
if view_mode == "OmniTool Computer":
viewer_html = get_file_viewer_html(windows_host_url=args.windows_host_url)
st.components.v1.html(
viewer_html,
height=600,
scrolling=True
)
else: # File Viewer mode
if st.session_state.selected_file and st.session_state.selected_file != "None":
file_path = next((f for f in st.session_state.uploaded_files
if Path(f).name == st.session_state.selected_file), None)
if file_path:
viewer_html = get_file_viewer_html(file_path=file_path)
st.components.v1.html(
viewer_html,
height=600,
scrolling=True
)
else:
st.error(f"Could not find file: {st.session_state.selected_file}")
else:
st.info("Please select a file to view from the sidebar.")
# Debug information (temporary)
with st.expander("Debug Info"):
st.write("View Mode:", view_mode)
st.write("Selected File:", st.session_state.selected_file)
st.write("Available Files:", st.session_state.uploaded_files)
if view_mode == "File Viewer" and st.session_state.selected_file != "None":
st.write("File Path:", file_path if 'file_path' in locals() else "Not found")
if __name__ == "__main__":
main()


@@ -0,0 +1,132 @@
import asyncio
from typing import Any, Dict, cast
from collections.abc import Callable
from anthropic.types.beta import (
BetaContentBlock,
BetaContentBlockParam,
BetaImageBlockParam,
BetaMessage,
BetaMessageParam,
BetaTextBlockParam,
BetaToolResultBlockParam,
)
from anthropic.types import TextBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
from tools import ComputerTool, ToolCollection, ToolResult
class AnthropicExecutor:
def __init__(
self,
output_callback: Callable[[BetaContentBlockParam], None],
tool_output_callback: Callable[[Any, str], None],
):
self.tool_collection = ToolCollection(
ComputerTool()
)
self.output_callback = output_callback
self.tool_output_callback = tool_output_callback
def __call__(self, response: BetaMessage, messages: list[BetaMessageParam]):
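# Append the assistant turn to the shared history, execute any tool_use blocks, and yield display pairs plus the accumulated tool results.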
new_message = {
"role": "assistant",
"content": cast(list[BetaContentBlockParam], response.content),
}
if new_message not in messages:
messages.append(new_message)
else:
print("new_message already in messages, there are duplicates.")
tool_result_content: list[BetaToolResultBlockParam] = []
for content_block in cast(list[BetaContentBlock], response.content):
self.output_callback(content_block, sender="bot")
# Execute the tool
if content_block.type == "tool_use":
# Run the asynchronous tool execution in a synchronous context
result = asyncio.run(self.tool_collection.run(
name=content_block.name,
tool_input=cast(dict[str, Any], content_block.input),
))
self.output_callback(result, sender="bot")
tool_result_content.append(
_make_api_tool_result(result, content_block.id)
)
# self.tool_output_callback(result, content_block.id)
# Craft messages based on the content_block
# Note: to display messages in Gradio, organize them as (user message, bot message) pairs
display_messages = _message_display_callback(messages)
# display_messages = []
# Send the messages to the gradio
for user_msg, bot_msg in display_messages:
# yield [user_msg, bot_msg], tool_result_content
yield [None, None], tool_result_content
if not tool_result_content:
return messages
return tool_result_content
def _message_display_callback(messages):
display_messages = []
for msg in messages:
try:
if isinstance(msg["content"][0], TextBlock):
display_messages.append((msg["content"][0].text, None)) # User message
elif isinstance(msg["content"][0], BetaTextBlock):
display_messages.append((None, msg["content"][0].text)) # Bot message
elif isinstance(msg["content"][0], BetaToolUseBlock):
display_messages.append((None, f"Tool Use: {msg['content'][0].name}\nInput: {msg['content'][0].input}")) # Bot message
elif isinstance(msg["content"][0], Dict) and msg["content"][0]["content"][-1]["type"] == "image":
display_messages.append((None, f'<img src="data:image/png;base64,{msg["content"][0]["content"][-1]["source"]["data"]}">')) # Bot message
else:
print(msg["content"][0])
except Exception as e:
print("error", e)
pass
return display_messages
def _make_api_tool_result(
result: ToolResult, tool_use_id: str
) -> BetaToolResultBlockParam:
"""Convert an agent ToolResult to an API ToolResultBlockParam."""
tool_result_content: list[BetaTextBlockParam | BetaImageBlockParam] | str = []
is_error = False
if result.error:
is_error = True
tool_result_content = _maybe_prepend_system_tool_result(result, result.error)
else:
if result.output:
tool_result_content.append(
{
"type": "text",
"text": _maybe_prepend_system_tool_result(result, result.output),
}
)
if result.base64_image:
tool_result_content.append(
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/png",
"data": result.base64_image,
},
}
)
return {
"type": "tool_result",
"content": tool_result_content,
"tool_use_id": tool_use_id,
"is_error": is_error,
}
def _maybe_prepend_system_tool_result(result: ToolResult, result_text: str):
if result.system:
result_text = f"<system>{result.system}</system>\n{result_text}"
return result_text

omnitool/gradio/loop.py

@@ -0,0 +1,127 @@
"""
Agentic sampling loop that calls the Anthropic API and a local implementation of Anthropic-defined computer use tools.
"""
from collections.abc import Callable
from enum import StrEnum
from anthropic import APIResponse
from anthropic.types import (
TextBlock,
)
from anthropic.types.beta import (
BetaContentBlock,
BetaMessage,
BetaMessageParam
)
from tools import ToolResult
from agent.llm_utils.omniparserclient import OmniParserClient
from agent.anthropic_agent import AnthropicActor
from agent.vlm_agent import VLMAgent
from agent.vlm_agent_with_orchestrator import VLMOrchestratedAgent
from executor.anthropic_executor import AnthropicExecutor
BETA_FLAG = "computer-use-2024-10-22"
class APIProvider(StrEnum):
ANTHROPIC = "anthropic"
BEDROCK = "bedrock"
VERTEX = "vertex"
OPENAI = "openai"
PROVIDER_TO_DEFAULT_MODEL_NAME: dict[APIProvider, str] = {
APIProvider.ANTHROPIC: "claude-3-5-sonnet-20241022",
APIProvider.BEDROCK: "anthropic.claude-3-5-sonnet-20241022-v2:0",
APIProvider.VERTEX: "claude-3-5-sonnet-v2@20241022",
APIProvider.OPENAI: "gpt-4o",
}
def sampling_loop_sync(
*,
model: str,
provider: APIProvider | None,
messages: list[BetaMessageParam],
output_callback: Callable[[BetaContentBlock], None],
tool_output_callback: Callable[[ToolResult, str], None],
api_response_callback: Callable[[APIResponse[BetaMessage]], None],
api_key: str,
only_n_most_recent_images: int | None = 2,
max_tokens: int = 4096,
omniparser_url: str,
save_folder: str = "./uploads"
):
"""
Synchronous agentic sampling loop for the assistant/tool interaction of computer use.
"""
print('in sampling_loop_sync, model:', model)
omniparser_client = OmniParserClient(url=f"http://{omniparser_url}/parse/")
if model == "claude-3-5-sonnet-20241022":
# Register Actor and Executor
actor = AnthropicActor(
model=model,
provider=provider,
api_key=api_key,
api_response_callback=api_response_callback,
max_tokens=max_tokens,
only_n_most_recent_images=only_n_most_recent_images
)
elif model in set(["omniparser + gpt-4o", "omniparser + o1", "omniparser + o3-mini", "omniparser + R1", "omniparser + qwen2.5vl"]):
actor = VLMAgent(
model=model,
provider=provider,
api_key=api_key,
api_response_callback=api_response_callback,
output_callback=output_callback,
max_tokens=max_tokens,
only_n_most_recent_images=only_n_most_recent_images
)
elif model in set(["omniparser + gpt-4o-orchestrated", "omniparser + o1-orchestrated", "omniparser + o3-mini-orchestrated", "omniparser + R1-orchestrated", "omniparser + qwen2.5vl-orchestrated"]):
actor = VLMOrchestratedAgent(
model=model,
provider=provider,
api_key=api_key,
api_response_callback=api_response_callback,
output_callback=output_callback,
max_tokens=max_tokens,
only_n_most_recent_images=only_n_most_recent_images,
save_folder=save_folder
)
else:
raise ValueError(f"Model {model} not supported")
executor = AnthropicExecutor(
output_callback=output_callback,
tool_output_callback=tool_output_callback,
)
print(f"Model Inited: {model}, Provider: {provider}")
tool_result_content = None
print(f"Start the message loop. User messages: {messages}")
if model == "claude-3-5-sonnet-20241022": # Anthropic loop
while True:
parsed_screen = omniparser_client() # parsed_screen: {"som_image_base64": dino_labeled_img, "parsed_content_list": parsed_content_list, "screen_info"}
screen_info_block = TextBlock(text='Below is the structured accessibility information of the current UI screen, which includes text and icons you can operate on. Take this information into account when predicting the next action. Note that you will still need to take a screenshot to get the image: \n' + parsed_screen['screen_info'], type='text')
screen_info_dict = {"role": "user", "content": [screen_info_block]}
messages.append(screen_info_dict)
tools_use_needed = actor(messages=messages)
for message, tool_result_content in executor(tools_use_needed, messages):
yield message
if not tool_result_content:
return messages
messages.append({"content": tool_result_content, "role": "user"})
elif model in set(["omniparser + gpt-4o", "omniparser + o1", "omniparser + o3-mini", "omniparser + R1", "omniparser + qwen2.5vl", "omniparser + gpt-4o-orchestrated", "omniparser + o1-orchestrated", "omniparser + o3-mini-orchestrated", "omniparser + R1-orchestrated", "omniparser + qwen2.5vl-orchestrated"]):
while True:
parsed_screen = omniparser_client()
tools_use_needed, vlm_response_json = actor(messages=messages, parsed_screen=parsed_screen)
for message, tool_result_content in executor(tools_use_needed, messages):
yield message
if not tool_result_content:
return messages


@@ -0,0 +1,11 @@
from .base import ToolResult
from .collection import ToolCollection
from .computer import ComputerTool
from .screen_capture import get_screenshot
__all__ = [
"ComputerTool",
"ToolCollection",
"ToolResult",
"get_screenshot",
]


@@ -0,0 +1,65 @@
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, fields, replace
from typing import Any
from anthropic.types.beta import BetaToolUnionParam
class BaseAnthropicTool(metaclass=ABCMeta):
"""Abstract base class for Anthropic-defined tools."""
@abstractmethod
def __call__(self, **kwargs) -> Any:
"""Executes the tool with the given arguments."""
...
@abstractmethod
def to_params(
self,
) -> BetaToolUnionParam:
raise NotImplementedError
@dataclass(kw_only=True, frozen=True)
class ToolResult:
"""Represents the result of a tool execution."""
output: str | None = None
error: str | None = None
base64_image: str | None = None
system: str | None = None
def __bool__(self):
return any(getattr(self, field.name) for field in fields(self))
def __add__(self, other: "ToolResult"):
def combine_fields(
field: str | None, other_field: str | None, concatenate: bool = True
):
if field and other_field:
if concatenate:
return field + other_field
raise ValueError("Cannot combine tool results")
return field or other_field
return ToolResult(
output=combine_fields(self.output, other.output),
error=combine_fields(self.error, other.error),
base64_image=combine_fields(self.base64_image, other.base64_image, False),
system=combine_fields(self.system, other.system),
)
def replace(self, **kwargs):
"""Returns a new ToolResult with the given fields replaced."""
return replace(self, **kwargs)
class ToolFailure(ToolResult):
"""A ToolResult that represents a failure."""
class ToolError(Exception):
"""Raised when a tool encounters an error."""
def __init__(self, message):
self.message = message


@@ -0,0 +1,34 @@
"""Collection classes for managing multiple tools."""
from typing import Any
from anthropic.types.beta import BetaToolUnionParam
from .base import (
BaseAnthropicTool,
ToolError,
ToolFailure,
ToolResult,
)
class ToolCollection:
"""A collection of anthropic-defined tools."""
def __init__(self, *tools: BaseAnthropicTool):
self.tools = tools
self.tool_map = {tool.to_params()["name"]: tool for tool in tools}
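# Index tools by the API name from to_params() so run() can dispatch requests by name.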
def to_params(
self,
) -> list[BetaToolUnionParam]:
return [tool.to_params() for tool in self.tools]
async def run(self, *, name: str, tool_input: dict[str, Any]) -> ToolResult:
tool = self.tool_map.get(name)
if not tool:
return ToolFailure(error=f"Tool {name} is invalid")
try:
return await tool(**tool_input)
except ToolError as e:
return ToolFailure(error=e.message)


@@ -0,0 +1,329 @@
import base64
import time
from enum import StrEnum
from typing import Literal, TypedDict
from PIL import Image
from anthropic.types.beta import BetaToolComputerUse20241022Param
from .base import BaseAnthropicTool, ToolError, ToolResult
from .screen_capture import get_screenshot
import requests
import re
OUTPUT_DIR = "./tmp/outputs"
TYPING_DELAY_MS = 12
TYPING_GROUP_SIZE = 50
Action = Literal[
"key",
"type",
"mouse_move",
"left_click",
"left_click_drag",
"right_click",
"middle_click",
"double_click",
"screenshot",
"cursor_position",
"hover",
"wait"
]
class Resolution(TypedDict):
width: int
height: int
MAX_SCALING_TARGETS: dict[str, Resolution] = {
"XGA": Resolution(width=1024, height=768), # 4:3
"WXGA": Resolution(width=1280, height=800), # 16:10
"FWXGA": Resolution(width=1366, height=768), # ~16:9
}
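# Target resolutions for coordinate scaling between the API's virtual screen and the real display (see scale_coordinates).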
class ScalingSource(StrEnum):
COMPUTER = "computer"
API = "api"
class ComputerToolOptions(TypedDict):
display_height_px: int
display_width_px: int
display_number: int | None
def chunks(s: str, chunk_size: int) -> list[str]:
return [s[i : i + chunk_size] for i in range(0, len(s), chunk_size)]
class ComputerTool(BaseAnthropicTool):
"""
A tool that allows the agent to interact with the screen, keyboard, and mouse of the current computer.
Adapted for Windows using 'pyautogui'.
"""
name: Literal["computer"] = "computer"
api_type: Literal["computer_20241022"] = "computer_20241022"
width: int
height: int
display_num: int | None
_screenshot_delay = 2.0
_scaling_enabled = True
@property
def options(self) -> ComputerToolOptions:
width, height = self.scale_coordinates(
ScalingSource.COMPUTER, self.width, self.height
)
return {
"display_width_px": width,
"display_height_px": height,
"display_number": self.display_num,
}
def to_params(self) -> BetaToolComputerUse20241022Param:
return {"name": self.name, "type": self.api_type, **self.options}
def __init__(self, is_scaling: bool = False):
super().__init__()
# Get screen width and height using Windows command
self.display_num = None
self.offset_x = 0
self.offset_y = 0
self.is_scaling = is_scaling
self.width, self.height = self.get_screen_size()
print(f"screen size: {self.width}, {self.height}")
self.key_conversion = {"Page_Down": "pagedown",
"Page_Up": "pageup",
"Super_L": "win",
"Escape": "esc"}
async def __call__(
self,
*,
action: Action,
text: str | None = None,
coordinate: tuple[int, int] | None = None,
**kwargs,
):
print(f"action: {action}, text: {text}, coordinate: {coordinate}, is_scaling: {self.is_scaling}")
if action in ("mouse_move", "left_click_drag"):
if coordinate is None:
raise ToolError(f"coordinate is required for {action}")
if text is not None:
raise ToolError(f"text is not accepted for {action}")
if not isinstance(coordinate, (list, tuple)) or len(coordinate) != 2:
raise ToolError(f"{coordinate} must be a tuple of length 2")
# if not all(isinstance(i, int) and i >= 0 for i in coordinate):
if not all(isinstance(i, int) for i in coordinate):
raise ToolError(f"{coordinate} must be a tuple of ints")
if self.is_scaling:
x, y = self.scale_coordinates(
ScalingSource.API, coordinate[0], coordinate[1]
)
else:
x, y = coordinate
# print(f"scaled_coordinates: {x}, {y}")
# print(f"offset: {self.offset_x}, {self.offset_y}")
# x += self.offset_x # TODO - check if this is needed
# y += self.offset_y
print(f"mouse move to {x}, {y}")
if action == "mouse_move":
self.send_to_vm(f"pyautogui.moveTo({x}, {y})")
return ToolResult(output=f"Moved mouse to ({x}, {y})")
elif action == "left_click_drag":
current_x, current_y = self.send_to_vm("pyautogui.position()")
self.send_to_vm(f"pyautogui.dragTo({x}, {y}, duration=0.5)")
return ToolResult(output=f"Dragged mouse from ({current_x}, {current_y}) to ({x}, {y})")
if action in ("key", "type"):
if text is None:
raise ToolError(f"text is required for {action}")
if coordinate is not None:
raise ToolError(f"coordinate is not accepted for {action}")
if not isinstance(text, str):
raise ToolError(f"{text} must be a string")
if action == "key":
# Handle key combinations
keys = text.split('+')
for key in keys:
key = self.key_conversion.get(key.strip(), key.strip())
key = key.lower()
self.send_to_vm(f"pyautogui.keyDown('{key}')") # Press down each key
for key in reversed(keys):
key = self.key_conversion.get(key.strip(), key.strip())
key = key.lower()
self.send_to_vm(f"pyautogui.keyUp('{key}')") # Release each key in reverse order
return ToolResult(output=f"Pressed keys: {text}")
elif action == "type":
# default click before type TODO: check if this is needed
self.send_to_vm("pyautogui.click()")
self.send_to_vm(f"pyautogui.typewrite('{text}', interval={TYPING_DELAY_MS / 1000})")
self.send_to_vm("pyautogui.press('enter')")
screenshot_base64 = (await self.screenshot()).base64_image
return ToolResult(output=text, base64_image=screenshot_base64)
if action in (
"left_click",
"right_click",
"double_click",
"middle_click",
"screenshot",
"cursor_position",
"left_press",
):
if text is not None:
raise ToolError(f"text is not accepted for {action}")
if coordinate is not None:
raise ToolError(f"coordinate is not accepted for {action}")
if action == "screenshot":
return await self.screenshot()
elif action == "cursor_position":
x, y = self.send_to_vm("pyautogui.position()")
x, y = self.scale_coordinates(ScalingSource.COMPUTER, x, y)
return ToolResult(output=f"X={x},Y={y}")
else:
if action == "left_click":
self.send_to_vm("pyautogui.click()")
elif action == "right_click":
self.send_to_vm("pyautogui.rightClick()")
elif action == "middle_click":
self.send_to_vm("pyautogui.middleClick()")
elif action == "double_click":
self.send_to_vm("pyautogui.doubleClick()")
elif action == "left_press":
self.send_to_vm("pyautogui.mouseDown()")
time.sleep(1)
self.send_to_vm("pyautogui.mouseUp()")
return ToolResult(output=f"Performed {action}")
if action in ("scroll_up", "scroll_down"):
if action == "scroll_up":
self.send_to_vm("pyautogui.scroll(100)")
elif action == "scroll_down":
self.send_to_vm("pyautogui.scroll(-100)")
return ToolResult(output=f"Performed {action}")
if action == "hover":
return ToolResult(output=f"Performed {action}")
if action == "wait":
time.sleep(1)
return ToolResult(output=f"Performed {action}")
raise ToolError(f"Invalid action: {action}")
def send_to_vm(self, action: str):
"""
Executes a Python command on the in-VM server. Returns an (x, y) tuple only when the action is "pyautogui.position()"; otherwise returns None.
"""
prefix = "import pyautogui; pyautogui.FAILSAFE = False;"
command_list = ["python", "-c", f"{prefix} {action}"]
parse = action == "pyautogui.position()"
if parse:
command_list[-1] = f"{prefix} print({action})"
try:
print(f"sending to vm: {command_list}")
response = requests.post(
f"http://localhost:5000/execute",
headers={'Content-Type': 'application/json'},
json={"command": command_list},
timeout=90
)
time.sleep(0.7) # avoid async error as actions take time to complete
print(f"action executed")
if response.status_code != 200:
raise ToolError(f"Failed to execute command. Status code: {response.status_code}")
if parse:
output = response.json()['output'].strip()
match = re.search(r'Point\(x=(\d+),\s*y=(\d+)\)', output)
if not match:
raise ToolError(f"Could not parse coordinates from output: {output}")
x, y = map(int, match.groups())
return x, y
except requests.exceptions.RequestException as e:
raise ToolError(f"An error occurred while trying to execute the command: {str(e)}")
async def screenshot(self):
if not hasattr(self, 'target_dimension'):
# Fall back to WXGA when scale_coordinates has not picked a target yet
self.target_dimension = MAX_SCALING_TARGETS["WXGA"]
width, height = self.target_dimension["width"], self.target_dimension["height"]
screenshot, path = get_screenshot(resize=True, target_width=width, target_height=height)
time.sleep(0.7) # avoid async error as actions take time to complete
return ToolResult(base64_image=base64.b64encode(path.read_bytes()).decode())
def padding_image(self, screenshot):
"""Pad the screenshot to 16:10 aspect ratio, when the aspect ratio is not 16:10."""
_, height = screenshot.size
new_width = height * 16 // 10
padding_image = Image.new("RGB", (new_width, height), (255, 255, 255))
# padding to top left
padding_image.paste(screenshot, (0, 0))
return padding_image
def scale_coordinates(self, source: ScalingSource, x: int, y: int):
"""Scale coordinates to a target maximum resolution."""
if not self._scaling_enabled:
return x, y
ratio = self.width / self.height
target_dimension = None
for target_name, dimension in MAX_SCALING_TARGETS.items():
# allow some error in the aspect ratio - not all screens match a target ratio exactly
if abs(dimension["width"] / dimension["height"] - ratio) < 0.02:
if dimension["width"] < self.width:
target_dimension = dimension
self.target_dimension = target_dimension
# print(f"target_dimension: {target_dimension}")
break
if target_dimension is None:
# TODO: currently we force the target to be WXGA (16:10), when it cannot find a match
target_dimension = MAX_SCALING_TARGETS["WXGA"]
self.target_dimension = MAX_SCALING_TARGETS["WXGA"]
# should be less than 1
x_scaling_factor = target_dimension["width"] / self.width
y_scaling_factor = target_dimension["height"] / self.height
if source == ScalingSource.API:
if x > self.width or y > self.height:
raise ToolError(f"Coordinates {x}, {y} are out of bounds")
# scale up
return round(x / x_scaling_factor), round(y / y_scaling_factor)
# scale down
return round(x * x_scaling_factor), round(y * y_scaling_factor)
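# Worked example (hypothetical 1920x1200 host screen): WXGA (1280x800) matches the
# 16:10 ratio, so both scaling factors are 1280/1920 = 800/1200 ~= 0.667. An API
# coordinate (640, 400) scales up to (960, 600) on the real screen, and a real
# screen coordinate (960, 600) scales back down to (640, 400).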
def get_screen_size(self):
"""Return width and height of the screen"""
try:
response = requests.post(
f"http://localhost:5000/execute",
headers={'Content-Type': 'application/json'},
json={"command": ["python", "-c", "import pyautogui; print(pyautogui.size())"]},
timeout=90
)
if response.status_code != 200:
raise ToolError(f"Failed to get screen size. Status code: {response.status_code}")
output = response.json()['output'].strip()
match = re.search(r'Size\(width=(\d+),\s*height=(\d+)\)', output)
if not match:
raise ToolError(f"Could not parse screen size from output: {output}")
width, height = map(int, match.groups())
return width, height
except requests.exceptions.RequestException as e:
raise ToolError(f"An error occurred while trying to get screen size: {str(e)}")
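Both helpers assume the same in-VM control server (the Flask app on port 5000 that appears later in this diff): POST a JSON command list to /execute and read the JSON reply. A request/response sketch, values illustrative:

POST http://localhost:5000/execute
{"command": ["python", "-c", "import pyautogui; print(pyautogui.position())"]}

{"status": "success", "output": "Point(x=512, y=384)\n", "error": "", "returncode": 0}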


@@ -0,0 +1,29 @@
from pathlib import Path
from uuid import uuid4
import requests
from PIL import Image
from .base import BaseAnthropicTool, ToolError
from io import BytesIO
OUTPUT_DIR = "./tmp/outputs"
def get_screenshot(resize: bool = False, target_width: int = 1920, target_height: int = 1080):
"""Capture screenshot by requesting from HTTP endpoint - returns native resolution unless resized"""
output_dir = Path(OUTPUT_DIR)
output_dir.mkdir(parents=True, exist_ok=True)
path = output_dir / f"screenshot_{uuid4().hex}.png"
try:
response = requests.get('http://localhost:5000/screenshot')
if response.status_code != 200:
raise ToolError(f"Failed to capture screenshot: HTTP {response.status_code}")
# (1280, 800)
screenshot = Image.open(BytesIO(response.content))
if resize and screenshot.size != (target_width, target_height):
screenshot = screenshot.resize((target_width, target_height))
screenshot.save(path)
return screenshot, path
except Exception as e:
raise ToolError(f"Failed to capture screenshot: {str(e)}")
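A usage sketch (values illustrative; the saved filename contains a random hex suffix):

screenshot, path = get_screenshot(resize=True, target_width=1280, target_height=800)
print(path)  # ./tmp/outputs/screenshot_<hex>.png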

omnitool/omnibox/.gitignore

@@ -0,0 +1,4 @@
vm/win11iso/custom.iso
vm/win11storage
vm/win11setup/setupscripts/firstboot_log.txt
vm/win11setup/setupscripts/server/server.log


@@ -0,0 +1,48 @@
ARG VERSION_ARG="latest"
FROM scratch AS build-amd64
COPY --from=qemux/qemu-docker:6.08 / /
ARG DEBCONF_NOWARNINGS="yes"
ARG DEBIAN_FRONTEND="noninteractive"
ARG DEBCONF_NONINTERACTIVE_SEEN="true"
RUN set -eu && \
apt-get update && \
apt-get --no-install-recommends -y install \
bc \
jq \
curl \
7zip \
wsdd \
samba \
xz-utils \
wimtools \
dos2unix \
cabextract \
genisoimage \
libxml2-utils \
libarchive-tools && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --chmod=755 ./vm/buildcontainer /run/
RUN dos2unix /run/*
COPY --chmod=755 ./vm/win11def /run/assets
RUN dos2unix /run/assets/*
ADD --chmod=755 https://raw.githubusercontent.com/christgau/wsdd/v0.8/src/wsdd.py /usr/sbin/wsdd
ADD --chmod=664 https://github.com/qemus/virtiso-whql/releases/download/v1.9.43-0/virtio-win-1.9.43.tar.xz /drivers.txz
FROM dockurr/windows-arm:${VERSION_ARG} AS build-arm64
FROM build-${TARGETARCH}
ARG VERSION_ARG="0.00"
RUN echo "$VERSION_ARG" > /run/version
EXPOSE 8006 3389
ENV VERSION="win11e"
ENTRYPOINT ["/usr/bin/tini", "-s", "/run/entry.sh"]


@@ -0,0 +1,22 @@
services:
windows:
image: windows-local
container_name: omni-windows
privileged: true
environment:
RAM_SIZE: "8G"
CPU_CORES: "4"
DISK_SIZE: "20G"
devices:
- /dev/kvm
- /dev/net/tun
cap_add:
- NET_ADMIN
ports:
- 8006:8006 # Web Viewer access
volumes:
- ./vm/win11iso/custom.iso:/custom.iso
- ./vm/win11setup/firstboot:/oem
- ./vm/win11setup/setupscripts:/data
- ./vm/win11storage:/storage
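The manage_vm scripts in the next two files wrap this compose file; assuming it is saved as compose.yml, bringing the VM up by hand from its directory is equivalent to:

docker compose -f compose.yml up -d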


@@ -0,0 +1,70 @@
function Create-VM {
if (-not (docker images windows-local -q)) {
Write-Host "Image not found locally. Building..."
docker build -t windows-local ..
} else {
Write-Host "Image found locally. Skipping build."
}
docker compose -f ../compose.yml up -d
while ($true) {
try {
$response = Invoke-WebRequest -Uri "http://localhost:5000/probe" -Method GET -UseBasicParsing
if ($response.StatusCode -eq 200) {
break
}
} catch {
Write-Host "Waiting for a response from the computer control server. The first build of the VM storage folder can take a while..."
Start-Sleep -Seconds 5
}
}
Write-Host "VM + server is up and running!"
}
function Start-LocalVM {
Write-Host "Starting VM..."
docker compose -f ../compose.yml start
while ($true) {
try {
$response = Invoke-WebRequest -Uri "http://localhost:5000/probe" -Method GET -UseBasicParsing
if ($response.StatusCode -eq 200) {
break
}
} catch {
Write-Host "Waiting for a response from the computer control server"
Start-Sleep -Seconds 5
}
}
Write-Host "VM started"
}
function Stop-LocalVM {
Write-Host "Stopping VM..."
docker compose -f ../compose.yml stop
Write-Host "VM stopped"
}
function Remove-VM {
Write-Host "Removing VM and associated containers..."
docker compose -f ../compose.yml down
Write-Host "VM removed"
}
if (-not $args[0]) {
Write-Host "Usage: $($MyInvocation.MyCommand.Name) [create|start|stop|delete]"
exit 1
}
switch ($args[0]) {
"create" { Create-VM }
"start" { Start-LocalVM }
"stop" { Stop-LocalVM }
"delete" { Remove-VM }
default {
Write-Host "Invalid option: $($args[0])"
Write-Host "Usage: $($MyInvocation.MyCommand.Name) [create|start|stop|delete]"
exit 1
}
}
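Typical invocations, assuming the script is saved as manage_vm.ps1:

.\manage_vm.ps1 create   # build the image if needed, start the VM, wait for the control server
.\manage_vm.ps1 stop     # stop the container without removing it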


@@ -0,0 +1,77 @@
#!/bin/bash
create_vm() {
if ! docker images windows-local -q | grep -q .; then
echo "Image not found locally. Building..."
docker build -t windows-local ..
else
echo "Image found locally. Skipping build."
fi
docker compose -f ../compose.yml up -d
# Wait for the VM to start up
while true; do
# a TTY (-t) would taint curl's output with \r, so plain docker exec is used here
response=$(docker exec omni-windows bash -c "curl --write-out '%{http_code}' --silent --output /dev/null localhost:5000/probe")
if [ "${response:-0}" -eq 200 ] 2>/dev/null; then
break
fi
echo "Waiting for a response from the computer control server. The first build of the VM storage folder can take a while..."
sleep 5
done
echo "VM + server is up and running!"
}
start_vm() {
echo "Starting VM..."
docker compose -f ../compose.yml start
while true; do
response=$(docker exec omni-windows bash -c "curl --write-out '%{http_code}' --silent --output /dev/null localhost:5000/probe")
if [ "${response:-0}" -eq 200 ] 2>/dev/null; then
break
fi
echo "Waiting for a response from the computer control server"
sleep 5
done
echo "VM started"
}
stop_vm() {
echo "Stopping VM..."
docker compose -f ../compose.yml stop
echo "VM stopped"
}
delete_vm() {
echo "Removing VM and associated containers..."
docker compose -f ../compose.yml down
echo "VM removed"
}
# Check if control parameter is provided
if [ -z "$1" ]; then
echo "Usage: $0 [create|start|stop|delete]"
exit 1
fi
# Execute the appropriate function based on the control parameter
case "$1" in
"create")
create_vm
;;
"start")
start_vm
;;
"stop")
stop_vm
;;
"delete")
delete_vm
;;
*)
echo "Invalid option: $1"
echo "Usage: $0 [create|start|stop|delete]"
exit 1
;;
esac


@@ -0,0 +1,410 @@
#!/usr/bin/env bash
set -Eeuo pipefail
: "${WIDTH:=""}"
: "${HEIGHT:=""}"
: "${VERIFY:=""}"
: "${REGION:=""}"
: "${MANUAL:=""}"
: "${REMOVE:=""}"
: "${VERSION:=""}"
: "${DETECTED:=""}"
: "${KEYBOARD:=""}"
: "${LANGUAGE:=""}"
: "${USERNAME:=""}"
: "${PASSWORD:=""}"
MIRRORS=4
PLATFORM="x64"
parseVersion() {
if [[ "${VERSION}" == \"*\" || "${VERSION}" == \'*\' ]]; then
VERSION="${VERSION:1:-1}"
fi
[ -z "$VERSION" ] && VERSION="win11"
case "${VERSION,,}" in
"11" | "11p" | "win11" | "pro11" | "win11p" | "windows11" | "windows 11" )
VERSION="win11x64"
;;
"11e" | "win11e" | "windows11e" | "windows 11e" | "win11x64-enterprise-eval" )
VERSION="win11x64-enterprise-eval"
;;
esac
return 0
}
getLanguage() {
local id="$1"
local ret="$2"
local lang=""
local desc=""
local culture=""
case "${id,,}" in
"ar" | "ar-"* )
lang="Arabic"
desc="$lang"
culture="ar-SA" ;;
"bg" | "bg-"* )
lang="Bulgarian"
desc="$lang"
culture="bg-BG" ;;
"cs" | "cs-"* | "cz" | "cz-"* )
lang="Czech"
desc="$lang"
culture="cs-CZ" ;;
"da" | "da-"* | "dk" | "dk-"* )
lang="Danish"
desc="$lang"
culture="da-DK" ;;
"de" | "de-"* )
lang="German"
desc="$lang"
culture="de-DE" ;;
"el" | "el-"* | "gr" | "gr-"* )
lang="Greek"
desc="$lang"
culture="el-GR" ;;
"gb" | "en-gb" )
lang="English International"
desc="English"
culture="en-GB" ;;
"en" | "en-"* )
lang="English"
desc="English"
culture="en-US" ;;
"mx" | "es-mx" )
lang="Spanish (Mexico)"
desc="Spanish"
culture="es-MX" ;;
"es" | "es-"* )
lang="Spanish"
desc="$lang"
culture="es-ES" ;;
"et" | "et-"* )
lang="Estonian"
desc="$lang"
culture="et-EE" ;;
"fi" | "fi-"* )
lang="Finnish"
desc="$lang"
culture="fi-FI" ;;
"ca" | "fr-ca" )
lang="French Canadian"
desc="French"
culture="fr-CA" ;;
"fr" | "fr-"* )
lang="French"
desc="$lang"
culture="fr-FR" ;;
"he" | "he-"* | "il" | "il-"* )
lang="Hebrew"
desc="$lang"
culture="he-IL" ;;
"hr" | "hr-"* | "cr" | "cr-"* )
lang="Croatian"
desc="$lang"
culture="hr-HR" ;;
"hu" | "hu-"* )
lang="Hungarian"
desc="$lang"
culture="hu-HU" ;;
"it" | "it-"* )
lang="Italian"
desc="$lang"
culture="it-IT" ;;
"ja" | "ja-"* | "jp" | "jp-"* )
lang="Japanese"
desc="$lang"
culture="ja-JP" ;;
"ko" | "ko-"* | "kr" | "kr-"* )
lang="Korean"
desc="$lang"
culture="ko-KR" ;;
"lt" | "lt-"* )
lang="Lithuanian"
desc="$lang"
culture="lt-LT" ;;
"lv" | "lv-"* )
lang="Latvian"
desc="$lang"
culture="lv-LV" ;;
"nb" | "nb-"* |"nn" | "nn-"* | "no" | "no-"* )
lang="Norwegian"
desc="$lang"
culture="nb-NO" ;;
"nl" | "nl-"* )
lang="Dutch"
desc="$lang"
culture="nl-NL" ;;
"pl" | "pl-"* )
lang="Polish"
desc="$lang"
culture="pl-PL" ;;
"br" | "pt-br" )
lang="Brazilian Portuguese"
desc="Portuguese"
culture="pt-BR" ;;
"pt" | "pt-"* )
lang="Portuguese"
desc="$lang"
culture="pt-BR" ;;
"ro" | "ro-"* )
lang="Romanian"
desc="$lang"
culture="ro-RO" ;;
"ru" | "ru-"* )
lang="Russian"
desc="$lang"
culture="ru-RU" ;;
"sk" | "sk-"* )
lang="Slovak"
desc="$lang"
culture="sk-SK" ;;
"sl" | "sl-"* | "si" | "si-"* )
lang="Slovenian"
desc="$lang"
culture="sl-SI" ;;
"sr" | "sr-"* )
lang="Serbian Latin"
desc="Serbian"
culture="sr-Latn-RS" ;;
"sv" | "sv-"* | "se" | "se-"* )
lang="Swedish"
desc="$lang"
culture="sv-SE" ;;
"th" | "th-"* )
lang="Thai"
desc="$lang"
culture="th-TH" ;;
"tr" | "tr-"* )
lang="Turkish"
desc="$lang"
culture="tr-TR" ;;
"ua" | "ua-"* | "uk" | "uk-"* )
lang="Ukrainian"
desc="$lang"
culture="uk-UA" ;;
"hk" | "zh-hk" | "cn-hk" )
lang="Chinese (Traditional)"
desc="Chinese HK"
culture="zh-TW" ;;
"tw" | "zh-tw" | "cn-tw" )
lang="Chinese (Traditional)"
desc="Chinese TW"
culture="zh-TW" ;;
"zh" | "zh-"* | "cn" | "cn-"* )
lang="Chinese (Simplified)"
desc="Chinese"
culture="zh-CN" ;;
esac
case "${ret,,}" in
"desc" ) echo "$desc" ;;
"name" ) echo "$lang" ;;
"culture" ) echo "$culture" ;;
*) echo "$desc";;
esac
return 0
}
parseLanguage() {
REGION="${REGION//_/-}"
KEYBOARD="${KEYBOARD//_/-}"
LANGUAGE="${LANGUAGE//_/-}"
[ -z "$LANGUAGE" ] && LANGUAGE="en"
case "${LANGUAGE,,}" in
"arabic" | "arab" ) LANGUAGE="ar" ;;
"bulgarian" | "bu" ) LANGUAGE="bg" ;;
"chinese" | "cn" ) LANGUAGE="zh" ;;
"croatian" | "cr" | "hrvatski" ) LANGUAGE="hr" ;;
"czech" | "cz" | "cesky" ) LANGUAGE="cs" ;;
"danish" | "dk" | "danske" ) LANGUAGE="da" ;;
"dutch" | "nederlands" ) LANGUAGE="nl" ;;
"english" | "gb" | "british" ) LANGUAGE="en" ;;
"estonian" | "eesti" ) LANGUAGE="et" ;;
"finnish" | "suomi" ) LANGUAGE="fi" ;;
"french" | "français" | "francais" ) LANGUAGE="fr" ;;
"german" | "deutsch" ) LANGUAGE="de" ;;
"greek" | "gr" ) LANGUAGE="el" ;;
"hebrew" | "il" ) LANGUAGE="he" ;;
"hungarian" | "magyar" ) LANGUAGE="hu" ;;
"italian" | "italiano" ) LANGUAGE="it" ;;
"japanese" | "jp" ) LANGUAGE="ja" ;;
"korean" | "kr" ) LANGUAGE="ko" ;;
"latvian" | "latvijas" ) LANGUAGE="lv" ;;
"lithuanian" | "lietuvos" ) LANGUAGE="lt" ;;
"norwegian" | "no" | "nb" | "norsk" ) LANGUAGE="nn" ;;
"polish" | "polski" ) LANGUAGE="pl" ;;
"portuguese" | "pt" | "br" ) LANGUAGE="pt-br" ;;
"português" | "portugues" ) LANGUAGE="pt-br" ;;
"romanian" | "română" | "romana" ) LANGUAGE="ro" ;;
"russian" | "ruski" ) LANGUAGE="ru" ;;
"serbian" | "serbian latin" ) LANGUAGE="sr" ;;
"slovak" | "slovenský" | "slovensky" ) LANGUAGE="sk" ;;
"slovenian" | "si" | "slovenski" ) LANGUAGE="sl" ;;
"spanish" | "espanol" | "español" ) LANGUAGE="es" ;;
"swedish" | "se" | "svenska" ) LANGUAGE="sv" ;;
"turkish" | "türk" | "turk" ) LANGUAGE="tr" ;;
"thai" ) LANGUAGE="th" ;;
"ukrainian" | "ua" ) LANGUAGE="uk" ;;
esac
local culture
culture=$(getLanguage "$LANGUAGE" "culture")
[ -n "$culture" ] && return 0
error "Invalid LANGUAGE specified, value \"$LANGUAGE\" is not recognized!"
return 1
}
printVersion() {
local id="$1"
local desc="$2"
case "${id,,}" in
"win11"* ) desc="Windows 11" ;;
esac
if [ -z "$desc" ]; then
desc="Windows"
[[ "${PLATFORM,,}" != "x64" ]] && desc+=" for ${PLATFORM}"
fi
echo "$desc"
return 0
}
printEdition() {
local id="$1"
local desc="$2"
local result=""
local edition=""
result=$(printVersion "$id" "x")
[[ "$result" == "x" ]] && echo "$desc" && return 0
case "${id,,}" in
*"-enterprise" )
edition="Enterprise"
;;
*"-enterprise-eval" )
edition="Enterprise (Evaluation)"
;;
esac
[ -n "$edition" ] && result+=" $edition"
echo "$result"
return 0
}
fromName() {
local id=""
local name="$1"
local arch="$2"
local add=""
[[ "$arch" != "x64" ]] && add="$arch"
case "${name,,}" in
*"windows 11"* ) id="win11${arch}" ;;
esac
echo "$id"
return 0
}
getVersion() {
local id
local name="$1"
local arch="$2"
id=$(fromName "$name" "$arch")
case "${id,,}" in
"win11"* )
case "${name,,}" in
*" enterprise evaluation"* ) id="$id-enterprise-eval" ;;
*" enterprise"* ) id="$id-enterprise" ;;
esac
;;
esac
echo "$id"
return 0
}
addFolder() {
local src="$1"
local folder="/oem"
[ ! -d "$folder" ] && folder="/OEM"
[ ! -d "$folder" ] && folder="$STORAGE/oem"
[ ! -d "$folder" ] && folder="$STORAGE/OEM"
[ ! -d "$folder" ] && return 0
local msg="Adding OEM folder to image..."
info "$msg" && html "$msg"
local dest="$src/\$OEM\$/\$1/OEM"
mkdir -p "$dest" || return 1
cp -Lr "$folder/." "$dest" || return 1
local file
file=$(find "$dest" -maxdepth 1 -type f -iname install.bat | head -n 1)
[ -f "$file" ] && unix2dos -q "$file"
return 0
}
# migrateFiles() {
# local base="$1"
# local version="$2"
# local file=""
# [ -f "$base" ] && return 0
# [[ "${version,,}" == "tiny10" ]] && file="tiny10_x64_23h2.iso"
# [[ "${version,,}" == "tiny11" ]] && file="tiny11_2311_x64.iso"
# [[ "${version,,}" == "core11" ]] && file="tiny11_core_x64_beta_1.iso"
# [[ "${version,,}" == "winxpx86" ]] && file="en_windows_xp_professional_with_service_pack_3_x86_cd_x14-80428.iso"
# [[ "${version,,}" == "winvistax64" ]] && file="en_windows_vista_sp2_x64_dvd_342267.iso"
# [[ "${version,,}" == "win7x64" ]] && file="en_windows_7_enterprise_with_sp1_x64_dvd_u_677651.iso"
# [ ! -f "$STORAGE/$file" ] && return 0
# mv -f "$STORAGE/$file" "$base" || return 1
# return 0
# }
migrateFiles() {
local base="$1"
local version="$2"
local file=""
[ -f "$base" ] && return 0
[ ! -f "$STORAGE/$file" ] && return 0
mv -f "$STORAGE/$file" "$base" || return 1
return 0
}
return 0


@@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -Eeuo pipefail
: "${BOOT_MODE:="windows"}"
APP="OmniParser Windows"
SUPPORT="https://github.com/microsoft/OmniParser"
cd /run
. reset.sh # Initialize system
. define.sh # Define versions
. install.sh # Run installation
. disk.sh # Initialize disks
. display.sh # Initialize graphics
. network.sh # Initialize network
. samba.sh # Configure samba
. boot.sh # Configure boot
. proc.sh # Initialize processor
. power.sh # Configure shutdown
. config.sh # Configure arguments
trap - ERR
version=$(qemu-system-x86_64 --version | head -n 1 | cut -d '(' -f 1 | awk '{ print $NF }')
info "Booting ${APP}${BOOT_DESC} using QEMU v$version..."
{ qemu-system-x86_64 ${ARGS:+ $ARGS} >"$QEMU_OUT" 2>"$QEMU_LOG"; rc=$?; } || :
(( rc != 0 )) && error "$(<"$QEMU_LOG")" && exit 15
terminal
( sleep 30; boot ) &
tail -fn +0 "$QEMU_LOG" 2>/dev/null &
cat "$QEMU_TERM" 2> /dev/null | tee "$QEMU_PTY" &
wait $! || :
sleep 1 & wait $!
[ ! -f "$QEMU_END" ] && finish 0

File diff suppressed because it is too large.


@@ -0,0 +1,223 @@
#!/usr/bin/env bash
set -Eeuo pipefail
# Configure QEMU for graceful shutdown
QEMU_TERM=""
QEMU_PORT=7100
QEMU_TIMEOUT=110
QEMU_DIR="/run/shm"
QEMU_PID="$QEMU_DIR/qemu.pid"
QEMU_PTY="$QEMU_DIR/qemu.pty"
QEMU_LOG="$QEMU_DIR/qemu.log"
QEMU_OUT="$QEMU_DIR/qemu.out"
QEMU_END="$QEMU_DIR/qemu.end"
rm -f "$QEMU_DIR"/qemu.* # keep the glob outside the quotes so it expands
touch "$QEMU_LOG"
_trap() {
func="$1" ; shift
for sig ; do
trap "$func $sig" "$sig"
done
}
boot() {
[ -f "$QEMU_END" ] && return 0
if [ -s "$QEMU_PTY" ]; then
if [ "$(stat -c%s "$QEMU_PTY")" -gt 7 ]; then
local fail=""
if [[ "${BOOT_MODE,,}" == "windows_legacy" ]]; then
grep -Fq "No bootable device." "$QEMU_PTY" && fail="y"
grep -Fq "BOOTMGR is missing" "$QEMU_PTY" && fail="y"
fi
if [ -z "$fail" ]; then
info "Windows has started successfully. You can directly view the VM at http://localhost:8006/vnc.html?view_only=1&autoconnect=1&resize=scale. Wait until setup is complete before interacting manually."
return 0
fi
fi
fi
error "Timeout while waiting for QEMU to boot the machine!"
local pid
pid=$(<"$QEMU_PID")
{ kill -15 "$pid" || true; } 2>/dev/null
return 0
}
ready() {
[ -f "$STORAGE/windows.boot" ] && return 0
[ ! -s "$QEMU_PTY" ] && return 1
if [[ "${BOOT_MODE,,}" == "windows_legacy" ]]; then
local last
local bios="Booting from Hard"
last=$(grep "^Booting.*" "$QEMU_PTY" | tail -1)
[[ "${last,,}" != "${bios,,}"* ]] && return 1
grep -Fq "No bootable device." "$QEMU_PTY" && return 1
grep -Fq "BOOTMGR is missing" "$QEMU_PTY" && return 1
return 0
fi
local line="\"Windows Boot Manager\""
grep -Fq "$line" "$QEMU_PTY" && return 0
return 1
}
finish() {
local pid
local reason=$1
touch "$QEMU_END"
if [ -s "$QEMU_PID" ]; then
pid=$(<"$QEMU_PID")
error "Forcefully terminating Windows, reason: $reason..."
{ kill -15 "$pid" || true; } 2>/dev/null
while isAlive "$pid"; do
sleep 1
# Workaround for zombie pid
[ ! -s "$QEMU_PID" ] && break
done
fi
if [ ! -f "$STORAGE/windows.boot" ] && [ -f "$BOOT" ]; then
# Remove CD-ROM ISO after install
if ready; then
touch "$STORAGE/windows.boot"
if [[ "$REMOVE" != [Nn]* ]]; then
rm -f "$BOOT" 2>/dev/null || true
fi
fi
fi
pid="/var/run/tpm.pid"
[ -s "$pid" ] && pKill "$(<"$pid")"
pid="/var/run/wsdd.pid"
[ -s "$pid" ] && pKill "$(<"$pid")"
fKill "smbd"
closeNetwork
sleep 0.5
echo " Shutdown completed!"
exit "$reason"
}
terminal() {
local dev=""
if [ -s "$QEMU_OUT" ]; then
local msg
msg=$(<"$QEMU_OUT")
if [ -n "$msg" ]; then
if [[ "${msg,,}" != "char"* || "$msg" != *"serial0)" ]]; then
echo "$msg"
fi
dev="${msg#*/dev/p}"
dev="/dev/p${dev%% *}"
fi
fi
if [ ! -c "$dev" ]; then
dev=$(echo 'info chardev' | nc -q 1 -w 1 localhost "$QEMU_PORT" | tr -d '\000')
dev="${dev#*serial0}"
dev="${dev#*pty:}"
dev="${dev%%$'\n'*}"
dev="${dev%%$'\r'*}"
fi
if [ ! -c "$dev" ]; then
error "Device '$dev' not found!"
finish 34 && return 34
fi
QEMU_TERM="$dev"
return 0
}
_graceful_shutdown() {
local code=$?
set +e
if [ -f "$QEMU_END" ]; then
info "Received $1 while already shutting down..."
return
fi
touch "$QEMU_END"
info "Received $1, sending ACPI shutdown signal..."
if [ ! -s "$QEMU_PID" ]; then
error "QEMU PID file does not exist?"
finish "$code" && return "$code"
fi
local pid=""
pid=$(<"$QEMU_PID")
if ! isAlive "$pid"; then
error "QEMU process does not exist?"
finish "$code" && return "$code"
fi
if ! ready; then
info "Cannot send ACPI signal during Windows setup, aborting..."
finish "$code" && return "$code"
fi
# Send ACPI shutdown signal
echo 'system_powerdown' | nc -q 1 -w 1 localhost "${QEMU_PORT}" > /dev/null
local cnt=0
while [ "$cnt" -lt "$QEMU_TIMEOUT" ]; do
sleep 1
cnt=$((cnt+1))
! isAlive "$pid" && break
# Workaround for zombie pid
[ ! -s "$QEMU_PID" ] && break
info "Waiting for Windows to shutdown... ($cnt/$QEMU_TIMEOUT)"
# Send ACPI shutdown signal
echo 'system_powerdown' | nc -q 1 -w 1 localhost "${QEMU_PORT}" > /dev/null
done
if [ "$cnt" -ge "$QEMU_TIMEOUT" ]; then
error "Shutdown timeout reached, aborting..."
fi
finish "$code" && return "$code"
}
SERIAL="pty"
MONITOR="telnet:localhost:$QEMU_PORT,server,nowait,nodelay"
MONITOR+=" -daemonize -D $QEMU_LOG -pidfile $QEMU_PID"
_trap _graceful_shutdown SIGTERM SIGHUP SIGINT SIGABRT SIGQUIT
return 0


@@ -0,0 +1,109 @@
#!/usr/bin/env bash
set -Eeuo pipefail
: "${SAMBA:="Y"}"
[[ "$SAMBA" == [Nn]* ]] && return 0
[[ "$NETWORK" == [Nn]* ]] && return 0
hostname="host.lan"
interface="dockerbridge"
if [[ "$DHCP" == [Yy1]* ]]; then
hostname="$IP"
interface="$VM_NET_DEV"
fi
addShare() {
local dir="$1"
local name="$2"
local comment="$3"
mkdir -p "$dir" || return 1
if [ -z "$(ls -A "$dir")" ]; then
chmod 777 "$dir"
{ echo "--------------------------------------------------------"
echo " $APP"
echo " For support visit $SUPPORT"
echo "--------------------------------------------------------"
echo ""
echo "Using this folder you can share files with the host machine."
echo ""
echo "To change its location, include the following bind mount in your compose file:"
echo ""
echo " volumes:"
echo " - \"/home/example:/${name,,}\""
echo ""
echo "Or in your run command:"
echo ""
echo " -v \"/home/example:/${name,,}\""
echo ""
echo "Replace the example path /home/example with the desired shared folder."
echo ""
} | unix2dos > "$dir/readme.txt"
fi
{ echo ""
echo "[$name]"
echo " path = $dir"
echo " comment = $comment"
echo " writable = yes"
echo " guest ok = yes"
echo " guest only = yes"
echo " force user = root"
echo " force group = root"
} >> "/etc/samba/smb.conf"
return 0
}
{ echo "[global]"
echo " server string = Dockur"
echo " netbios name = $hostname"
echo " workgroup = WORKGROUP"
echo " interfaces = $interface"
echo " bind interfaces only = yes"
echo " security = user"
echo " guest account = nobody"
echo " map to guest = Bad User"
echo " server min protocol = NT1"
echo ""
echo " # disable printing services"
echo " load printers = no"
echo " printing = bsd"
echo " printcap name = /dev/null"
echo " disable spoolss = yes"
} > "/etc/samba/smb.conf"
share="/data"
[ ! -d "$share" ] && [ -d "$STORAGE/data" ] && share="$STORAGE/data"
[ ! -d "$share" ] && [ -d "/shared" ] && share="/shared"
[ ! -d "$share" ] && [ -d "$STORAGE/shared" ] && share="$STORAGE/shared"
addShare "$share" "Data" "Shared" || error "Failed to create shared folder!"
[ -d "/data2" ] && addShare "/data2" "Data2" "Shared"
[ -d "/data3" ] && addShare "/data3" "Data3" "Shared"
if ! smbd; then
error "Samba daemon failed to start!"
smbd -i --debug-stdout || true
fi
if [[ "${BOOT_MODE:-}" == "windows_legacy" ]]; then
# Enable NetBIOS on Windows 7 and lower
if ! nmbd; then
error "NetBIOS daemon failed to start!"
nmbd -i --debug-stdout || true
fi
else
# Enable Web Service Discovery on Vista and up
wsdd -i "$interface" -p -n "$hostname" &
echo "$!" > /var/run/wsdd.pid
fi
return 0


@@ -0,0 +1,462 @@
<?xml version="1.0" encoding="UTF-8"?>
<unattend xmlns="urn:schemas-microsoft-com:unattend" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State">
<settings pass="windowsPE">
<component name="Microsoft-Windows-International-Core-WinPE" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<SetupUILanguage>
<UILanguage>en-US</UILanguage>
</SetupUILanguage>
<InputLocale>0409:00000409</InputLocale>
<SystemLocale>en-US</SystemLocale>
<UILanguage>en-US</UILanguage>
<UserLocale>en-US</UserLocale>
</component>
<component name="Microsoft-Windows-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<DiskConfiguration>
<Disk wcm:action="add">
<DiskID>0</DiskID>
<WillWipeDisk>true</WillWipeDisk>
<CreatePartitions>
<!-- System partition (ESP) -->
<CreatePartition wcm:action="add">
<Order>1</Order>
<Type>EFI</Type>
<Size>128</Size>
</CreatePartition>
<!-- Microsoft reserved partition (MSR) -->
<CreatePartition wcm:action="add">
<Order>2</Order>
<Type>MSR</Type>
<Size>128</Size>
</CreatePartition>
<!-- Windows partition -->
<CreatePartition wcm:action="add">
<Order>3</Order>
<Type>Primary</Type>
<Extend>true</Extend>
</CreatePartition>
</CreatePartitions>
<ModifyPartitions>
<!-- System partition (ESP) -->
<ModifyPartition wcm:action="add">
<Order>1</Order>
<PartitionID>1</PartitionID>
<Label>System</Label>
<Format>FAT32</Format>
</ModifyPartition>
<!-- MSR partition does not need to be modified -->
<ModifyPartition wcm:action="add">
<Order>2</Order>
<PartitionID>2</PartitionID>
</ModifyPartition>
<!-- Windows partition -->
<ModifyPartition wcm:action="add">
<Order>3</Order>
<PartitionID>3</PartitionID>
<Label>Windows</Label>
<Letter>C</Letter>
<Format>NTFS</Format>
</ModifyPartition>
</ModifyPartitions>
</Disk>
</DiskConfiguration>
<ImageInstall>
<OSImage>
<InstallTo>
<DiskID>0</DiskID>
<PartitionID>3</PartitionID>
</InstallTo>
<InstallToAvailablePartition>false</InstallToAvailablePartition>
</OSImage>
</ImageInstall>
<DynamicUpdate>
<Enable>true</Enable>
<WillShowUI>Never</WillShowUI>
</DynamicUpdate>
<UpgradeData>
<Upgrade>false</Upgrade>
<WillShowUI>Never</WillShowUI>
</UpgradeData>
<UserData>
<AcceptEula>true</AcceptEula>
<FullName>Docker</FullName>
<Organization>Windows for Docker</Organization>
</UserData>
<EnableFirewall>false</EnableFirewall>
<Diagnostics>
<OptIn>false</OptIn>
</Diagnostics>
<RunSynchronous>
<RunSynchronousCommand wcm:action="add">
<Order>1</Order>
<Path>reg.exe add "HKLM\SYSTEM\Setup\LabConfig" /v BypassTPMCheck /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>2</Order>
<Path>reg.exe add "HKLM\SYSTEM\Setup\LabConfig" /v BypassSecureBootCheck /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>3</Order>
<Path>reg.exe add "HKLM\SYSTEM\Setup\LabConfig" /v BypassRAMCheck /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>4</Order>
<Path>reg.exe add "HKLM\SYSTEM\Setup\MoSetup" /v AllowUpgradesWithUnsupportedTPMOrCPU /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
</RunSynchronous>
</component>
</settings>
<settings pass="offlineServicing">
<component name="Microsoft-Windows-LUA-Settings" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<EnableLUA>false</EnableLUA>
</component>
</settings>
<settings pass="generalize">
<component name="Microsoft-Windows-PnPSysprep" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<PersistAllDeviceInstalls>true</PersistAllDeviceInstalls>
</component>
<component name="Microsoft-Windows-Security-SPP" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<SkipRearm>1</SkipRearm>
</component>
</settings>
<settings pass="specialize">
<component name="Microsoft-Windows-Security-SPP-UX" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<SkipAutoActivation>true</SkipAutoActivation>
</component>
<component name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<ComputerName>*</ComputerName>
<OEMInformation>
<Manufacturer>Dockur</Manufacturer>
<Model>Windows for Docker</Model>
<SupportHours>24/7</SupportHours>
<SupportPhone />
<SupportProvider>Dockur</SupportProvider>
<SupportURL>https://github.com/dockur/windows/issues</SupportURL>
</OEMInformation>
<OEMName>Windows for Docker</OEMName>
</component>
<component name="Microsoft-Windows-ErrorReportingCore" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<DisableWER>1</DisableWER>
</component>
<component name="Microsoft-Windows-IE-InternetExplorer" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<DisableAccelerators>true</DisableAccelerators>
<DisableFirstRunWizard>true</DisableFirstRunWizard>
<Home_Page>https://google.com</Home_Page>
<Help_Page>about:blank</Help_Page>
</component>
<component name="Microsoft-Windows-IE-InternetExplorer" processorArchitecture="wow64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<DisableAccelerators>true</DisableAccelerators>
<DisableFirstRunWizard>true</DisableFirstRunWizard>
<Home_Page>https://google.com</Home_Page>
<Help_Page>about:blank</Help_Page>
</component>
<component name="Microsoft-Windows-SQMApi" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<CEIPEnabled>0</CEIPEnabled>
</component>
<component name="Microsoft-Windows-SystemRestore-Main" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<DisableSR>1</DisableSR>
</component>
<component name="Microsoft-Windows-International-Core" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<InputLocale>0409:00000409</InputLocale>
<SystemLocale>en-US</SystemLocale>
<UILanguage>en-US</UILanguage>
<UserLocale>en-US</UserLocale>
</component>
<component name="Microsoft-Windows-Deployment" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<RunSynchronous>
<RunSynchronousCommand wcm:action="add">
<Order>1</Order>
<Path>reg.exe add "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\OOBE" /v BypassNRO /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>2</Order>
<Path>reg.exe load "HKU\mount" "C:\Users\Default\NTUSER.DAT"</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>3</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "ContentDeliveryAllowed" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>4</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "FeatureManagementEnabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>5</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "OEMPreInstalledAppsEnabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>6</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "PreInstalledAppsEnabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>7</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "PreInstalledAppsEverEnabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>8</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SilentInstalledAppsEnabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>9</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SoftLandingEnabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>10</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SubscribedContentEnabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>11</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SubscribedContent-310093Enabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>12</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SubscribedContent-338387Enabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>13</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SubscribedContent-338388Enabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>14</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SubscribedContent-338389Enabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>15</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SubscribedContent-338393Enabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>16</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SubscribedContent-353698Enabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>17</Order>
<Path>reg.exe add "HKU\mount\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager" /v "SystemPaneSuggestionsEnabled" /t REG_DWORD /d 0 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>18</Order>
<Path>reg.exe add "HKU\mount\Software\Policies\Microsoft\Windows\CloudContent" /v "DisableCloudOptimizedContent" /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>19</Order>
<Path>reg.exe add "HKU\mount\Software\Policies\Microsoft\Windows\CloudContent" /v "DisableWindowsConsumerFeatures" /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>20</Order>
<Path>reg.exe add "HKU\mount\Software\Policies\Microsoft\Windows\CloudContent" /v "DisableConsumerAccountStateContent" /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>21</Order>
<Path>reg.exe unload "HKU\mount"</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>22</Order>
<Path>reg.exe add "HKLM\Software\Policies\Microsoft\Windows\CloudContent" /v "DisableCloudOptimizedContent" /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>23</Order>
<Path>reg.exe add "HKLM\Software\Policies\Microsoft\Windows\CloudContent" /v "DisableWindowsConsumerFeatures" /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>24</Order>
<Path>reg.exe add "HKLM\Software\Policies\Microsoft\Windows\CloudContent" /v "DisableConsumerAccountStateContent" /t REG_DWORD /d 1 /f</Path>
</RunSynchronousCommand>
<RunSynchronousCommand wcm:action="add">
<Order>25</Order>
<Path>reg.exe add "HKLM\SOFTWARE\Policies\Microsoft\Windows NT\CurrentVersion\NetworkList\Signatures\FirstNetwork" /v Category /t REG_DWORD /d 1 /f</Path>
<Description>Set Network Location to Home</Description>
</RunSynchronousCommand>
</RunSynchronous>
</component>
<component name="Microsoft-Windows-TerminalServices-LocalSessionManager" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<fDenyTSConnections>false</fDenyTSConnections>
</component>
<component name="Microsoft-Windows-TerminalServices-RDP-WinStationExtensions" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<UserAuthentication>0</UserAuthentication>
</component>
<component name="Networking-MPSSVC-Svc" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<FirewallGroups>
<FirewallGroup wcm:action="add" wcm:keyValue="RemoteDesktop">
<Active>true</Active>
<Profile>all</Profile>
<Group>@FirewallAPI.dll,-28752</Group>
</FirewallGroup>
</FirewallGroups>
</component>
</settings>
<settings pass="auditSystem" />
<settings pass="auditUser" />
<settings pass="oobeSystem">
<component name="Microsoft-Windows-SecureStartup-FilterDriver" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<PreventDeviceEncryption>true</PreventDeviceEncryption>
</component>
<component name="Microsoft-Windows-EnhancedStorage-Adm" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<TCGSecurityActivationDisabled>1</TCGSecurityActivationDisabled>
</component>
<component name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
<UserAccounts>
<LocalAccounts>
<LocalAccount wcm:action="add">
<Name>Docker</Name>
<Group>Administrators</Group>
<Password>
<Value />
<PlainText>true</PlainText>
</Password>
</LocalAccount>
</LocalAccounts>
<AdministratorPassword>
<Value>password</Value>
<PlainText>true</PlainText>
</AdministratorPassword>
</UserAccounts>
<AutoLogon>
<Username>Docker</Username>
<Enabled>true</Enabled>
<LogonCount>65432</LogonCount>
<Password>
<Value />
<PlainText>true</PlainText>
</Password>
</AutoLogon>
<Display>
<ColorDepth>32</ColorDepth>
<HorizontalResolution>1920</HorizontalResolution>
<VerticalResolution>1080</VerticalResolution>
</Display>
<OOBE>
<HideEULAPage>true</HideEULAPage>
<HideLocalAccountScreen>true</HideLocalAccountScreen>
<HideOEMRegistrationScreen>true</HideOEMRegistrationScreen>
<HideOnlineAccountScreens>true</HideOnlineAccountScreens>
<HideWirelessSetupInOOBE>true</HideWirelessSetupInOOBE>
<NetworkLocation>Home</NetworkLocation>
<ProtectYourPC>3</ProtectYourPC>
<SkipUserOOBE>true</SkipUserOOBE>
<SkipMachineOOBE>true</SkipMachineOOBE>
</OOBE>
<RegisteredOrganization>Dockur</RegisteredOrganization>
<RegisteredOwner>Windows for Docker</RegisteredOwner>
<FirstLogonCommands>
<SynchronousCommand wcm:action="add">
<Order>1</Order>
<CommandLine>reg.exe add "HKLM\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters" /v "AllowInsecureGuestAuth" /t REG_DWORD /d 1 /f</CommandLine>
<Description>Allow guest access to network shares</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>2</Order>
<CommandLine>reg.exe add "HKLM\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters" /v "RequireSecuritySignature" /t REG_DWORD /d 0 /f</CommandLine>
<Description>Disable SMB signing requirement</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>3</Order>
<CommandLine>reg.exe add "HKLM\SYSTEM\CurrentControlSet\Control\Lsa" /v LimitBlankPasswordUse /t REG_DWORD /d 0 /f</CommandLine>
<Description>Allow RDP login with blank password</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>4</Order>
<CommandLine>reg.exe add "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\PasswordLess\Device" /v "DevicePasswordLessBuildVersion" /t REG_DWORD /d 0 /f</CommandLine>
<Description>Enable option for passwordless sign-in</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>5</Order>
<CommandLine>cmd /C wmic useraccount where name="Docker" set PasswordExpires=false</CommandLine>
<Description>Password Never Expires</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>6</Order>
<CommandLine>cmd /C POWERCFG -H OFF</CommandLine>
<Description>Disable Hibernation</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>7</Order>
<CommandLine>cmd /C POWERCFG -X -monitor-timeout-ac 0</CommandLine>
<Description>Disable monitor blanking</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>8</Order>
<CommandLine>reg.exe add "HKLM\SOFTWARE\Policies\Microsoft\Edge" /v "HideFirstRunExperience" /t REG_DWORD /d 1 /f</CommandLine>
<Description>Disable first-run experience in Edge</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>9</Order>
<CommandLine>reg.exe add "HKCU\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Advanced" /v "HideFileExt" /t REG_DWORD /d 0 /f</CommandLine>
<Description>Show file extensions in Explorer</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>10</Order>
<CommandLine>reg.exe add "HKLM\SYSTEM\CurrentControlSet\Control\Power" /v "HibernateFileSizePercent" /t REG_DWORD /d 0 /f</CommandLine>
<Description>Zero Hibernation File</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>11</Order>
<CommandLine>reg.exe add "HKLM\SYSTEM\CurrentControlSet\Control\Power" /v "HibernateEnabled" /t REG_DWORD /d 0 /f</CommandLine>
<Description>Disable Hibernation</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>12</Order>
<CommandLine>cmd /C POWERCFG -X -standby-timeout-ac 0</CommandLine>
<Description>Disable Sleep</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>13</Order>
<CommandLine>reg.exe add "HKLM\SOFTWARE\Policies\Microsoft\Windows NT\Terminal Services" /v "fAllowUnlistedRemotePrograms" /t REG_DWORD /d 1 /f</CommandLine>
<Description>Enable RemoteAPP to launch unlisted programs</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>14</Order>
<CommandLine>reg.exe add "HKCU\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Advanced" /v "ShowTaskViewButton" /t REG_DWORD /d 0 /f</CommandLine>
<Description>Remove Task View from the Taskbar</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>15</Order>
<CommandLine>reg.exe add "HKCU\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Advanced" /v "TaskbarDa" /t REG_DWORD /d 0 /f</CommandLine>
<Description>Remove Widgets from the Taskbar</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>16</Order>
<CommandLine>reg.exe add "HKCU\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Advanced" /v "TaskbarMn" /t REG_DWORD /d 0 /f</CommandLine>
<Description>Remove Chat from the Taskbar</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>17</Order>
<CommandLine>reg.exe add "HKLM\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate\AU" /v "NoAutoUpdate" /t REG_DWORD /d 1 /f</CommandLine>
<Description>Turn off Windows Update auto download</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>18</Order>
<CommandLine>netsh advfirewall firewall set rule group="@FirewallAPI.dll,-32752" new enable=Yes</CommandLine>
<Description>Enable Network Discovery</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>19</Order>
<CommandLine>netsh advfirewall firewall set rule group="@FirewallAPI.dll,-28502" new enable=Yes</CommandLine>
<Description>Enable File Sharing</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>20</Order>
<CommandLine>reg.exe add "HKCU\Control Panel\UnsupportedHardwareNotificationCache" /v SV1 /d 0 /t REG_DWORD /f</CommandLine>
<Description>Disable unsupported hardware notifications</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>21</Order>
<CommandLine>reg.exe add "HKCU\Control Panel\UnsupportedHardwareNotificationCache" /v SV2 /d 0 /t REG_DWORD /f</CommandLine>
<Description>Disable unsupported hardware notifications</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>22</Order>
<CommandLine>pnputil -i -a C:\Windows\Drivers\viogpudo\viogpudo.inf</CommandLine>
<Description>Install VirtIO display driver</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>23</Order>
<CommandLine>cmd /C rd /q C:\Windows.old</CommandLine>
<Description>Remove empty Windows.old folder</Description>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
<Order>24</Order>
<CommandLine>cmd /C if exist "C:\OEM\install.bat" start "Install" "cmd /C C:\OEM\install.bat"</CommandLine>
<Description>Execute custom script from the OEM folder if exists</Description>
</SynchronousCommand>
</FirstLogonCommands>
</component>
</settings>
</unattend>


@@ -0,0 +1 @@
Add your Win11E setup.iso to this folder


@@ -0,0 +1,31 @@
@echo off
SET ScriptFolder=\\host.lan\Data
SET LogFile=%ScriptFolder%\firstboot_log.txt
echo Running PowerShell script... > %LogFile%
:: Check for PowerShell availability
where powershell >> %LogFile% 2>&1
if %ERRORLEVEL% neq 0 (
echo PowerShell is not available! >> %LogFile%
echo PowerShell is not available!
exit /b 1
)
:: Add a 30-second delay
echo Waiting for 30 seconds before continuing... >> %LogFile%
timeout /t 30 /nobreak >> %LogFile% 2>&1
:: Run PowerShell script with ExecutionPolicy Bypass and log errors
echo Running setup.ps1... >> %LogFile%
powershell -ExecutionPolicy Bypass -File "%ScriptFolder%\setup.ps1" >> %LogFile% 2>&1
if %ERRORLEVEL% neq 0 (
echo An error occurred. See %LogFile% for details.
) else (
echo PowerShell script has completed successfully.
)
echo PowerShell script has completed.


@@ -0,0 +1,7 @@
$scriptFolder = "\\host.lan\Data"
$pythonScriptFile = "$scriptFolder\server\main.py"
$pythonServerPort = 5000
# Start the flask computer use server
Write-Host "Running the server on port $pythonServerPort"
python $pythonScriptFile --port $pythonServerPort

Binary file not shown (image, 3.1 KiB).


@@ -0,0 +1,98 @@
import os
import logging
import argparse
import shlex
import subprocess
from flask import Flask, request, jsonify, send_file
import threading
import traceback
import pyautogui
from PIL import Image
from io import BytesIO
def execute_anything(data):
"""Execute any command received in the JSON request.
WARNING: This function executes commands without any safety checks."""
# The 'command' key in the JSON request should contain the command to be executed.
shell = data.get('shell', False)
command = data.get('command', "" if shell else [])
if isinstance(command, str) and not shell:
command = shlex.split(command)
# Expand user directory in list-style commands
for i, arg in enumerate(command):
if isinstance(arg, str) and arg.startswith("~/"):
command[i] = os.path.expanduser(arg)
# Execute the command without any safety checks.
try:
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, text=True, timeout=120)
return jsonify({
'status': 'success',
'output': result.stdout,
'error': result.stderr,
'returncode': result.returncode
})
except Exception as e:
logger.error("\n" + traceback.format_exc() + "\n")
return jsonify({
'status': 'error',
'message': str(e)
}), 500
def execute(data):
"""Action space aware implementation. Should not use arbitrary code execution."""
return jsonify({
'status': 'error',
'message': 'Not implemented. Please add your implementation to omnitool/omnibox/vm/win11setup/setupscripts/server/main.py.'
}), 500
execute_impl = execute # switch to execute_anything to allow any command. Please use with caution only for testing purposes.
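# A minimal allow-list sketch of an action-space-aware implementation (illustrative only:
# the accepted shape mirrors what ComputerTool.send_to_vm sends and is an assumption,
# not project policy):
def execute_allowlisted(data):
    command = data.get('command', [])
    # Accept only ["python", "-c", "import pyautogui; ..."] one-liners
    if (not isinstance(command, list) or len(command) != 3
            or command[:2] != ["python", "-c"]
            or not command[2].lstrip().startswith("import pyautogui")):
        return jsonify({'status': 'error', 'message': 'Command not allowed'}), 403
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, timeout=120)
    return jsonify({'status': 'success', 'output': result.stdout, 'error': result.stderr, 'returncode': result.returncode})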
parser = argparse.ArgumentParser()
parser.add_argument("--log_file", help="log file path", type=str,
default=os.path.join(os.path.dirname(__file__), "server.log"))
parser.add_argument("--port", help="port", type=int, default=5000)
args = parser.parse_args()
logging.basicConfig(filename=args.log_file, level=logging.DEBUG, filemode='w')
logger = logging.getLogger('werkzeug')
app = Flask(__name__)
computer_control_lock = threading.Lock()
@app.route('/probe', methods=['GET'])
def probe_endpoint():
return jsonify({"status": "Probe successful", "message": "Service is operational"}), 200
@app.route('/execute', methods=['POST'])
def execute_command():
# Only execute one command at a time
with computer_control_lock:
data = request.json
return execute_impl(data)
@app.route('/screenshot', methods=['GET'])
def capture_screen_with_cursor():
cursor_path = os.path.join(os.path.dirname(__file__), "cursor.png")
screenshot = pyautogui.screenshot()
cursor_x, cursor_y = pyautogui.position()
cursor = Image.open(cursor_path)
# make the cursor smaller
cursor = cursor.resize((int(cursor.width / 1.5), int(cursor.height / 1.5)))
screenshot.paste(cursor, (cursor_x, cursor_y), cursor)
# Convert PIL Image to bytes and send
img_io = BytesIO()
screenshot.save(img_io, 'PNG')
img_io.seek(0)
return send_file(img_io, mimetype='image/png')
if __name__ == '__main__':
app.run(host="10.0.2.15", port=args.port)
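From the host (with port 5000 reachable as configured above), the endpoints can be exercised like this; /execute returns an error until execute() above is given a real implementation:

curl http://localhost:5000/probe
curl -X POST http://localhost:5000/execute -H 'Content-Type: application/json' -d '{"command": ["python", "-c", "print(1+1)"]}'
curl http://localhost:5000/screenshot -o screenshot.png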


@@ -0,0 +1,2 @@
flask
PyAutoGUI


@@ -0,0 +1,197 @@
function Get-Tools {
param(
[string]$toolsConfigJson
)
# Convert the JSON string to a PowerShell object
$toolsList = $toolsConfigJson | ConvertFrom-Json
return $toolsList
}
function Get-ToolDetails {
param(
$toolsList,
[string]$toolName
)
# Check if the program exists in the JSON data
if ($toolsList.PSObject.Properties.Name -contains $toolName) {
# Return the program details as a PowerShell object
return $toolsList.$toolName
} else {
# Handle the case where the program is not found
Write-Host "Program '$toolName' not found in the list."
return $null
}
}
function Invoke-DownloadFileFromAvailableMirrors {
param (
[string[]]$mirrorUrls,
[string]$outfile
)
foreach ($url in $mirrorUrls) {
try {
$result = Invoke-DownloadFile -url $url -outfile $outfile
if ($result -eq $true) {
Write-Host "Downloaded using $url"
return $true
}
} catch {
Write-Host "Error downloading from $url. Please check and update the mirrors."
}
}
Write-Host "Downloading from the provided mirrors failed. Please check and update the mirrors."
return $false
}
function Invoke-DownloadFile {
param (
[string]$url,
[string]$outfile
)
# Makes download faster by disabling progress bar
$ProgressPreference = "SilentlyContinue"
$retryCount = 0
$maxRetries = 3
$sleepSeconds = 2
$maxSleepSeconds = 10
$userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
# Ensure directory exists
$directory = Split-Path -Path $outfile -Parent
if (-Not (Test-Path -Path $directory)) {
Write-Host "Creating directory $directory..."
New-Item -Path $directory -ItemType Directory -Force | Out-Null
}
while ($retryCount -lt $maxRetries) {
try {
Invoke-RestMethod -Uri $url -OutFile $outfile -Headers @{"User-Agent" = $userAgent}
Write-Host "Download successful, file saved to: $outfile"
break
} catch {
$retryCount++
Write-Host "Attempt $retryCount of $maxRetries failed. Error: $($_.Exception.Message)"
Start-Sleep -Seconds $sleepSeconds
$sleepSeconds = [Math]::Min($sleepSeconds * 2, $maxSleepSeconds) # Exponential backoff with a cap
}
}
if ($retryCount -eq $maxRetries) {
Write-Host "Failed to download the file after $maxRetries attempts."
return $false
}
return $true
}
function Add-ToEnvPath {
param (
[string]$NewPath
)
# Get the current PATH environment variable
$envPath = [Environment]::GetEnvironmentVariable("PATH", "Machine")
# Append the new path to the existing PATH
$newPath = "$envPath;$NewPath"
# Set the updated PATH environment variable
[Environment]::SetEnvironmentVariable("PATH", $newPath, "Machine")
# Make the new path visible in the current session as well
$env:PATH += ";$NewPath"
}
function Register-LogonTask {
param(
[parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true, HelpMessage = "Name of the scheduled task")]
[string]
$TaskName,
[parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true, HelpMessage = "Path to the .py script")]
[string]
$ScriptPath,
[parameter(Mandatory = $false, ValueFromPipelineByPropertyName = $true, HelpMessage = "Arguments to the .py script")]
[string]
$Arguments = "",
[parameter(Mandatory = $false, ValueFromPipelineByPropertyName = $true, HelpMessage = "Local Account username")]
[string]
$LocalUser,
[parameter(Mandatory = $false, ValueFromPipelineByPropertyName = $true, HelpMessage = "Local Account password")]
[string]
$LocalPassword,
[parameter(Mandatory = $false, ValueFromPipelineByPropertyName = $true, HelpMessage = "Whether to execute the command as SYSTEM")]
[switch]
$AsSystem = $false,
[parameter(Mandatory = $false, ValueFromPipelineByPropertyName = $true, HelpMessage = "logging file")]
[string]
$LogFilePath
)
$scriptDirectory = Split-Path $ScriptPath
$taskActionArgument = "-ExecutionPolicy Bypass -windowstyle hidden -Command `"try { . '$ScriptPath' $Arguments } catch { Write `$_.Exception.Message | Out-File $($TaskName)_Log.txt } finally { } `""
$taskAction = New-ScheduledTaskAction -Execute "$PSHome\powershell.exe" -Argument $taskActionArgument -WorkingDirectory $scriptDirectory
$params = @{
Force = $True
Action = $taskAction
RunLevel = "Highest"
TaskName = $TaskName
}
$taskTrigger = New-ScheduledTaskTrigger -AtLogOn
$params.Add("Trigger", $taskTrigger)
if ($AsSystem) {
$params.Add("User", "NT AUTHORITY\SYSTEM")
}
else {
$params.Add("User", $LocalUser)
if ($LocalPassword) {
$params.Add("Password", $LocalPassword)
}
}
Write-Host "Registering scheduled task '$TaskName' to run 'powershell.exe $taskActionArgument'..."
Register-ScheduledTask @params
}
# Function to attempt pip install and handle failures
function Install-PythonPackages {
param (
[string]$Package = "",
[string]$Arguments = "",
[string]$RequirementsPath = ""
)
$RetryCount = 3
$currentAttempt = 0
while ($currentAttempt -lt $RetryCount) {
if (-not [string]::IsNullOrWhiteSpace($RequirementsPath)) {
& python -m pip install --no-cache-dir -r $RequirementsPath $Arguments
} else {
& python -m pip install --no-cache-dir $Package $Arguments
}
if ($LASTEXITCODE -eq 0) {
Write-Host "Installation successful."
return
} else {
Write-Host "Attempt $($currentAttempt + 1) failed. Retrying..."
Start-Sleep -Seconds 10
$currentAttempt++
}
}
Write-Error "Failed to install after $RetryCount attempts."
exit
}
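Invoke-DownloadFile above implements a standard retry loop with capped exponential backoff (a 2-second sleep that doubles per attempt, capped at 10 seconds, over three attempts). For readers porting this setup elsewhere, a rough Python equivalent of the same pattern (the function name and defaults are illustrative, not part of the repo) is:

```python
import shutil
import time
import urllib.request

# Rough Python equivalent of Invoke-DownloadFile's retry loop: up to three
# attempts, doubling the sleep between attempts and capping it at 10 seconds.
# Function name and defaults are illustrative, not part of the repo.
def download_with_backoff(url, outfile, max_retries=3, sleep_s=2, max_sleep_s=10):
    for attempt in range(1, max_retries + 1):
        try:
            req = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
            with urllib.request.urlopen(req, timeout=60) as resp, open(outfile, "wb") as f:
                shutil.copyfileobj(resp, f)  # stream the body to disk
            return True
        except Exception as e:
            print(f"Attempt {attempt} of {max_retries} failed: {e}")
            time.sleep(sleep_s)
            sleep_s = min(sleep_s * 2, max_sleep_s)  # exponential backoff with a cap
    return False
```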

View File

@@ -0,0 +1,392 @@
$ErrorActionPreference = "Continue" # until downloading from mirrors is more stable
# Section - General Setup
$scriptFolder = "\\host.lan\Data"
$toolsFolder = "C:\Users\$env:USERNAME\Tools"
# Load the shared setup-tools module
Import-Module (Join-Path $scriptFolder -ChildPath "setup-tools.psm1")
# Check if profile exists
if (-not (Test-Path $PROFILE)) {
New-Item -ItemType File -Path $PROFILE -Force
}
# Create a folder where we store all the standalone executables
if (-not (Test-Path $toolsFolder)) {
New-Item -ItemType Directory -Path $toolsFolder -Force
$envPath = [Environment]::GetEnvironmentVariable("PATH", "Machine")
$newPath = "$envPath;$toolsFolder"
[Environment]::SetEnvironmentVariable("PATH", $newPath, "Machine")
}
# Section - Tools Installation
# Set TLS version to 1.2 or higher
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -bor [Net.SecurityProtocolType]::Tls13
# Load the tools config json listing mirrors and aliases used for installing tools
$toolsConfigJsonPath = Join-Path $scriptFolder -ChildPath "tools_config.json"
$toolsConfigJson = Get-Content -Path $toolsConfigJsonPath -Raw
$toolsList = Get-Tools -toolsConfigJson $toolsConfigJson
## - Python
$pythonToolName = "Python"
$userPythonPath = "$env:LOCALAPPDATA\Programs\Python"
$pythonDetails = Get-ToolDetails -toolsList $toolsList -toolName $pythonToolName
$pythonAlias = $pythonDetails.alias
# Check for Python installation
$pythonExecutablePath = Get-ChildItem -Path $userPythonPath -Filter python.exe -Recurse -ErrorAction SilentlyContinue | Select-Object -First 1 -ExpandProperty FullName
# Force-install Python 3.10, since the version preinstalled on Windows does not always work
Write-Host "Downloading Python 3.10..."
$pythonInstallerFilePath = "$env:TEMP\python_installer.exe"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $pythonDetails.mirrors -outfile $pythonInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download Python. Please try again later or install manually."
} else {
Write-Host "Installing Python for current user..."
Start-Process -FilePath $pythonInstallerFilePath -Args "/quiet InstallAllUsers=0 PrependPath=0" -NoNewWindow -Wait
$pythonExecutablePath = "$userPythonPath\Python310\python.exe"
$setAliasExpression = "Set-Alias -Name $pythonAlias -Value `"$pythonExecutablePath`""
Add-Content -Path $PROFILE -Value $setAliasExpression
Invoke-Expression $setAliasExpression
}
## - Git
$gitToolName = "git"
$gitToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $gitToolName
# Check for Git installation
try {
git --version | Out-Null
Write-Host "Git is already installed."
} catch {
Write-Host "Git is not installed. Downloading and installing Git..."
$gitInstallerFilePath = "$env:TEMP\git_installer.exe"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $gitToolDetails.mirrors -outfile $gitInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download Git. Please try again later or install manually."
} else {
Start-Process -FilePath $gitInstallerFilePath -Args "/VERYSILENT /NORESTART /NOCANCEL /SP-" -Wait
Add-ToEnvPath -NewPath "C:\Program Files\Git\bin"
Write-Host "Git has been installed."
}
}
# - 7zip
$7ZipToolName = "7zip"
$7ZipToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $7ZipToolName
Write-Host "$7ZipToolDetails"
if (Get-Command 7z -ErrorAction SilentlyContinue) {
Write-Host "7-Zip is already installed."
}
else {
Write-Host "Installing 7-Zip..."
$7ZipInstallerFilePath = "$env:TEMP\7_zip.exe"
Write-Host "$($7ZipToolDetails.mirrors)"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $7ZipToolDetails.mirrors -outfile $7ZipInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download 7-Zip. Please try again later or install manually."
} else {
Start-Process -FilePath $7ZipInstallerFilePath -Args "/S" -Verb RunAs -Wait
Remove-Item $7ZipInstallerFilePath
# add 7z to PATH
Add-ToEnvPath -NewPath "${env:ProgramFiles}\7-Zip"
}
}
# - ffmpeg
$ffmpegToolName = "ffmpeg"
$ffmpegToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $ffmpegToolName
if (Get-Command ffmpeg -ErrorAction SilentlyContinue) {
Write-Host "ffmpeg is already installed."
} else {
Write-Host "ffmpeg is not installed. Installing it."
$ffmpegInstallerFilePath = "C:\ffmpeg.7z"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $ffmpegToolDetails.mirrors -outfile $ffmpegInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download ffmpeg. Please try again later or install manually."
} else {
Write-Host "Extracting $ffmpegInstallerFilePath..."
7z x -y -o"C:\" "C:\ffmpeg.7z"
$ffmpegFolder = (Get-ChildItem -Path "C:\" -Filter "ffmpeg-*" -Directory | Select-Object -First 1).FullName
#remove ffmpeg folder if exists
if (Test-Path "C:\ffmpeg") {
Remove-Item -Path "C:\ffmpeg" -Recurse -Force
}
Rename-Item -Path "$ffmpegFolder" -NewName "ffmpeg"
Write-Host "Adding ffmpeg to PATH..."
Add-ToEnvPath -NewPath "C:\ffmpeg\bin"
Write-Host "ffmpeg is installed"
}
}
# Disable Edge Auto Updates
Stop-Process -Name "MicrosoftEdgeUpdate" -Force -ErrorAction SilentlyContinue
$edgeUpdatePath = "${env:ProgramFiles(x86)}\Microsoft\EdgeUpdate"
Remove-Item -Path $edgeUpdatePath -Recurse -Force -ErrorAction SilentlyContinue
Write-Host "Edge Update processes terminated and directory removed."
# - Google Chrome
$chromeToolName = "Google Chrome"
$chromeToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $chromeToolName
$chromeExePath = "C:\Program Files\Google\Chrome\Application\chrome.exe"
$chromeAlias = $chromeToolDetails.alias
# Check if Google Chrome is already installed by its alias
if (Get-Command $chromeAlias -ErrorAction SilentlyContinue) {
Write-Host "Google Chrome is already installed."
} else {
# Download the installer to the Temp directory
$chromeInstallerFilePath = "$env:TEMP\chrome_installer.exe"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $chromeToolDetails.mirrors -outfile $chromeInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download Google Chrome. Please try again later or install manually."
} else {
# Execute the installer silently with elevated permissions
Start-Process -FilePath $chromeInstallerFilePath -ArgumentList "/silent", "/install" -Verb RunAs -Wait
# Remove the installer file after installation
Remove-Item -Path $chromeInstallerFilePath
# Set alias
$setAliasExpression = "Set-Alias -Name $chromeAlias -Value `"$chromeExePath`""
Add-Content -Path $PROFILE -Value $setAliasExpression
Invoke-Expression $setAliasExpression
# Add Chrome to the system PATH environment variable
Add-ToEnvPath -NewPath "${env:ProgramFiles}\Google\Chrome\Application"
# Disable Google Chrome Auto Updates
$chromeRegPath = "HKLM:\SOFTWARE\Policies\Google\Update"
if (-not (Test-Path $chromeRegPath)) {
New-Item -Path $chromeRegPath -Force
}
Set-ItemProperty -Path $chromeRegPath -Name "AutoUpdateCheckPeriodMinutes" -Value 0
Set-ItemProperty -Path $chromeRegPath -Name "UpdateDefault" -Value 0
}
}
# - LibreOffice
$libreOfficeToolName = "LibreOffice"
$libreOfficeToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $libreOfficeToolName
# Check for LibreOffice installation
$installedVersion = (Get-WmiObject -Query "SELECT * FROM Win32_Product WHERE Name like 'LibreOffice%'").Version
if (-not [string]::IsNullOrWhiteSpace($installedVersion)) {
Write-Host "LibreOffice $version is already installed."
} else {
Write-Host "LibreOffice is not installed. Downloading and installing LibreOffice..."
$libreOfficeInstallerFilePath = "$env:TEMP\libreOffice_installer.exe"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $libreOfficeToolDetails.mirrors -outfile $libreOfficeInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download LibreOffice. Please try again later or install manually."
} else {
Start-Process "msiexec.exe" -ArgumentList "/i `"$libreOfficeInstallerFilePath`" /quiet" -Wait -NoNewWindow
Write-Host "LibreOffice has been installed."
# Add LibreOffice to the system PATH environment variable
Add-ToEnvPath -NewPath "C:\Program Files\LibreOffice\program"
}
}
# - VLC
$vlcToolName = "VLC"
$vlcToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $vlcToolName
$vlcAlias = $vlcToolDetails.alias
$vlcExecutableFilePath = "C:\Program Files\VideoLAN\VLC\vlc.exe"
# Check if VLC is already installed by checking the VLC command
if (Test-Path $vlcExecutableFilePath) {
Write-Host "VLC is already installed."
} else {
# Download the installer to the Temp directory
$vlcInstallerFilePath = "$env:TEMP\vlc_installer.exe"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $vlcToolDetails.mirrors -outfile $vlcInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download VLC. Please try again later or install manually."
} else {
# Execute the installer silently with elevated permissions
Start-Process -FilePath $vlcInstallerFilePath -ArgumentList "/S" -Verb RunAs -Wait
# Remove the installer file after installation
Remove-Item -Path $vlcInstallerFilePath
# Set alias
$setAliasExpression = "Set-Alias -Name $vlcAlias -Value `"$vlcExecutableFilePath`""
Add-Content -Path $PROFILE -Value $setAliasExpression
Invoke-Expression $setAliasExpression
# Add VLC to the system PATH environment variable
Add-ToEnvPath -NewPath "C:\Program Files\VideoLAN\VLC"
}
}
# - GIMP
$gimpToolName = "GIMP"
$gimpToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $gimpToolName
$gimpAlias = $gimpToolDetails.alias
$gimpExecutablePath = "C:\Program Files\GIMP 2\bin\gimp-2.10.exe"
# Check if GIMP is already installed by checking the GIMP executable path
if (Test-Path $gimpExecutablePath) {
Write-Host "GIMP is already installed."
} else {
# Download the installer to the Temp directory
$gimpInstallerFilePath = "$env:TEMP\gimp_installer.exe"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $gimpToolDetails.mirrors -outfile $gimpInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download GIMP. Please try again later or install manually."
} else {
# Execute the installer silently with elevated permissions
Start-Process -FilePath $gimpInstallerFilePath -ArgumentList "/VERYSILENT /ALLUSERS" -Verb RunAs -Wait
# Remove the installer file after installation
Remove-Item -Path $gimpInstallerFilePath
# Set alias
$setAliasExpression = "Set-Alias -Name $gimpAlias -Value `"$gimpExecutablePath`""
Add-Content -Path $PROFILE -Value $setAliasExpression
Invoke-Expression $setAliasExpression
# Add GIMP to the system PATH environment variable
Add-ToEnvPath -NewPath "C:\Program Files\GIMP 2\bin"
}
}
# - VS Code
$vsCodeToolName = "VS Code"
$vsCodeToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $vsCodeToolName
$vsCodeAlias = $vsCodeToolDetails.alias
$vsCodeExecutablePath = "C:\Users\$env:USERNAME\AppData\Local\Programs\Microsoft VS Code\Code.exe"
# Check if VS Code is already installed by checking the VS Code executable path
if (Test-Path $vsCodeExecutablePath) {
Write-Host "VS Code is already installed."
} else {
# Download the installer to the Temp directory
$vsCodeInstallerFilePath = "$env:TEMP\VSCodeSetup.exe"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $vsCodeToolDetails.mirrors -outfile $vsCodeInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download VS Code. Please try again later or install manually."
} else {
# Execute the installer silently with elevated permissions
Start-Process -FilePath $vsCodeInstallerFilePath -ArgumentList "/VERYSILENT", "/mergetasks=!runcode" -Verb RunAs -Wait
# Remove the installer file after installation
Remove-Item -Path $vsCodeInstallerFilePath
# Set alias
$setAliasExpression = "Set-Alias -Name $vsCodeAlias -Value `"$vsCodeExecutablePath`""
Add-Content -Path $PROFILE -Value $setAliasExpression
Invoke-Expression $setAliasExpression
# Add VS Code to the system PATH environment variable
Add-ToEnvPath -NewPath "C:\Users\$env:USERNAME\AppData\Local\Programs\Microsoft VS Code\bin"
# Disable Visual Studio Code Auto Updates
$vsCodeSettingsPath = "${env:APPDATA}\Code\User\settings.json"
if (-not (Test-Path $vsCodeSettingsPath)) {
# Create the directory if it doesn't exist
$dirPath = Split-Path -Path $vsCodeSettingsPath -Parent
if (-not (Test-Path $dirPath)) {
New-Item -ItemType Directory -Path $dirPath -Force
}
# Initialize an empty hashtable to act as the JSON object
$settingsObj = @{}
$settingsObj["update.mode"] = "none" # Set update mode to none
$settingsObj | ConvertTo-Json | Set-Content $vsCodeSettingsPath
} else {
# If the file exists, modify it
$settingsObj = Get-Content $vsCodeSettingsPath -Raw | ConvertFrom-Json
$settingsObj | Add-Member -NotePropertyName "update.mode" -NotePropertyValue "none" -Force
$settingsObj | ConvertTo-Json | Set-Content $vsCodeSettingsPath
}
}
}
# - Thunderbird
$thunderbirdToolName = "Thunderbird"
$thunderbirdToolDetails = Get-ToolDetails -toolsList $toolsList -toolName $thunderbirdToolName
$thunderbirdAlias = $thunderbirdToolDetails.alias
$thunderbirdExecutablePath = "C:\Program Files\Mozilla Thunderbird\thunderbird.exe"
# Check if Thunderbird is already installed by checking the Thunderbird executable path
if (Test-Path $thunderbirdExecutablePath) {
Write-Host "Thunderbird is already installed."
} else {
# Download the installer to the Temp directory
$thunderbirdInstallerFilePath = "$env:TEMP\ThunderbirdSetup.exe"
$downloadResult = Invoke-DownloadFileFromAvailableMirrors -mirrorUrls $thunderbirdToolDetails.mirrors -outfile $thunderbirdInstallerFilePath
if (-not $downloadResult) {
Write-Host "Failed to download Thunderbird. Please try again later or install manually."
} else {
# Execute the installer silently with elevated permissions
Start-Process -FilePath $thunderbirdInstallerFilePath -ArgumentList "/S" -Verb RunAs -Wait
# Remove the installer file after installation
Remove-Item -Path $thunderbirdInstallerFilePath
# Set alias
$setAliasExpression = "Set-Alias -Name $thunderbirdAlias -Value `"$thunderbirdExecutablePath`""
Add-Content -Path $PROFILE -Value $setAliasExpression
Invoke-Expression $setAliasExpression
# Add Thunderbird to the system PATH environment variable
Add-ToEnvPath -NewPath "C:\Program Files\Mozilla Thunderbird"
}
}
# - Server Setup
$pythonServerPort = 5000
$onLogonTaskName = "Server_OnLogon"
$requirementsFile = "$scriptFolder\server\requirements.txt"
# Ensure pip is updated to the latest version
Install-PythonPackages -Package "pip" -Arguments "--upgrade"
Install-PythonPackages -Package "wheel"
Install-PythonPackages -Package "pywinauto"
# Install Python packages from requirements.txt using Python's pip module
if (Test-Path $requirementsFile) {
Write-Host "Installing required Python packages using pip from requirements file..."
Install-PythonPackages -RequirementsPath $requirementsFile
} else {
Write-Error "Requirements file not found: $requirementsFile"
exit
}
# Add a firewall rule to allow incoming connections on the specified port for the Python executable
$pythonServerRuleName = "PythonHTTPServer-$pythonServerPort"
if (-not (Get-NetFirewallRule -DisplayName $pythonServerRuleName -ErrorAction SilentlyContinue)) {
New-NetFirewallRule -DisplayName $pythonServerRuleName -Direction Inbound -Program $pythonExecutablePath -Protocol TCP -LocalPort $pythonServerPort -Action Allow -Profile Any
Write-Host "Firewall rule added to allow traffic on port $pythonServerPort for Python"
} else {
Write-Host "Firewall rule already exists. $pythonServerRuleName "
}
$onLogonScriptPath = "$scriptFolder\on-logon.ps1"
# Check if the scheduled task already exists before registering it
if (Get-ScheduledTask -TaskName $onLogonTaskName -ErrorAction SilentlyContinue) {
Write-Host "Scheduled task $onLogonTaskName already exists."
} else {
Write-Host "Registering new task $onLogonTaskName..."
Register-LogonTask -TaskName $onLogonTaskName -ScriptPath $onLogonScriptPath -LocalUser "Docker"
}
Start-Sleep -Seconds 10
Start-ScheduledTask -TaskName $onLogonTaskName

View File

@@ -0,0 +1,69 @@
{
  "Python": {
    "mirrors": [
      "https://www.python.org/ftp/python/3.10.0/python-3.10.0-amd64.exe"
    ],
    "alias": "python"
  },
  "git": {
    "mirrors": [
      "https://github.com/git-for-windows/git/releases/download/v2.37.1.windows.1/Git-2.37.1-64-bit.exe"
    ]
  },
  "7zip": {
    "mirrors": [
      "https://www.7-zip.org/a/7z2407-x64.exe"
    ]
  },
  "ffmpeg": {
    "mirrors": [
      "https://www.gyan.dev/ffmpeg/builds/ffmpeg-release-essentials.7z"
    ]
  },
  "Google Chrome": {
    "mirrors": [
      "https://dl.google.com/chrome/install/latest/chrome_installer.exe"
    ],
    "alias": "google-chrome"
  },
  "LibreOffice": {
    "mirrors": [
      "https://mirror.raiolanetworks.com/tdf/libreoffice/stable/24.8.4/win/x86_64/LibreOffice_24.8.4_Win_x86-64.msi",
      "https://mirrors.iu13.net/tdf/libreoffice/stable/24.8.4/win/x86_64/LibreOffice_24.8.4_Win_x86-64.msi",
      "https://download.documentfoundation.org/libreoffice/stable/24.8.4/win/x86_64/LibreOffice_24.8.4_Win_x86-64.msi"
    ]
  },
  "VLC": {
    "mirrors": [
      "https://ftp.free.org/mirrors/videolan/vlc/3.0.21/win64/vlc-3.0.21-win64.exe",
      "https://mirror.fcix.net/videolan-ftp/vlc/3.0.21/win64/vlc-3.0.21-win64.exe",
      "https://mirror.raiolanetworks.com/videolan/vlc/3.0.21/win64/vlc-3.0.21-win64.exe"
    ],
    "alias": "vlc"
  },
  "GIMP": {
    "mirrors": [
      "https://download.gimp.org/gimp/v2.10/windows/gimp-2.10.38-setup.exe"
    ],
    "alias": "gimp"
  },
  "VS Code": {
    "mirrors": [
      "https://update.code.visualstudio.com/latest/win32-x64-user/stable"
    ],
    "alias": "code"
  },
  "Thunderbird": {
    "mirrors": [
      "https://download-installer.cdn.mozilla.net/pub/thunderbird/releases/115.12.1/win64/en-US/Thunderbird%20Setup%20115.12.1.exe",
      "https://archive.mozilla.org/pub/thunderbird/releases/115.12.1/win64/en-US/Thunderbird%20Setup%20115.12.1.exe"
    ],
    "alias": "thunderbird"
  },
  "Caddy Proxy": {
    "mirrors": [
      "https://caddyserver.com/api/download?os=windows&arch=amd64"
    ],
    "alias": "caddy"
  }
}
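Mirror URLs like these go stale over time, which is the most common cause of the download failures flagged in setup-tools.psm1. A small, hypothetical Python helper (not part of the repo) to sanity-check every mirror in the config with an HTTP HEAD request might look like:

```python
import json
import urllib.request

# Illustrative helper: issue a HEAD request against every mirror URL in
# tools_config.json and report which ones still respond.
with open("tools_config.json") as f:
    tools = json.load(f)

for name, details in tools.items():
    for url in details["mirrors"]:
        req = urllib.request.Request(url, method="HEAD",
                                     headers={"User-Agent": "Mozilla/5.0"})
        try:
            with urllib.request.urlopen(req, timeout=15) as resp:
                print(f"OK   {name}: {url} ({resp.status})")
        except Exception as e:
            print(f"FAIL {name}: {url} ({e})")
```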

View File

@@ -0,0 +1,51 @@
'''
python -m omniparserserver --som_model_path ../../weights/icon_detect/model.pt --caption_model_name florence2 --caption_model_path ../../weights/icon_caption_florence --device cuda --BOX_TRESHOLD 0.05
'''
import sys
import os
import time
from fastapi import FastAPI
from pydantic import BaseModel
import argparse
import uvicorn

root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root_dir)
from util.omniparser import Omniparser

def parse_arguments():
    parser = argparse.ArgumentParser(description='Omniparser API')
    parser.add_argument('--som_model_path', type=str, default='../../weights/icon_detect/model.pt', help='Path to the som model')
    parser.add_argument('--caption_model_name', type=str, default='florence2', help='Name of the caption model')
    parser.add_argument('--caption_model_path', type=str, default='../../weights/icon_caption_florence', help='Path to the caption model')
    parser.add_argument('--device', type=str, default='cpu', help='Device to run the model')
    parser.add_argument('--BOX_TRESHOLD', type=float, default=0.05, help='Threshold for box detection')
    parser.add_argument('--host', type=str, default='127.0.0.1', help='Host for the API')
    parser.add_argument('--port', type=int, default=8000, help='Port for the API')
    args = parser.parse_args()
    return args

args = parse_arguments()
config = vars(args)

app = FastAPI()
omniparser = Omniparser(config)

class ParseRequest(BaseModel):
    base64_image: str

@app.post("/parse/")
async def parse(parse_request: ParseRequest):
    print('start parsing...')
    start = time.time()
    dino_labeled_img, parsed_content_list = omniparser.parse(parse_request.base64_image)
    latency = time.time() - start
    print('time:', latency)
    return {"som_image_base64": dino_labeled_img, "parsed_content_list": parsed_content_list, 'latency': latency}

@app.get("/probe/")
async def root():
    return {"message": "Omniparser API ready"}

if __name__ == "__main__":
    uvicorn.run("omniparserserver:app", host=args.host, port=args.port, reload=True)
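A minimal client for this API simply POSTs a base64-encoded screenshot to `/parse/` and reads back the annotated image and parsed content. The sketch below assumes the server defaults above, a local `screenshot.png`, and that the `requests` package is installed:

```python
import base64
import requests

# Minimal client sketch for the Omniparser API above; host/port follow the
# server defaults, and screenshot.png is a placeholder input image.
with open("screenshot.png", "rb") as f:
    b64 = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post("http://127.0.0.1:8000/parse/", json={"base64_image": b64})
resp.raise_for_status()
result = resp.json()
print("latency:", result["latency"])
for item in result["parsed_content_list"]:
    print(item)
# result["som_image_base64"] holds the annotated screenshot, base64-encoded.
```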

omnitool/readme.md
View File

@@ -0,0 +1,127 @@
<img src="../imgs/header_bar.png" alt="OmniTool Header" width="100%">
# OmniTool
Control a Windows 11 VM with OmniParser + your vision model of choice.
## Highlights:
1. **OmniParser V2** is 60% faster than V1 and now understands a wide variety of OS, app, and in-app icons!
2. **OmniBox** uses 50% less disk space than other Windows VMs for agent testing, whilst providing the same computer use API
3. **OmniTool** supports the following vision models out of the box: OpenAI (4o/o1/o3-mini), DeepSeek (R1), Qwen (2.5VL), and Anthropic Computer Use
## Overview
There are three components:
<table style="border-collapse: collapse; border: none;">
<tr>
<td style="border: none;"><img src="../imgs/omniparsericon.png" width="50"></td>
<td style="border: none;"><strong>omniparserserver</strong></td>
<td style="border: none;">FastAPI server running OmniParser V2.</td>
</tr>
<tr>
<td style="border: none;"><img src="../imgs/omniboxicon.png" width="50"></td>
<td style="border: none;"><strong>omnibox</strong></td>
<td style="border: none;">A Windows 11 VM running in a Docker container.</td>
</tr>
<tr>
<td style="border: none;"><img src="../imgs/gradioicon.png" width="50"></td>
<td style="border: none;"><strong>gradio</strong></td>
<td style="border: none;">UI to provide commands and watch reasoning + execution on OmniBox</td>
</tr>
</table>
## Showcase Video
| OmniParser V2 | [Watch Video](https://1drv.ms/v/c/650b027c18d5a573/EWXbVESKWo9Buu6OYCwg06wBeoM97C6EOTG6RjvWLEN1Qg?e=alnHGC) |
|--------------|------------------------------------------------------------------|
| OmniTool | [Watch Video](https://1drv.ms/v/c/650b027c18d5a573/EehZ7RzY69ZHn-MeQHrnnR4BCj3by-cLLpUVlxMjF4O65Q?e=8LxMgX) |
## Notes:
1. Though **OmniParser V2** can run on a CPU, we have separated it out as its own service so you can run it fast on a GPU machine
2. The **OmniBox** Windows 11 VM Docker container depends on KVM, so it only runs quickly on Windows and Linux. It can run on a CPU machine (no GPU needed).
3. The Gradio UI can also run on a CPU machine. We suggest running **omnibox** and **gradio** on the same CPU machine and **omniparserserver** on a GPU server.
## Setup
1. **omniparserserver**:
a. If you already have a conda environment for OmniParser, you can use that. Otherwise, follow the steps below to create one
b. Ensure conda is installed with `conda --version` or install from the [Anaconda website](https://www.anaconda.com/download/success)
c. Navigate to the root of the repo with `cd OmniParser`
d. Create a conda python environment with `conda create -n "omni" python==3.12`
e. Set the python environment to be used with `conda activate omni`
f. Install the dependencies with `pip install -r requirements.txt`
g. Continue from here if you already had the conda environment.
h. Ensure you have the V2 weights downloaded in the weights folder (**ensure the caption weights folder is called icon_caption_florence**). If not, download them with:
```
rm -rf weights/icon_detect weights/icon_caption weights/icon_caption_florence
for folder in icon_caption icon_detect; do huggingface-cli download microsoft/OmniParser-v2.0 --local-dir weights --repo-type model --include "$folder/*"; done
mv weights/icon_caption weights/icon_caption_florence
```
i. Navigate to the server directory with `cd OmniParser/omnitool/omniparserserver`
j. Start the server with `python -m omniparserserver`
2. **omnibox**:
a. Ensure you have 30GB of space remaining (5GB for ISO, 400MB for Docker container, 20GB for storage folder)
b. Install Docker Desktop
c. Visit [Microsoft Evaluation Center](https://info.microsoft.com/ww-landing-windows-11-enterprise.html), accept the Terms of Service, and download a **Windows 11 Enterprise Evaluation (90-day trial, English, United States)** ISO file [~6GB]. Rename the file to `custom.iso` and copy it to the directory `OmniParser/omnitool/omnibox/vm/win11iso`
d. Navigate to the VM management script directory with `cd OmniParser/omnitool/omnibox/scripts`
e. Build the Docker container [400MB] and install the ISO to a storage folder [20GB] with `./manage_vm.sh create`. The process is shown in the screenshots below and takes 20-90 mins depending on download speeds (commonly around 60 mins). When complete, the terminal will show `VM + server is up and running!`. You can watch the apps being installed in the VM via the NoVNC viewer (http://localhost:8006/vnc.html?view_only=1&autoconnect=1&resize=scale). The terminal window shown in the NoVNC viewer will no longer be open on the desktop once setup is done. If you can still see it, wait and don't click around!
![image](https://github.com/user-attachments/assets/6bd18f81-18e2-4bc5-9170-293a6699481d)
f. After the first create, a save of the VM state is stored in `vm/win11storage`. You can then manage the VM with `./manage_vm.sh start` and `./manage_vm.sh stop`. To delete the VM, use `./manage_vm.sh delete` and delete the `OmniParser/omnitool/omnibox/vm/win11storage` directory.
3. **gradio**:
a. Navigate to the gradio directory with `cd OmniParser/omnitool/gradio`
b. Ensure you have activated the conda python environment with `conda activate omni`
c. Start the server with `python app.py --windows_host_url localhost:8006 --omniparser_server_url localhost:8000`
d. Open the URL in the terminal output, set your API Key and start playing with the AI agent!
## Common setup errors
### OmniBox install taking a while
If your internet speed is slow and you want a minimal VM with fewer preinstalled apps, comment out lines 57 to 350 in this [file](https://github.com/microsoft/OmniParser/blob/master/omnitool/omnibox/vm/win11setup/setupscripts/setup.ps1), which define all the apps installed when you first create the container + VM. Ensure that you follow the factory reset instructions from the next section when creating your VM, to wipe any previous omnibox setup.
### Validation errors: Windows Host is not responding
If you get this error in Gradio after clicking the submit button, it indicates that the server running in the VM, which accepts commands from Gradio and then moves the mouse/keyboard, isn't available. You can verify this by running `docker exec -it omni-windows bash -c "curl http://localhost:5000/probe"` (a scripted version of this check is sketched at the end of this section). Ensure your `omnibox` has fully finished setting up (there should no longer be a terminal window on the desktop); refer to the omnibox section for timing. If you have set up your omnibox, it may simply be a matter of waiting a little.
If waiting 10 mins doesn't help, try stopping (`./manage_vm.sh stop`) and starting (`./manage_vm.sh start`) your omnibox VM with the script commands.
Then, if that doesn't work, delete your VM (`./manage_vm.sh delete`) leaving the storage folder and then run create again. It will be fast as it will use the existing storage folder.
Finally, if that still doesn't work and you want to fully reset your VM to factory settings (create a new VM):
1. run `./manage_vm.sh delete`
2. delete the `vm/win11storage` folder
3. run `./manage_vm.sh create`
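If you prefer scripting the health check rather than re-running curl by hand, a small polling loop against the probe endpoint works too. The sketch below assumes the default port 5000 and that the `requests` package is installed; it is an illustration, not part of the repo's tooling:

```python
import time
import requests

# Poll the VM server's probe endpoint until it answers, mirroring the
# curl check above. 60 attempts x 10 s gives up to ~10 minutes.
for _ in range(60):
    try:
        r = requests.get("http://localhost:5000/probe", timeout=5)
        if r.ok:
            print("VM server is up:", r.json())
            break
    except requests.RequestException:
        pass
    print("still waiting for the VM server...")
    time.sleep(10)
```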
### libpaddle: The specified module could not be found
The OCR library used by OmniParser is Paddle, which depends on the Microsoft Visual C++ Redistributable on Windows. If you are on Windows, ensure it is installed, then rerun the requirements.txt installation. More details [here](https://github.com/microsoft/OmniParser/issues/140#issuecomment-2670619168).
## Risks and Mitigations
To align with the Microsoft AI principles and Responsible AI practices, we conduct risk mitigation by training the icon caption model with Responsible AI data, which helps the model avoid, as much as possible, inferring sensitive attributes (e.g. race, religion, etc.) of individuals who happen to appear in icon images. At the same time, we encourage users to apply OmniParser only to screenshots that do not contain harmful or violent content. For OmniTool, we conduct threat model analysis using the Microsoft Threat Modeling Tool. We advise a human to stay in the loop in order to minimize risk.
## Acknowledgment
Kudos to the amazing resources that are invaluable in the development of our code: [Claude Computer Use](https://github.com/anthropics/anthropic-quickstarts/blob/main/computer-use-demo/README.md), [OS World](https://github.com/xlang-ai/OSWorld), [Windows Agent Arena](https://github.com/microsoft/WindowsAgentArena), and [computer_use_ootb](https://github.com/showlab/computer_use_ootb).
We are grateful for helpful suggestions and feedback provided by Francesco Bonacci, Jianwei Yang, Dillon DuPont, Yue Wu, and Anh Nguyen.
Many thanks to @keyserjaya for screenshots on omnibox install.

View File

@@ -1,18 +1,32 @@
torch
easyocr
torchvision
supervision==0.18.0
openai==1.3.5
transformers
ultralytics==8.1.24
azure-identity
numpy
opencv-python
opencv-python-headless
gradio
dill
accelerate
timm
einops==0.8.0
paddlepaddle
paddleocr
torch
easyocr
torchvision
supervision==0.18.0
openai==1.3.5
transformers
ultralytics==8.3.70
azure-identity
numpy==1.26.4
opencv-python
opencv-python-headless
gradio
dill
accelerate
timm
einops==0.8.0
paddlepaddle
paddleocr
ruff==0.6.7
pre-commit==3.8.0
pytest==8.3.3
pytest-asyncio==0.23.6
pyautogui==0.9.54
streamlit>=1.38.0
anthropic[bedrock,vertex]>=0.37.1
jsonschema==4.22.0
boto3>=1.28.57
google-auth<3,>=2
screeninfo
uiautomation
dashscope
groq

View File

@@ -1,425 +0,0 @@
'''
Adapted from https://github.com/google-research/google-research/tree/master/android_in_the_wild
'''
import jax
import jax.numpy as jnp
import numpy as np
# import action_type as action_type_lib
import enum
class ActionType(enum.IntEnum):
# Placeholders for unused enum values
UNUSED_0 = 0
UNUSED_1 = 1
UNUSED_2 = 2
UNUSED_8 = 8
UNUSED_9 = 9
########### Agent actions ###########
# A type action that sends text to the emulator. Note that this simply sends
# text and does not perform any clicks for element focus or enter presses for
# submitting text.
TYPE = 3
# The dual point action used to represent all gestures.
DUAL_POINT = 4
# These actions differentiate pressing the home and back button from touches.
# They represent explicit presses of back and home performed using ADB.
PRESS_BACK = 5
PRESS_HOME = 6
# An action representing that ADB command for hitting enter was performed.
PRESS_ENTER = 7
########### Episode status actions ###########
# An action used to indicate the desired task has been completed and resets
# the environment. This action should also be used in the case that the task
# has already been completed and there is nothing to do.
# e.g. The task is to turn on the Wi-Fi when it is already on
STATUS_TASK_COMPLETE = 10
# An action used to indicate that desired task is impossible to complete and
# resets the environment. This can be a result of many different things
# including UI changes, Android version differences, etc.
STATUS_TASK_IMPOSSIBLE = 11
_TAP_DISTANCE_THRESHOLD = 0.14 # Fraction of the screen
ANNOTATION_WIDTH_AUGMENT_FRACTION = 1.4
ANNOTATION_HEIGHT_AUGMENT_FRACTION = 1.4
# Interval determining if an action is a tap or a swipe.
_SWIPE_DISTANCE_THRESHOLD = 0.04
def _yx_in_bounding_boxes(
yx, bounding_boxes
):
"""Check if the (y,x) point is contained in each bounding box.
Args:
yx: The (y, x) coordinate in pixels of the point.
bounding_boxes: A 2D int array of shape (num_bboxes, 4), where each row
represents a bounding box: (y_top_left, x_top_left, box_height,
box_width). Note: containment is inclusive of the bounding box edges.
Returns:
is_inside: A 1D bool array where each element specifies if the point is
contained within the respective box.
"""
y, x = yx
# `bounding_boxes` has shape (n_elements, 4); we extract each array along the
# last axis into shape (n_elements, 1), then squeeze unneeded dimension.
top, left, height, width = [
jnp.squeeze(v, axis=-1) for v in jnp.split(bounding_boxes, 4, axis=-1)
]
# The y-axis is inverted for AndroidEnv, so bottom = top + height.
bottom, right = top + height, left + width
return jnp.logical_and(y >= top, y <= bottom) & jnp.logical_and(
x >= left, x <= right)
def _resize_annotation_bounding_boxes(
annotation_positions, annotation_width_augment_fraction,
annotation_height_augment_fraction):
"""Resize the bounding boxes by the given fractions.
Args:
annotation_positions: Array of shape (N, 4), where each row represents the
(y, x, height, width) of the bounding boxes.
annotation_width_augment_fraction: The fraction to augment the box widths,
E.g., 1.4 == 240% total increase.
annotation_height_augment_fraction: Same as described for width, but for box
height.
Returns:
Resized bounding box.
"""
height_change = (
annotation_height_augment_fraction * annotation_positions[:, 2])
width_change = (
annotation_width_augment_fraction * annotation_positions[:, 3])
# Limit bounding box positions to the screen.
resized_annotations = jnp.stack([
jnp.maximum(0, annotation_positions[:, 0] - (height_change / 2)),
jnp.maximum(0, annotation_positions[:, 1] - (width_change / 2)),
jnp.minimum(1, annotation_positions[:, 2] + height_change),
jnp.minimum(1, annotation_positions[:, 3] + width_change),
],
axis=1)
return resized_annotations
def is_tap_action(normalized_start_yx,
normalized_end_yx):
distance = jnp.linalg.norm(
jnp.array(normalized_start_yx) - jnp.array(normalized_end_yx))
return distance <= _SWIPE_DISTANCE_THRESHOLD
def _is_non_dual_point_action(action_type):
return jnp.not_equal(action_type, ActionType.DUAL_POINT)
def _check_tap_actions_match(
tap_1_yx,
tap_2_yx,
annotation_positions,
matching_tap_distance_threshold_screen_percentage,
annotation_width_augment_fraction,
annotation_height_augment_fraction,
):
"""Determines if two tap actions are the same."""
resized_annotation_positions = _resize_annotation_bounding_boxes(
annotation_positions,
annotation_width_augment_fraction,
annotation_height_augment_fraction,
)
# Check if the ground truth tap action falls in an annotation's bounding box.
tap1_in_box = _yx_in_bounding_boxes(tap_1_yx, resized_annotation_positions)
tap2_in_box = _yx_in_bounding_boxes(tap_2_yx, resized_annotation_positions)
both_in_box = jnp.max(tap1_in_box & tap2_in_box)
# If the ground-truth tap action falls outside any of the annotation
# bounding boxes or one of the actions is inside a bounding box and the other
# is outside bounding box or vice versa, compare the points using Euclidean
# distance.
within_threshold = (
jnp.linalg.norm(jnp.array(tap_1_yx) - jnp.array(tap_2_yx))
<= matching_tap_distance_threshold_screen_percentage
)
return jnp.logical_or(both_in_box, within_threshold)
def _check_drag_actions_match(
drag_1_touch_yx,
drag_1_lift_yx,
drag_2_touch_yx,
drag_2_lift_yx,
):
"""Determines if two drag actions are the same."""
# Store drag deltas (the change in the y and x coordinates from touch to
# lift), magnitudes, and the index of the main axis, which is the axis with
# the greatest change in coordinate value (e.g. a drag starting at (0, 0) and
# ending at (0.3, 0.5) has a main axis index of 1).
drag_1_deltas = drag_1_lift_yx - drag_1_touch_yx
drag_1_magnitudes = jnp.abs(drag_1_deltas)
drag_1_main_axis = np.argmax(drag_1_magnitudes)
drag_2_deltas = drag_2_lift_yx - drag_2_touch_yx
drag_2_magnitudes = jnp.abs(drag_2_deltas)
drag_2_main_axis = np.argmax(drag_2_magnitudes)
return jnp.equal(drag_1_main_axis, drag_2_main_axis)
def check_actions_match(
action_1_touch_yx,
action_1_lift_yx,
action_1_action_type,
action_2_touch_yx,
action_2_lift_yx,
action_2_action_type,
annotation_positions,
tap_distance_threshold = _TAP_DISTANCE_THRESHOLD,
annotation_width_augment_fraction = ANNOTATION_WIDTH_AUGMENT_FRACTION,
annotation_height_augment_fraction = ANNOTATION_HEIGHT_AUGMENT_FRACTION,
):
"""Determines if two actions are considered to be the same.
Two actions being "the same" is defined here as two actions that would result
in a similar screen state.
Args:
action_1_touch_yx: The (y, x) coordinates of the first action's touch.
action_1_lift_yx: The (y, x) coordinates of the first action's lift.
action_1_action_type: The action type of the first action.
action_2_touch_yx: The (y, x) coordinates of the second action's touch.
action_2_lift_yx: The (y, x) coordinates of the second action's lift.
action_2_action_type: The action type of the second action.
annotation_positions: The positions of the UI annotations for the screen. It
is a 2D int array of shape (num_bboxes, 4), where each row represents a
bounding box: (y_top_left, x_top_left, box_height, box_width). Note that
containment is inclusive of the bounding box edges.
tap_distance_threshold: The threshold that determines if two taps result in
a matching screen state if they don't fall in the same bounding boxes.
annotation_width_augment_fraction: The fraction to increase the width of the
bounding box by.
annotation_height_augment_fraction: The fraction to increase the height of
the bounding box by.
Returns:
A boolean representing whether the two given actions are the same or not.
"""
action_1_touch_yx = jnp.asarray(action_1_touch_yx)
action_1_lift_yx = jnp.asarray(action_1_lift_yx)
action_2_touch_yx = jnp.asarray(action_2_touch_yx)
action_2_lift_yx = jnp.asarray(action_2_lift_yx)
# Checks if at least one of the actions is global (i.e. not DUAL_POINT),
# because if that is the case, only the actions' types need to be compared.
has_non_dual_point_action = jnp.logical_or(
_is_non_dual_point_action(action_1_action_type),
_is_non_dual_point_action(action_2_action_type),
)
#print("non dual point: "+str(has_non_dual_point_action))
different_dual_point_types = jnp.logical_xor(
is_tap_action(action_1_touch_yx, action_1_lift_yx),
is_tap_action(action_2_touch_yx, action_2_lift_yx),
)
#print("different dual type: "+str(different_dual_point_types))
is_tap = jnp.logical_and(
is_tap_action(action_1_touch_yx, action_1_lift_yx),
is_tap_action(action_2_touch_yx, action_2_lift_yx),
)
#print("is tap: "+str(is_tap))
taps_match = _check_tap_actions_match(
action_1_touch_yx,
action_2_touch_yx,
annotation_positions,
tap_distance_threshold,
annotation_width_augment_fraction,
annotation_height_augment_fraction,
)
#print("tap match: "+str(taps_match))
taps_match = jnp.logical_and(is_tap, taps_match)
#print("tap match: "+str(taps_match))
drags_match = _check_drag_actions_match(
action_1_touch_yx, action_1_lift_yx, action_2_touch_yx, action_2_lift_yx
)
drags_match = jnp.where(is_tap, False, drags_match)
#print("drag match: "+str(drags_match))
return jnp.where(
has_non_dual_point_action,
jnp.equal(action_1_action_type, action_2_action_type),
jnp.where(
different_dual_point_types,
False,
jnp.logical_or(taps_match, drags_match),
),
)
def action_2_format(step_data):
# Convert an action from the test dataset into the format used for computing the matching score
action_type = step_data["action_type_id"]
if action_type == 4:
if step_data["action_type_text"] == 'click': # 点击
touch_point = step_data["touch"]
lift_point = step_data["lift"]
else: # scroll up/down/left/right
if step_data["action_type_text"] == 'scroll down':
touch_point = [0.5, 0.8]
lift_point = [0.5, 0.2]
elif step_data["action_type_text"] == 'scroll up':
touch_point = [0.5, 0.2]
lift_point = [0.5, 0.8]
elif step_data["action_type_text"] == 'scroll left':
touch_point = [0.2, 0.5]
lift_point = [0.8, 0.5]
elif step_data["action_type_text"] == 'scroll right':
touch_point = [0.8, 0.5]
lift_point = [0.2, 0.5]
else:
touch_point = [-1.0, -1.0]
lift_point = [-1.0, -1.0]
if action_type == 3:
typed_text = step_data["type_text"]
else:
typed_text = ""
action = {"action_type": action_type, "touch_point": touch_point, "lift_point": lift_point,
"typed_text": typed_text}
action["touch_point"] = [action["touch_point"][1], action["touch_point"][0]]
action["lift_point"] = [action["lift_point"][1], action["lift_point"][0]]
action["typed_text"] = action["typed_text"].lower()
return action
def pred_2_format(step_data):
# Convert the model output into the format used for computing action matching
action_type = step_data["action_type"]
if action_type == 4: # click
action_type_new = 4
touch_point = step_data["click_point"]
lift_point = step_data["click_point"]
typed_text = ""
elif action_type == 0:
action_type_new = 4
touch_point = [0.5, 0.8]
lift_point = [0.5, 0.2]
typed_text = ""
elif action_type == 1:
action_type_new = 4
touch_point = [0.5, 0.2]
lift_point = [0.5, 0.8]
typed_text = ""
elif action_type == 8:
action_type_new = 4
touch_point = [0.2, 0.5]
lift_point = [0.8, 0.5]
typed_text = ""
elif action_type == 9:
action_type_new = 4
touch_point = [0.8, 0.5]
lift_point = [0.2, 0.5]
typed_text = ""
else:
action_type_new = action_type
touch_point = [-1.0, -1.0]
lift_point = [-1.0, -1.0]
typed_text = ""
if action_type_new == 3:
typed_text = step_data["typed_text"]
action = {"action_type": action_type_new, "touch_point": touch_point, "lift_point": lift_point,
"typed_text": typed_text}
action["touch_point"] = [action["touch_point"][1], action["touch_point"][0]]
action["lift_point"] = [action["lift_point"][1], action["lift_point"][0]]
action["typed_text"] = action["typed_text"].lower()
return action
def pred_2_format_simplified(step_data):
# Convert the (simplified) model output into the format used for computing action matching
action_type = step_data["action_type"]
if action_type == 'click': # click
action_type_new = 4
touch_point = step_data["click_point"]
lift_point = step_data["click_point"]
typed_text = ""
elif action_type == 'scroll' and step_data["direction"] == 'down':
action_type_new = 4
touch_point = [0.5, 0.8]
lift_point = [0.5, 0.2]
typed_text = ""
elif action_type == 'scroll' and step_data["direction"] == 'up':
action_type_new = 4
touch_point = [0.5, 0.2]
lift_point = [0.5, 0.8]
typed_text = ""
elif action_type == 'scroll' and step_data["direction"] == 'left':
action_type_new = 4
touch_point = [0.2, 0.5]
lift_point = [0.8, 0.5]
typed_text = ""
elif action_type == 'scroll' and step_data["direction"] == 'right':
action_type_new = 4
touch_point = [0.8, 0.5]
lift_point = [0.2, 0.5]
typed_text = ""
elif action_type == 'type':
action_type_new = 3
touch_point = [-1.0, -1.0]
lift_point = [-1.0, -1.0]
typed_text = step_data["text"]
elif action_type == 'navigate_back':
action_type_new = 5
touch_point = [-1.0, -1.0]
lift_point = [-1.0, -1.0]
typed_text = ""
elif action_type == 'navigate_home':
action_type_new = 6
touch_point = [-1.0, -1.0]
lift_point = [-1.0, -1.0]
typed_text = ""
else:
action_type_new = action_type
touch_point = [-1.0, -1.0]
lift_point = [-1.0, -1.0]
typed_text = ""
# if action_type_new == 'type':
# typed_text = step_data["text"]
action = {"action_type": action_type_new, "touch_point": touch_point, "lift_point": lift_point,
"typed_text": typed_text}
action["touch_point"] = [action["touch_point"][1], action["touch_point"][0]]
action["lift_point"] = [action["lift_point"][1], action["lift_point"][0]]
action["typed_text"] = action["typed_text"].lower()
return action

View File

@@ -1,45 +0,0 @@
'''
Adapted from https://github.com/google-research/google-research/tree/master/android_in_the_wild
'''
import enum
class ActionType(enum.IntEnum):
# Placeholders for unused enum values
UNUSED_0 = 0
UNUSED_1 = 1
UNUSED_2 = 2
UNUSED_8 = 8
UNUSED_9 = 9
########### Agent actions ###########
# A type action that sends text to the emulator. Note that this simply sends
# text and does not perform any clicks for element focus or enter presses for
# submitting text.
TYPE = 3
# The dual point action used to represent all gestures.
DUAL_POINT = 4
# These actions differentiate pressing the home and back button from touches.
# They represent explicit presses of back and home performed using ADB.
PRESS_BACK = 5
PRESS_HOME = 6
# An action representing that ADB command for hitting enter was performed.
PRESS_ENTER = 7
########### Episode status actions ###########
# An action used to indicate the desired task has been completed and resets
# the environment. This action should also be used in the case that the task
# has already been completed and there is nothing to do.
# e.g. The task is to turn on the Wi-Fi when it is already on
STATUS_TASK_COMPLETE = 10
# An action used to indicate that desired task is impossible to complete and
# resets the environment. This can be a result of many different things
# including UI changes, Android version differences, etc.
STATUS_TASK_IMPOSSIBLE = 11

View File

@@ -1,262 +1,262 @@
from typing import List, Optional, Union, Tuple
import cv2
import numpy as np
from supervision.detection.core import Detections
from supervision.draw.color import Color, ColorPalette
class BoxAnnotator:
"""
A class for drawing bounding boxes on an image using detections provided.
Attributes:
color (Union[Color, ColorPalette]): The color to draw the bounding box,
can be a single color or a color palette
thickness (int): The thickness of the bounding box lines, default is 2
text_color (Color): The color of the text on the bounding box, default is white
text_scale (float): The scale of the text on the bounding box, default is 0.5
text_thickness (int): The thickness of the text on the bounding box,
default is 1
text_padding (int): The padding around the text on the bounding box,
default is 5
"""
def __init__(
self,
color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
thickness: int = 3, # 1 for seeclick 2 for mind2web and 3 for demo
text_color: Color = Color.BLACK,
text_scale: float = 0.5, # 0.8 for mobile/web, 0.3 for desktop # 0.4 for mind2web
text_thickness: int = 2, #1, # 2 for demo
text_padding: int = 10,
avoid_overlap: bool = True,
):
self.color: Union[Color, ColorPalette] = color
self.thickness: int = thickness
self.text_color: Color = text_color
self.text_scale: float = text_scale
self.text_thickness: int = text_thickness
self.text_padding: int = text_padding
self.avoid_overlap: bool = avoid_overlap
def annotate(
self,
scene: np.ndarray,
detections: Detections,
labels: Optional[List[str]] = None,
skip_label: bool = False,
image_size: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
"""
Draws bounding boxes on the frame using the detections provided.
Args:
scene (np.ndarray): The image on which the bounding boxes will be drawn
detections (Detections): The detections for which the
bounding boxes will be drawn
labels (Optional[List[str]]): An optional list of labels
corresponding to each detection. If `labels` are not provided,
corresponding `class_id` will be used as label.
skip_label (bool): If set to `True`, skips bounding box label annotation.
Returns:
np.ndarray: The image with the bounding boxes drawn on it
Example:
```python
import supervision as sv
classes = ['person', ...]
image = ...
detections = sv.Detections(...)
box_annotator = sv.BoxAnnotator()
labels = [
f"{classes[class_id]} {confidence:0.2f}"
for _, _, confidence, class_id, _ in detections
]
annotated_frame = box_annotator.annotate(
scene=image.copy(),
detections=detections,
labels=labels
)
```
"""
font = cv2.FONT_HERSHEY_SIMPLEX
for i in range(len(detections)):
x1, y1, x2, y2 = detections.xyxy[i].astype(int)
class_id = (
detections.class_id[i] if detections.class_id is not None else None
)
idx = class_id if class_id is not None else i
color = (
self.color.by_idx(idx)
if isinstance(self.color, ColorPalette)
else self.color
)
cv2.rectangle(
img=scene,
pt1=(x1, y1),
pt2=(x2, y2),
color=color.as_bgr(),
thickness=self.thickness,
)
if skip_label:
continue
text = (
f"{class_id}"
if (labels is None or len(detections) != len(labels))
else labels[i]
)
text_width, text_height = cv2.getTextSize(
text=text,
fontFace=font,
fontScale=self.text_scale,
thickness=self.text_thickness,
)[0]
if not self.avoid_overlap:
text_x = x1 + self.text_padding
text_y = y1 - self.text_padding
text_background_x1 = x1
text_background_y1 = y1 - 2 * self.text_padding - text_height
text_background_x2 = x1 + 2 * self.text_padding + text_width
text_background_y2 = y1
# text_x = x1 - self.text_padding - text_width
# text_y = y1 + self.text_padding + text_height
# text_background_x1 = x1 - 2 * self.text_padding - text_width
# text_background_y1 = y1
# text_background_x2 = x1
# text_background_y2 = y1 + 2 * self.text_padding + text_height
else:
text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2 = get_optimal_label_pos(self.text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size)
cv2.rectangle(
img=scene,
pt1=(text_background_x1, text_background_y1),
pt2=(text_background_x2, text_background_y2),
color=color.as_bgr(),
thickness=cv2.FILLED,
)
# import pdb; pdb.set_trace()
box_color = color.as_rgb()
luminance = 0.299 * box_color[0] + 0.587 * box_color[1] + 0.114 * box_color[2]
text_color = (0,0,0) if luminance > 160 else (255,255,255)
cv2.putText(
img=scene,
text=text,
org=(text_x, text_y),
fontFace=font,
fontScale=self.text_scale,
# color=self.text_color.as_rgb(),
color=text_color,
thickness=self.text_thickness,
lineType=cv2.LINE_AA,
)
return scene
def box_area(box):
return (box[2] - box[0]) * (box[3] - box[1])
def intersection_area(box1, box2):
x1 = max(box1[0], box2[0])
y1 = max(box1[1], box2[1])
x2 = min(box1[2], box2[2])
y2 = min(box1[3], box2[3])
return max(0, x2 - x1) * max(0, y2 - y1)
def IoU(box1, box2, return_max=True):
intersection = intersection_area(box1, box2)
union = box_area(box1) + box_area(box2) - intersection
if box_area(box1) > 0 and box_area(box2) > 0:
ratio1 = intersection / box_area(box1)
ratio2 = intersection / box_area(box2)
else:
ratio1, ratio2 = 0, 0
if return_max:
return max(intersection / union, ratio1, ratio2)
else:
return intersection / union
def get_optimal_label_pos(text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size):
""" check overlap of text and background detection box, and get_optimal_label_pos,
pos: str, position of the text, must be one of 'top left', 'top right', 'outer left', 'outer right' TODO: if all are overlapping, return the last one, i.e. outer right
Threshold: default to 0.3
"""
def get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size):
is_overlap = False
for i in range(len(detections)):
detection = detections.xyxy[i].astype(int)
if IoU([text_background_x1, text_background_y1, text_background_x2, text_background_y2], detection) > 0.3:
is_overlap = True
break
# check if the text is out of the image
if text_background_x1 < 0 or text_background_x2 > image_size[0] or text_background_y1 < 0 or text_background_y2 > image_size[1]:
is_overlap = True
return is_overlap
# if pos == 'top left':
text_x = x1 + text_padding
text_y = y1 - text_padding
text_background_x1 = x1
text_background_y1 = y1 - 2 * text_padding - text_height
text_background_x2 = x1 + 2 * text_padding + text_width
text_background_y2 = y1
is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
if not is_overlap:
return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
# elif pos == 'outer left':
text_x = x1 - text_padding - text_width
text_y = y1 + text_padding + text_height
text_background_x1 = x1 - 2 * text_padding - text_width
text_background_y1 = y1
text_background_x2 = x1
text_background_y2 = y1 + 2 * text_padding + text_height
is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
if not is_overlap:
return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
# elif pos == 'outer right':
text_x = x2 + text_padding
text_y = y1 + text_padding + text_height
text_background_x1 = x2
text_background_y1 = y1
text_background_x2 = x2 + 2 * text_padding + text_width
text_background_y2 = y1 + 2 * text_padding + text_height
is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
if not is_overlap:
return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
# elif pos == 'top right':
text_x = x2 - text_padding - text_width
text_y = y1 - text_padding
text_background_x1 = x2 - 2 * text_padding - text_width
text_background_y1 = y1 - 2 * text_padding - text_height
text_background_x2 = x2
text_background_y2 = y1
is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
if not is_overlap:
return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
from typing import List, Optional, Union, Tuple

import cv2
import numpy as np
from supervision.detection.core import Detections
from supervision.draw.color import Color, ColorPalette


class BoxAnnotator:
    """
    A class for drawing bounding boxes on an image using detections provided.

    Attributes:
        color (Union[Color, ColorPalette]): The color to draw the bounding box,
            can be a single color or a color palette
        thickness (int): The thickness of the bounding box lines, default is 2
        text_color (Color): The color of the text on the bounding box, default is white
        text_scale (float): The scale of the text on the bounding box, default is 0.5
        text_thickness (int): The thickness of the text on the bounding box,
            default is 1
        text_padding (int): The padding around the text on the bounding box,
            default is 5
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 3,  # 1 for seeclick, 2 for mind2web, 3 for demo
        text_color: Color = Color.BLACK,
        text_scale: float = 0.5,  # 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
        text_thickness: int = 2,  # 1 normally, 2 for demo
        text_padding: int = 10,
        avoid_overlap: bool = True,
    ):
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.text_color: Color = text_color
        self.text_scale: float = text_scale
        self.text_thickness: int = text_thickness
        self.text_padding: int = text_padding
        self.avoid_overlap: bool = avoid_overlap

    def annotate(
        self,
        scene: np.ndarray,
        detections: Detections,
        labels: Optional[List[str]] = None,
        skip_label: bool = False,
        image_size: Optional[Tuple[int, int]] = None,
    ) -> np.ndarray:
        """
        Draws bounding boxes on the frame using the detections provided.

        Args:
            scene (np.ndarray): The image on which the bounding boxes will be drawn
            detections (Detections): The detections for which the
                bounding boxes will be drawn
            labels (Optional[List[str]]): An optional list of labels
                corresponding to each detection. If `labels` is not provided,
                the corresponding `class_id` will be used as the label.
            skip_label (bool): If set to `True`, skips bounding box label annotation.
        Returns:
            np.ndarray: The image with the bounding boxes drawn on it

        Example:
            ```python
            import supervision as sv

            classes = ['person', ...]
            image = ...
            detections = sv.Detections(...)

            box_annotator = sv.BoxAnnotator()
            labels = [
                f"{classes[class_id]} {confidence:0.2f}"
                for _, _, confidence, class_id, _ in detections
            ]
            annotated_frame = box_annotator.annotate(
                scene=image.copy(),
                detections=detections,
                labels=labels
            )
            ```
        """
        font = cv2.FONT_HERSHEY_SIMPLEX
        for i in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[i].astype(int)
            class_id = (
                detections.class_id[i] if detections.class_id is not None else None
            )
            idx = class_id if class_id is not None else i
            color = (
                self.color.by_idx(idx)
                if isinstance(self.color, ColorPalette)
                else self.color
            )
            cv2.rectangle(
                img=scene,
                pt1=(x1, y1),
                pt2=(x2, y2),
                color=color.as_bgr(),
                thickness=self.thickness,
            )
            if skip_label:
                continue

            text = (
                f"{class_id}"
                if (labels is None or len(detections) != len(labels))
                else labels[i]
            )

            text_width, text_height = cv2.getTextSize(
                text=text,
                fontFace=font,
                fontScale=self.text_scale,
                thickness=self.text_thickness,
            )[0]

            if not self.avoid_overlap:
                # fixed placement: label background anchored at the box's top-left corner
                text_x = x1 + self.text_padding
                text_y = y1 - self.text_padding
                text_background_x1 = x1
                text_background_y1 = y1 - 2 * self.text_padding - text_height
                text_background_x2 = x1 + 2 * self.text_padding + text_width
                text_background_y2 = y1
            else:
                text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2 = get_optimal_label_pos(self.text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size)

            cv2.rectangle(
                img=scene,
                pt1=(text_background_x1, text_background_y1),
                pt2=(text_background_x2, text_background_y2),
                color=color.as_bgr(),
                thickness=cv2.FILLED,
            )
            # pick black or white text depending on the luminance of the box color
            box_color = color.as_rgb()
            luminance = 0.299 * box_color[0] + 0.587 * box_color[1] + 0.114 * box_color[2]
            text_color = (0, 0, 0) if luminance > 160 else (255, 255, 255)
            cv2.putText(
                img=scene,
                text=text,
                org=(text_x, text_y),
                fontFace=font,
                fontScale=self.text_scale,
                color=text_color,
                thickness=self.text_thickness,
                lineType=cv2.LINE_AA,
            )
        return scene
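For orientation, here is a minimal usage sketch of this repo's BoxAnnotator (as opposed to the stock supervision one). The screenshot path and box coordinates are invented for illustration; `image_size` is (width, height) and is used to keep labels inside the frame:

```python
# Hypothetical usage sketch for the BoxAnnotator above (values are invented).
import cv2
import numpy as np
from supervision.detection.core import Detections

image = cv2.imread("screenshot.png")  # assumed input image (BGR)
h, w = image.shape[:2]
detections = Detections(xyxy=np.array([[40, 40, 200, 90],
                                       [60, 70, 220, 120]], dtype=float))
annotator = BoxAnnotator(thickness=2, text_scale=0.5, avoid_overlap=True)
# image_size is (width, height); avoid_overlap shifts labels off other boxes
annotated = annotator.annotate(scene=image.copy(), detections=detections,
                               labels=["3", "7"], image_size=(w, h))
cv2.imwrite("annotated.png", annotated)
```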
def box_area(box):
    return (box[2] - box[0]) * (box[3] - box[1])


def intersection_area(box1, box2):
    x1 = max(box1[0], box2[0])
    y1 = max(box1[1], box2[1])
    x2 = min(box1[2], box2[2])
    y2 = min(box1[3], box2[3])
    return max(0, x2 - x1) * max(0, y2 - y1)


def IoU(box1, box2, return_max=True):
    intersection = intersection_area(box1, box2)
    union = box_area(box1) + box_area(box2) - intersection
    if union == 0:
        # both boxes are degenerate; avoid division by zero
        return 0
    if box_area(box1) > 0 and box_area(box2) > 0:
        ratio1 = intersection / box_area(box1)
        ratio2 = intersection / box_area(box2)
    else:
        ratio1, ratio2 = 0, 0
    if return_max:
        # also consider how much of each individual box is covered, so a small
        # box fully inside a large one still counts as overlapping
        return max(intersection / union, ratio1, ratio2)
    else:
        return intersection / union
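A quick numeric illustration of why `return_max` matters, with made-up boxes: a small box fully contained in a large one has a tiny plain IoU but a containment ratio of 1.0, so it still trips the 0.3 overlap threshold used below.

```python
# Hypothetical example: containment vs. plain IoU (boxes are invented).
small = [10, 10, 20, 20]   # area 100
large = [0, 0, 100, 100]   # area 10000, fully contains `small`

print(IoU(small, large, return_max=False))  # 100 / 10000 = 0.01
print(IoU(small, large))                    # max(0.01, 1.0, 0.01) = 1.0
```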
def get_optimal_label_pos(text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size):
    """Find a label position whose text background box does not overlap the detections.

    Candidate positions are tried in order: 'top left', 'outer left', 'outer right',
    'top right'. A candidate is rejected when its background box overlaps any
    detection with IoU above the 0.3 threshold, or when it falls outside the image.
    If every candidate overlaps, the last one ('top right') is returned anyway.
    """
    def get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size):
        is_overlap = False
        for i in range(len(detections)):
            detection = detections.xyxy[i].astype(int)
            if IoU([text_background_x1, text_background_y1, text_background_x2, text_background_y2], detection) > 0.3:
                is_overlap = True
                break
        # also reject candidates whose background box falls outside the image
        if text_background_x1 < 0 or text_background_x2 > image_size[0] or text_background_y1 < 0 or text_background_y2 > image_size[1]:
            is_overlap = True
        return is_overlap

    # candidate: top left
    text_x = x1 + text_padding
    text_y = y1 - text_padding
    text_background_x1 = x1
    text_background_y1 = y1 - 2 * text_padding - text_height
    text_background_x2 = x1 + 2 * text_padding + text_width
    text_background_y2 = y1
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # candidate: outer left
    text_x = x1 - text_padding - text_width
    text_y = y1 + text_padding + text_height
    text_background_x1 = x1 - 2 * text_padding - text_width
    text_background_y1 = y1
    text_background_x2 = x1
    text_background_y2 = y1 + 2 * text_padding + text_height
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # candidate: outer right
    text_x = x2 + text_padding
    text_y = y1 + text_padding + text_height
    text_background_x1 = x2
    text_background_y1 = y1
    text_background_x2 = x2 + 2 * text_padding + text_width
    text_background_y2 = y1 + 2 * text_padding + text_height
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # candidate: top right (also the fallback when every candidate overlaps)
    text_x = x2 - text_padding - text_width
    text_y = y1 - text_padding
    text_background_x1 = x2 - 2 * text_padding - text_width
    text_background_y1 = y1 - 2 * text_padding - text_height
    text_background_x2 = x2
    text_background_y2 = y1
    return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
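To see the candidate order in action, a small hypothetical demo (all coordinates invented): a detection sits just above the target box, so the 'top left' candidate is rejected and the label falls through to 'outer left'.

```python
# Hypothetical demo of the fallback order (coordinates are invented).
import numpy as np
from supervision.detection.core import Detections

blocker = Detections(xyxy=np.array([[100, 70, 150, 100]], dtype=float))
# The 'top left' background would be (100, 80, 150, 100), which sits inside
# `blocker`, so the function falls through to the 'outer left' candidate.
pos = get_optimal_label_pos(
    text_padding=5, text_width=40, text_height=10,
    x1=100, y1=100, x2=200, y2=200,
    detections=blocker, image_size=(400, 400))
print(pos)  # (55, 115, 50, 100, 100, 130)
```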

32
util/omniparser.py Normal file

@@ -0,0 +1,32 @@
from util.utils import get_som_labeled_img, get_caption_model_processor, get_yolo_model, check_ocr_box
import torch
from PIL import Image
import io
import base64
from typing import Dict


class Omniparser(object):
    def __init__(self, config: Dict):
        self.config = config
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.som_model = get_yolo_model(model_path=config['som_model_path'])
        self.caption_model_processor = get_caption_model_processor(model_name=config['caption_model_name'], model_name_or_path=config['caption_model_path'], device=device)
        print('Omniparser initialized!!!')

    def parse(self, image_base64: str):
        image_bytes = base64.b64decode(image_base64)
        image = Image.open(io.BytesIO(image_bytes))
        print('image size:', image.size)

        box_overlay_ratio = max(image.size) / 3200
        draw_bbox_config = {
            'text_scale': 0.8 * box_overlay_ratio,
            'text_thickness': max(int(2 * box_overlay_ratio), 1),
            'text_padding': max(int(3 * box_overlay_ratio), 1),
            'thickness': max(int(3 * box_overlay_ratio), 1),
        }

        (text, ocr_bbox), _ = check_ocr_box(image, display_img=False, output_bb_format='xyxy', easyocr_args={'text_threshold': 0.8}, use_paddleocr=False)
        dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image, self.som_model, BOX_TRESHOLD=self.config['BOX_TRESHOLD'], output_coord_in_ratio=True, ocr_bbox=ocr_bbox, draw_bbox_config=draw_bbox_config, caption_model_processor=self.caption_model_processor, ocr_text=text, use_local_semantics=True, iou_threshold=0.7, scale_img=False, batch_size=128)

        return dino_labled_img, parsed_content_list
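A minimal driver sketch for the new wrapper. The config keys match the constructor above, but the weight paths, threshold value, and input file are assumptions for illustration, not repo defaults:

```python
# Hypothetical driver for Omniparser (paths and threshold are assumptions).
import base64

config = {
    'som_model_path': 'weights/icon_detect/model.pt',       # assumed path
    'caption_model_name': 'florence2',
    'caption_model_path': 'weights/icon_caption_florence',  # assumed path
    'BOX_TRESHOLD': 0.05,                                   # assumed value
}
parser = Omniparser(config)

with open('screenshot.png', 'rb') as f:
    image_base64 = base64.b64encode(f.read()).decode('utf-8')

labeled_img_base64, parsed_content_list = parser.parse(image_base64)
for elem in parsed_content_list:
    print(elem)
```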

132
utils.py → util/utils.py Executable file → Normal file

@@ -20,27 +20,20 @@ from matplotlib import pyplot as plt
 import easyocr
 from paddleocr import PaddleOCR
 reader = easyocr.Reader(['en'])
-paddle_ocr = PaddleOCR(
-    lang='en',  # other lang also available
-    use_angle_cls=False,
-    use_gpu=False,  # using cuda will conflict with pytorch in the same process
-    show_log=False,
-    max_batch_size=1024,
-    use_dilation=True,  # improves accuracy
-    det_db_score_mode='slow',  # improves accuracy
-    rec_batch_num=1024)
+paddle_ocr = PaddleOCR(lang='en')
 import time
 import base64
 import os
 import ast
 import torch
-from typing import Tuple, List
+from typing import Tuple, List, Union
 from torchvision.ops import box_convert
 import re
 from torchvision.transforms import ToPILImage
 import supervision as sv
 import torchvision.transforms as T
+from util.box_annotator import BoxAnnotator

 def get_caption_model_processor(model_name, model_name_or_path="Salesforce/blip2-opt-2.7b", device=None):
@@ -58,12 +51,12 @@ def get_caption_model_processor(model_name, model_name_or_path="Salesforce/blip2
             model_name_or_path, device_map=None, torch_dtype=torch.float16
         ).to(device)
     elif model_name == "florence2":
         from transformers import AutoProcessor, AutoModelForCausalLM
         processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
         if device == 'cpu':
-            model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float32, trust_remote_code=True)
+            model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float32, trust_remote_code=True, attn_implementation="eager")
        else:
-            model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, trust_remote_code=True).to(device)
+            model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, trust_remote_code=True, attn_implementation="eager").to(device)
     return {'model': model.to(device), 'processor': processor}
@@ -75,9 +68,8 @@ def get_yolo_model(model_path):
 @torch.inference_mode()
-def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=None, batch_size=32):
-    # Number of samples per batch, --> 256 roughly takes 23 GB of GPU memory for florence model
+def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=None, batch_size=128):
+    # Number of samples per batch, --> 128 roughly takes 4 GB of GPU memory for florence v2 model
     to_pil = ToPILImage()
     if starting_idx:
         non_ocr_boxes = filtered_boxes[starting_idx:]
@@ -85,10 +77,14 @@ def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_
         non_ocr_boxes = filtered_boxes
     croped_pil_image = []
     for i, coord in enumerate(non_ocr_boxes):
-        xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
-        ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
-        cropped_image = image_source[ymin:ymax, xmin:xmax, :]
-        croped_pil_image.append(to_pil(cropped_image))
+        try:
+            xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
+            ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
+            cropped_image = image_source[ymin:ymax, xmin:xmax, :]
+            cropped_image = cv2.resize(cropped_image, (64, 64))
+            croped_pil_image.append(to_pil(cropped_image))
+        except:
+            continue
     model, processor = caption_model_processor['model'], caption_model_processor['processor']
     if not prompt:
@@ -102,12 +98,13 @@ def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_
     for i in range(0, len(croped_pil_image), batch_size):
         start = time.time()
         batch = croped_pil_image[i:i+batch_size]
+        t1 = time.time()
         if model.device.type == 'cuda':
-            inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt").to(device=device, dtype=torch.float16)
+            inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt", do_resize=False).to(device=device, dtype=torch.float16)
         else:
             inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt").to(device=device)
         if 'florence' in model.config.name_or_path:
-            generated_ids = model.generate(input_ids=inputs["input_ids"], pixel_values=inputs["pixel_values"], max_new_tokens=100, num_beams=3, do_sample=False)
+            generated_ids = model.generate(input_ids=inputs["input_ids"], pixel_values=inputs["pixel_values"], max_new_tokens=20, num_beams=1, do_sample=False)
         else:
             generated_ids = model.generate(**inputs, max_length=100, num_beams=5, no_repeat_ngram_size=2, early_stopping=True, num_return_sequences=1)  # temperature=0.01, do_sample=True
         generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
@@ -272,10 +269,10 @@ def remove_overlap_new(boxes, iou_threshold, ocr_bbox=None):
                 is_valid_box = False
                 break
         if is_valid_box:
-            # add the following 2 lines to include ocr bbox
             if ocr_bbox:
+                # keep yolo boxes + prioritize ocr label
                 box_added = False
+                ocr_labels = ''
                 for box3_elem in ocr_bbox:
                     if not box_added:
                         box3 = box3_elem['bbox']
@@ -283,25 +280,22 @@
                             # box_added = True
                             # delete the box3_elem from ocr_bbox
                             try:
-                                filtered_boxes.append({'type': 'text', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': box3_elem['content']})
+                                # gather all ocr labels
+                                ocr_labels += box3_elem['content'] + ' '
                                 filtered_boxes.remove(box3_elem)
-                                # print('remove ocr bbox:', box3_elem)
                             except:
                                 continue
-                            # break
-                        elif is_inside(box1, box3):  # icon inside ocr
+                        elif is_inside(box1, box3):  # icon inside ocr: don't add this icon box; OCR boxes don't overlap each other, so an icon can sit inside at most one of them
                             box_added = True
-                            # try:
-                            #     filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': None})
-                            #     filtered_boxes.remove(box3_elem)
-                            # except:
-                            #     continue
                             break
                         else:
                             continue
                 if not box_added:
-                    filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': None})
+                    if ocr_labels:
+                        filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': ocr_labels, 'source': 'box_yolo_content_ocr'})
+                    else:
+                        filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': None, 'source': 'box_yolo_content_yolo'})
             else:
                 filtered_boxes.append(box1)
     return filtered_boxes  # torch.tensor(filtered_boxes)
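The net effect of this hunk: every merged YOLO box now carries a 'source' tag, and a box that contains OCR text inherits the concatenated OCR strings. Roughly, with hypothetical values:

```python
# Hypothetical shapes of the elements remove_overlap_new now emits (values invented).
yolo_box_with_text = {
    'type': 'icon', 'bbox': [0.10, 0.20, 0.30, 0.25],
    'interactivity': True,
    'content': 'Submit ',            # gathered OCR labels
    'source': 'box_yolo_content_ocr',
}
yolo_box_without_text = {
    'type': 'icon', 'bbox': [0.50, 0.20, 0.55, 0.25],
    'interactivity': True,
    'content': None,                 # to be filled in by the caption model
    'source': 'box_yolo_content_yolo',
}
```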
@@ -344,7 +338,6 @@ def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor
     labels = [f"{phrase}" for phrase in range(boxes.shape[0])]
-    from util.box_annotator import BoxAnnotator
     box_annotator = BoxAnnotator(text_scale=text_scale, text_padding=text_padding, text_thickness=text_thickness, thickness=thickness)  # 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
     annotated_frame = image_source.copy()
     annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels, image_size=(w, h))
@@ -374,20 +367,20 @@ def predict(model, image, caption, box_threshold, text_threshold):
     return boxes, logits, phrases

-def predict_yolo(model, image_path, box_threshold, imgsz, scale_img, iou_threshold=0.7):
+def predict_yolo(model, image, box_threshold, imgsz, scale_img, iou_threshold=0.7):
     """ Use huggingface model to replace the original model
     """
     # model = model['model']
     if scale_img:
         result = model.predict(
-            source=image_path,
+            source=image,
             conf=box_threshold,
             imgsz=imgsz,
             iou=iou_threshold,  # default 0.7
         )
     else:
         result = model.predict(
-            source=image_path,
+            source=image,
             conf=box_threshold,
             iou=iou_threshold,  # default 0.7
         )
@@ -397,34 +390,41 @@ def predict_yolo(model, image_path, box_threshold, imgsz, scale_img, iou_thresho
     return boxes, conf, phrases

+def int_box_area(box, w, h):
+    x1, y1, x2, y2 = box
+    int_box = [int(x1*w), int(y1*h), int(x2*w), int(y2*h)]
+    area = (int_box[2] - int_box[0]) * (int_box[3] - int_box[1])
+    return area

-def get_som_labeled_img(img_path, model=None, BOX_TRESHOLD = 0.01, output_coord_in_ratio=False, ocr_bbox=None, text_scale=0.4, text_padding=5, draw_bbox_config=None, caption_model_processor=None, ocr_text=[], use_local_semantics=True, iou_threshold=0.9, prompt=None, scale_img=False, imgsz=None, batch_size=None):
-    """ ocr_bbox: list of xyxy format bbox
-    """
-    image_source = Image.open(img_path).convert("RGB")
+def get_som_labeled_img(image_source: Union[str, Image.Image], model=None, BOX_TRESHOLD=0.01, output_coord_in_ratio=False, ocr_bbox=None, text_scale=0.4, text_padding=5, draw_bbox_config=None, caption_model_processor=None, ocr_text=[], use_local_semantics=True, iou_threshold=0.9, prompt=None, scale_img=False, imgsz=None, batch_size=128):
+    """Process either an image path or Image object
+    Args:
+        image_source: Either a file path (str) or PIL Image object
+        ...
+    """
+    if isinstance(image_source, str):
+        image_source = Image.open(image_source)
+    image_source = image_source.convert("RGB")  # for CLIP
     w, h = image_source.size
     if not imgsz:
         imgsz = (h, w)
     # print('image size:', w, h)
-    xyxy, logits, phrases = predict_yolo(model=model, image_path=img_path, box_threshold=BOX_TRESHOLD, imgsz=imgsz, scale_img=scale_img, iou_threshold=0.1)
+    xyxy, logits, phrases = predict_yolo(model=model, image=image_source, box_threshold=BOX_TRESHOLD, imgsz=imgsz, scale_img=scale_img, iou_threshold=0.1)
     xyxy = xyxy / torch.Tensor([w, h, w, h]).to(xyxy.device)
     image_source = np.asarray(image_source)
     phrases = [str(i) for i in range(len(phrases))]

     # annotate the image with labels
     h, w, _ = image_source.shape
     if ocr_bbox:
         ocr_bbox = torch.tensor(ocr_bbox) / torch.Tensor([w, h, w, h])
         ocr_bbox = ocr_bbox.tolist()
     else:
         print('no ocr bbox!!!')
         ocr_bbox = None
-    # filtered_boxes = remove_overlap(boxes=xyxy, iou_threshold=iou_threshold, ocr_bbox=ocr_bbox)
-    # starting_idx = len(ocr_bbox)
-    # print('len(filtered_boxes):', len(filtered_boxes), starting_idx)
-    ocr_bbox_elem = [{'type': 'text', 'bbox': box, 'interactivity': False, 'content': txt} for box, txt in zip(ocr_bbox, ocr_text)]
-    xyxy_elem = [{'type': 'icon', 'bbox': box, 'interactivity': True, 'content': None} for box in xyxy.tolist()]
+    ocr_bbox_elem = [{'type': 'text', 'bbox': box, 'interactivity': False, 'content': txt, 'source': 'box_ocr_content_ocr'} for box, txt in zip(ocr_bbox, ocr_text) if int_box_area(box, w, h) > 0]
+    xyxy_elem = [{'type': 'icon', 'bbox': box, 'interactivity': True, 'content': None} for box in xyxy.tolist() if int_box_area(box, w, h) > 0]
     filtered_boxes = remove_overlap_new(boxes=xyxy_elem, iou_threshold=iou_threshold, ocr_bbox=ocr_bbox_elem)

     # sort the filtered_boxes so that the one with 'content': None is at the end, and get the index of the first 'content': None
@@ -432,9 +432,10 @@ def get_som_labeled_img(img_path, model=None, BOX_TRESHOLD = 0.01, output_coord_
     # get the index of the first 'content': None
     starting_idx = next((i for i, box in enumerate(filtered_boxes_elem) if box['content'] is None), -1)
     filtered_boxes = torch.tensor([box['bbox'] for box in filtered_boxes_elem])
+    print('len(filtered_boxes):', len(filtered_boxes), starting_idx)

     # get parsed icon local semantics
+    time1 = time.time()
     if use_local_semantics:
         caption_model = caption_model_processor['model']
         if 'phi3_v' in caption_model.config.model_type:
@@ -454,6 +455,7 @@ def get_som_labeled_img(img_path, model=None, BOX_TRESHOLD = 0.01, output_coord_
     else:
         ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
         parsed_content_merged = ocr_text
+    print('time to get parsed content:', time.time()-time1)

     filtered_boxes = box_convert(boxes=filtered_boxes, in_fmt="xyxy", out_fmt="cxcywh")
@@ -470,7 +472,6 @@ def get_som_labeled_img(img_path, model=None, BOX_TRESHOLD = 0.01, output_coord_
     pil_img.save(buffered, format="PNG")
     encoded_image = base64.b64encode(buffered.getvalue()).decode('ascii')
     if output_coord_in_ratio:
-        # h, w, _ = image_source.shape
         label_coordinates = {k: [v[0]/w, v[1]/h, v[2]/w, v[3]/h] for k, v in label_coordinates.items()}
         assert w == annotated_frame.shape[1] and h == annotated_frame.shape[0]
@@ -491,46 +492,41 @@ def get_xywh_yolo(input):
     x, y, w, h = input[0], input[1], input[2] - input[0], input[3] - input[1]
     x, y, w, h = int(x), int(y), int(w), int(h)
     return x, y, w, h

-def check_ocr_box(image_path, display_img=True, output_bb_format='xywh', goal_filtering=None, easyocr_args=None, use_paddleocr=False):
+def check_ocr_box(image_source: Union[str, Image.Image], display_img=True, output_bb_format='xywh', goal_filtering=None, easyocr_args=None, use_paddleocr=False):
+    if isinstance(image_source, str):
+        image_source = Image.open(image_source)
+    if image_source.mode == 'RGBA':
+        # Convert RGBA to RGB to avoid alpha channel issues
+        image_source = image_source.convert('RGB')
+    image_np = np.array(image_source)
+    w, h = image_source.size
     if use_paddleocr:
         if easyocr_args is None:
             text_threshold = 0.5
         else:
             text_threshold = easyocr_args['text_threshold']
-        result = paddle_ocr.ocr(image_path, cls=False)[0]
-        conf = [item[1] for item in result]
+        result = paddle_ocr.ocr(image_np, cls=False)[0]
         coord = [item[0] for item in result if item[1][1] > text_threshold]
         text = [item[1][0] for item in result if item[1][1] > text_threshold]
     else:  # EasyOCR
         if easyocr_args is None:
             easyocr_args = {}
-        result = reader.readtext(image_path, **easyocr_args)
-        # print('goal filtering pred:', result[-5:])
+        result = reader.readtext(image_np, **easyocr_args)
         coord = [item[0] for item in result]
         text = [item[1] for item in result]
-    # read the image using cv2
     if display_img:
-        opencv_img = cv2.imread(image_path)
-        opencv_img = cv2.cvtColor(opencv_img, cv2.COLOR_RGB2BGR)
+        opencv_img = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
         bb = []
         for item in coord:
             x, y, a, b = get_xywh(item)
             # print(x, y, a, b)
             bb.append((x, y, a, b))
             cv2.rectangle(opencv_img, (x, y), (x+a, y+b), (0, 255, 0), 2)
-        # Display the image
-        plt.imshow(opencv_img)
+        # matplotlib expects RGB
+        plt.imshow(cv2.cvtColor(opencv_img, cv2.COLOR_BGR2RGB))
     else:
         if output_bb_format == 'xywh':
             bb = [get_xywh(item) for item in coord]
         elif output_bb_format == 'xyxy':
             bb = [get_xyxy(item) for item in coord]
-    # print('bounding box!!!', bb)
-    return (text, bb), goal_filtering
+    return (text, bb), goal_filtering
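With the new `Union[str, Image.Image]` signature, `check_ocr_box` can be fed a PIL image directly. A short hypothetical call (the file name is invented):

```python
# Hypothetical call with a PIL image (file name is an assumption).
from PIL import Image

image = Image.open('screenshot.png')  # RGBA inputs are converted to RGB internally
(text, boxes), _ = check_ocr_box(
    image, display_img=False, output_bb_format='xyxy',
    easyocr_args={'text_threshold': 0.8}, use_paddleocr=False)
for t, box in zip(text, boxes):
    print(box, t)
```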


@@ -1,23 +0,0 @@
-import torch
-from ultralytics.nn.tasks import DetectionModel
-from safetensors.torch import load_file
-import argparse
-import yaml
-import os
-
-# accept args to specify v1
-parser = argparse.ArgumentParser(description='add weight directory')
-parser.add_argument('--weights_dir', type=str, required=True, help='Specify the path to the safetensor file', default='weights/icon_detect')
-args = parser.parse_args()
-
-tensor_dict = load_file(os.path.join(args.weights_dir, "model.safetensors"))
-model = DetectionModel(os.path.join(args.weights_dir, "model.yaml"))
-model.load_state_dict(tensor_dict)
-save_dict = {'model': model}
-with open(os.path.join(args.weights_dir, "train_args.yaml"), 'r') as file:
-    train_args = yaml.safe_load(file)
-save_dict.update(train_args)
-torch.save(save_dict, os.path.join(args.weights_dir, "best.pt"))


@@ -0,0 +1,239 @@
{
"_name_or_path": "microsoft/Florence-2-base-ft",
"architectures": [
"Florence2ForConditionalGeneration"
],
"auto_map": {
"AutoConfig": "microsoft/Florence-2-base-ft--configuration_florence2.Florence2Config",
"AutoModelForCausalLM": "microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration"
},
"bos_token_id": 2,
"eos_token_id": 1,
"ignore_index": -100,
"is_encoder_decoder": true,
"model_type": "florence2",
"pad_token_id": 0,
"projection_dim": 768,
"text_config": {
"_attn_implementation_autoset": true,
"_name_or_path": "",
"activation_dropout": 0.1,
"activation_function": "gelu",
"add_bias_logits": false,
"add_cross_attention": false,
"add_final_layer_norm": false,
"architectures": null,
"attention_dropout": 0.1,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": 0,
"chunk_size_feed_forward": 0,
"classif_dropout": 0.1,
"classifier_dropout": 0.0,
"cross_attention_hidden_size": null,
"d_model": 768,
"decoder_attention_heads": 12,
"decoder_ffn_dim": 3072,
"decoder_layerdrop": 0.0,
"decoder_layers": 6,
"decoder_start_token_id": 2,
"diversity_penalty": 0.0,
"do_sample": false,
"dropout": 0.1,
"early_stopping": true,
"encoder_attention_heads": 12,
"encoder_ffn_dim": 3072,
"encoder_layerdrop": 0.0,
"encoder_layers": 6,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": 0,
"forced_eos_token_id": 2,
"gradient_checkpointing": false,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1",
"2": "LABEL_2"
},
"init_std": 0.02,
"is_decoder": false,
"is_encoder_decoder": true,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1,
"LABEL_2": 2
},
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 1024,
"min_length": 0,
"model_type": "florence2_language",
"no_repeat_ngram_size": 3,
"normalize_before": false,
"num_beam_groups": 1,
"num_beams": 3,
"num_hidden_layers": 6,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 1,
"prefix": null,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"scale_embedding": false,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"typical_p": 1.0,
"use_bfloat16": false,
"use_cache": true,
"vocab_size": 51289
},
"torch_dtype": "float32",
"transformers_version": "4.46.1",
"vision_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": null,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"depths": [
1,
1,
9,
1
],
"dim_embed": [
128,
256,
512,
1024
],
"diversity_penalty": 0.0,
"do_sample": false,
"drop_path_rate": 0.1,
"early_stopping": false,
"enable_checkpoint": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": null,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"image_feature_source": [
"spatial_avg_pool",
"temporal_avg_pool"
],
"image_pos_embed": {
"max_pos_embeddings": 50,
"type": "learned_abs_2d"
},
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"min_length": 0,
"model_type": "davit",
"no_repeat_ngram_size": 0,
"num_beam_groups": 1,
"num_beams": 1,
"num_groups": [
4,
8,
16,
32
],
"num_heads": [
4,
8,
16,
32
],
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": null,
"patch_padding": [
3,
1,
1,
1
],
"patch_prenorm": [
false,
true,
true,
true
],
"patch_size": [
7,
3,
3,
3
],
"patch_stride": [
4,
2,
2,
2
],
"prefix": null,
"problem_type": null,
"projection_dim": 768,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"typical_p": 1.0,
"use_bfloat16": false,
"visual_temporal_embedding": {
"max_temporal_embeddings": 100,
"type": "COSINE"
},
"window_size": 12
},
"vocab_size": 51289
}
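This config pins the finetuned Florence-2 base with remote code. Per the utils.py hunk above, the model is loaded with eager attention; a hypothetical load, where the local checkpoint directory is an assumption:

```python
# Hypothetical load of the finetuned caption model (local path is an assumption).
import torch
from transformers import AutoProcessor, AutoModelForCausalLM

device = 'cuda' if torch.cuda.is_available() else 'cpu'
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "weights/icon_caption_florence",  # assumed local checkpoint dir
    torch_dtype=torch.float16 if device == 'cuda' else torch.float32,
    trust_remote_code=True,
    attn_implementation="eager",
).to(device)
```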


@@ -0,0 +1,13 @@
{
"_from_model_config": true,
"bos_token_id": 2,
"decoder_start_token_id": 2,
"early_stopping": true,
"eos_token_id": 1,
"forced_bos_token_id": 0,
"forced_eos_token_id": 2,
"no_repeat_ngram_size": 3,
"num_beams": 3,
"pad_token_id": 0,
"transformers_version": "4.46.1"
}


@@ -0,0 +1,129 @@
backbone:
- - -1
  - 1
  - Conv
  - - 64
    - 3
    - 2
- - -1
  - 1
  - Conv
  - - 128
    - 3
    - 2
- - -1
  - 3
  - C2f
  - - 128
    - true
- - -1
  - 1
  - Conv
  - - 256
    - 3
    - 2
- - -1
  - 6
  - C2f
  - - 256
    - true
- - -1
  - 1
  - Conv
  - - 512
    - 3
    - 2
- - -1
  - 6
  - C2f
  - - 512
    - true
- - -1
  - 1
  - Conv
  - - 1024
    - 3
    - 2
- - -1
  - 3
  - C2f
  - - 1024
    - true
- - -1
  - 1
  - SPPF
  - - 1024
    - 5
ch: 3
depth_multiple: 0.33
head:
- - -1
  - 1
  - nn.Upsample
  - - None
    - 2
    - nearest
- - - -1
    - 6
  - 1
  - Concat
  - - 1
- - -1
  - 3
  - C2f
  - - 512
- - -1
  - 1
  - nn.Upsample
  - - None
    - 2
    - nearest
- - - -1
    - 4
  - 1
  - Concat
  - - 1
- - -1
  - 3
  - C2f
  - - 256
- - -1
  - 1
  - Conv
  - - 256
    - 3
    - 2
- - - -1
    - 12
  - 1
  - Concat
  - - 1
- - -1
  - 3
  - C2f
  - - 512
- - -1
  - 1
  - Conv
  - - 512
    - 3
    - 2
- - - -1
    - 9
  - 1
  - Concat
  - - 1
- - -1
  - 3
  - C2f
  - - 1024
- - - 15
    - 18
    - 21
  - 1
  - Detect
  - - nc
nc: 1
scale: ''
width_multiple: 0.25
yaml_file: weights/icon_detect_v1_5/model.yaml
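The deleted convert script above consumed exactly this kind of model.yaml. The same idea in isolation, as a hypothetical reload of the detector from safetensors (both paths are assumptions):

```python
# Hypothetical reload of the icon detector from its YAML + safetensors weights.
from safetensors.torch import load_file
from ultralytics.nn.tasks import DetectionModel

model = DetectionModel('weights/icon_detect_v1_5/model.yaml')          # assumed path
state_dict = load_file('weights/icon_detect_v1_5/model.safetensors')   # assumed path
model.load_state_dict(state_dict)
model.eval()
```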


@@ -0,0 +1,107 @@
train_args:
  agnostic_nms: false
  amp: true
  augment: false
  auto_augment: randaugment
  batch: 64
  box: 7.5
  cache: false
  cfg: null
  classes: null
  close_mosaic: 10
  cls: 0.5
  conf: null
  copy_paste: 0.0
  cos_lr: false
  crop_fraction: 1.0
  degrees: 0.0
  deterministic: true
  device:
  - 0
  - 1
  - 2
  - 3
  dfl: 1.5
  dnn: false
  dropout: 0.0
  dynamic: false
  embed: null
  epochs: 20
  erasing: 0.4
  exist_ok: false
  fliplr: 0.5
  flipud: 0.0
  format: torchscript
  fraction: 1.0
  freeze: null
  half: false
  hsv_h: 0.015
  hsv_s: 0.7
  hsv_v: 0.4
  imgsz: 1280
  int8: false
  iou: 0.7
  keras: false
  kobj: 1.0
  label_smoothing: 0.0
  line_width: null
  lr0: 0.01
  lrf: 0.01
  mask_ratio: 4
  max_det: 300
  mixup: 0.0
  mode: train
  model: yolov8n.pt
  momentum: 0.937
  mosaic: 0.0
  multi_scale: false
  nbs: 64
  nms: false
  opset: null
  optimize: false
  optimizer: auto
  overlap_mask: true
  patience: 100
  perspective: 0.0
  plots: true
  pose: 12.0
  pretrained: true
  profile: false
  project: null
  rect: false
  resume: false
  retina_masks: false
  save: true
  save_conf: false
  save_crop: false
  save_frames: false
  save_hybrid: false
  save_json: false
  save_period: -1
  save_txt: false
  scale: 0.5
  seed: 0
  shear: 0.0
  show: false
  show_boxes: true
  show_conf: true
  show_labels: true
  simplify: false
  single_cls: false
  source: null
  split: val
  stream_buffer: false
  task: detect
  time: null
  tracker: botsort.yaml
  translate: 0.1
  val: true
  verbose: true
  vid_stride: 1
  visualize: false
  warmup_bias_lr: 0.0
  warmup_epochs: 3.0
  warmup_momentum: 0.8
  weight_decay: 0.0005
  workers: 8
  workspace: 4
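These training arguments ride along inside best.pt via the deleted convert script. To inspect them on their own, a small hypothetical reader (the path is an assumption; the printed values match this file):

```python
# Hypothetical reader for train_args.yaml (path is an assumption).
import yaml

with open('weights/icon_detect_v1_5/train_args.yaml') as f:
    train_args = yaml.safe_load(f)['train_args']

print(train_args['imgsz'], train_args['epochs'], train_args['batch'])  # 1280 20 64
```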