Naming conventions

This commit is contained in:
Thomas Dhome-Casanova
2025-02-04 11:43:36 -08:00
parent 31d7b1d096
commit fe84a35292
39 changed files with 35 additions and 33 deletions

View File

@@ -0,0 +1,162 @@
"""
Agentic sampling loop that calls the Anthropic API and a local implementation of Anthropic-defined computer use tools.
"""
import asyncio
import platform
from collections.abc import Callable
from datetime import datetime
from enum import StrEnum
from typing import Any, Dict, cast
from anthropic import Anthropic, AnthropicBedrock, AnthropicVertex, APIResponse
from anthropic.types import TextBlock, ToolResultBlockParam
from anthropic.types.beta import (
BetaContentBlock,
BetaContentBlockParam,
BetaImageBlockParam,
BetaMessage,
BetaMessageParam,
BetaTextBlock,
BetaTextBlockParam,
BetaToolResultBlockParam,
BetaToolUseBlock,
)
from tools import ComputerTool, ToolCollection, ToolResult
from PIL import Image
from io import BytesIO
import gradio as gr
BETA_FLAG = "computer-use-2024-10-22"
class APIProvider(StrEnum):
ANTHROPIC = "anthropic"
BEDROCK = "bedrock"
VERTEX = "vertex"
SYSTEM_PROMPT = f"""<SYSTEM_CAPABILITY>
* You are utilizing a Windows system with internet access.
* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
</SYSTEM_CAPABILITY>
"""
class AnthropicActor:
def __init__(
self,
model: str,
provider: APIProvider,
api_key: str,
api_response_callback: Callable[[APIResponse[BetaMessage]], None],
max_tokens: int = 4096,
only_n_most_recent_images: int | None = None,
print_usage: bool = True,
):
self.model = model
self.provider = provider
self.api_key = api_key
self.api_response_callback = api_response_callback
self.max_tokens = max_tokens
self.only_n_most_recent_images = only_n_most_recent_images
self.tool_collection = ToolCollection(ComputerTool())
self.system = SYSTEM_PROMPT
self.total_token_usage = 0
self.total_cost = 0
self.print_usage = print_usage
# Instantiate the appropriate API client based on the provider
if provider == APIProvider.ANTHROPIC:
self.client = Anthropic(api_key=api_key)
elif provider == APIProvider.VERTEX:
self.client = AnthropicVertex()
elif provider == APIProvider.BEDROCK:
self.client = AnthropicBedrock()
else:
raise ValueError(f"Unknown provider: {provider}")
def __call__(
self,
*,
messages: list[BetaMessageParam]
):
"""
Generate a response given history messages.
"""
if self.only_n_most_recent_images:
_maybe_filter_to_n_most_recent_images(messages, self.only_n_most_recent_images)
# Call the API synchronously
raw_response = self.client.beta.messages.with_raw_response.create(
max_tokens=self.max_tokens,
messages=messages,
model=self.model,
system=self.system,
tools=self.tool_collection.to_params(),
betas=["computer-use-2024-10-22"],
)
self.api_response_callback(cast(APIResponse[BetaMessage], raw_response))
response = raw_response.parse()
print(f"AnthropicActor response: {response}")
self.total_token_usage += response.usage.input_tokens + response.usage.output_tokens
self.total_cost += (response.usage.input_tokens * 3 / 1000000 + response.usage.output_tokens * 15 / 1000000)  # Claude 3.5 Sonnet pricing: $3/M input tokens, $15/M output tokens
if self.print_usage:
print(f"Claude total token usage so far: {self.total_token_usage}, total cost so far: $USD{self.total_cost}")
return response
def _maybe_filter_to_n_most_recent_images(
messages: list[BetaMessageParam],
images_to_keep: int,
min_removal_threshold: int = 10,
):
"""
With the assumption that images are screenshots that are of diminishing value as
the conversation progresses, remove all but the final `images_to_keep` tool_result
images in place, with a chunk of min_removal_threshold to reduce the amount we
break the implicit prompt cache.
"""
if images_to_keep is None:
return messages
tool_result_blocks = cast(
list[ToolResultBlockParam],
[
item
for message in messages
for item in (
message["content"] if isinstance(message["content"], list) else []
)
if isinstance(item, dict) and item.get("type") == "tool_result"
],
)
total_images = sum(
1
for tool_result in tool_result_blocks
for content in tool_result.get("content", [])
if isinstance(content, dict) and content.get("type") == "image"
)
images_to_remove = total_images - images_to_keep
# for better cache behavior, we want to remove in chunks
images_to_remove -= images_to_remove % min_removal_threshold
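# e.g. with the default threshold of 10, 23 removable images -> remove only 20,
# so the retained prefix of the history changes less often between API calls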
for tool_result in tool_result_blocks:
if isinstance(tool_result.get("content"), list):
new_content = []
for content in tool_result.get("content", []):
if isinstance(content, dict) and content.get("type") == "image":
if images_to_remove > 0:
images_to_remove -= 1
continue
new_content.append(content)
tool_result["content"] = new_content

View File

@@ -0,0 +1,59 @@
from groq import Groq
import os
from .utils import is_image_path
def run_groq_interleaved(messages: list, system: str, model_name: str, api_key: str, max_tokens=256, temperature=0.6):
"""
Run a chat completion through Groq's API, ignoring any images in the messages.
"""
api_key = api_key or os.environ.get("GROQ_API_KEY")
if not api_key:
raise ValueError("GROQ_API_KEY is not set")
client = Groq(api_key=api_key)
# avoid using a system message for R1; fold the system prompt into the first user message instead
final_messages = [{"role": "user", "content": system}]
if isinstance(messages, list):
for item in messages:
if isinstance(item, dict):
# For dict items, concatenate all text content, ignoring images
text_contents = []
for cnt in item["content"]:
if isinstance(cnt, str):
if not is_image_path(cnt): # Skip image paths
text_contents.append(cnt)
else:
text_contents.append(str(cnt))
if text_contents: # Only add if there's text content
message = {"role": "user", "content": " ".join(text_contents)}
final_messages.append(message)
else: # str
message = {"role": "user", "content": item}
final_messages.append(message)
elif isinstance(messages, str):
final_messages.append({"role": "user", "content": messages})
try:
completion = client.chat.completions.create(
model=model_name,
messages=final_messages,
temperature=temperature,
max_completion_tokens=max_tokens,
top_p=0.95,
stream=False,
reasoning_format="raw"
)
response = completion.choices[0].message.content
final_answer = response.split('</think>\n')[-1] if '</think>' in response else response
final_answer = final_answer.replace("<output>", "").replace("</output>", "")
token_usage = completion.usage.total_tokens
return final_answer, token_usage
except Exception as e:
print(f"Error in interleaved Groq: {e}")
return str(e), 0
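
A hedged usage sketch of the helper above (the message shape mirrors what the agent passes in; the screenshot path is a placeholder and is skipped because it is an image path):

```python
answer, total_tokens = run_groq_interleaved(
    messages=[{"content": ["What should I click next?", "./tmp/outputs/screenshot_abc.png"]}],
    system="You are a GUI agent on Windows.",
    model_name="deepseek-r1-distill-llama-70b",
    api_key="",  # falls back to the GROQ_API_KEY environment variable
)
```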

View File

@@ -0,0 +1,62 @@
import os
import logging
import base64
import requests
from .utils import is_image_path, encode_image
def run_oai_interleaved(messages: list, system: str, model_name: str, api_key: str, max_tokens=256, temperature=0, provider_base_url: str = "https://api.openai.com/v1"):
headers = {"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"}
final_messages = [{"role": "system", "content": system}]
if isinstance(messages, list):
for item in messages:
contents = []
if isinstance(item, dict):
for cnt in item["content"]:
if isinstance(cnt, str):
if is_image_path(cnt) and 'o3-mini' not in model_name:
# o3-mini does not support image inputs
base64_image = encode_image(cnt)
content = {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
else:
content = {"type": "text", "text": cnt}
else:
# in this case it is a text block from anthropic
content = {"type": "text", "text": str(cnt)}
contents.append(content)
message = {"role": 'user', "content": contents}
else: # str
contents.append({"type": "text", "text": item})
message = {"role": "user", "content": contents}
final_messages.append(message)
elif isinstance(messages, str):
final_messages = [{"role": "user", "content": messages}]
payload = {
"model": model_name,
"messages": final_messages,
}
if 'o1' in model_name or 'o3-mini' in model_name:
# reasoning models use max_completion_tokens and do not accept temperature
payload['reasoning_effort'] = 'low'
payload['max_completion_tokens'] = max_tokens
else:
payload['max_tokens'] = max_tokens
payload['temperature'] = temperature
response = requests.post(
f"{provider_base_url}/chat/completions", headers=headers, json=payload
)
try:
text = response.json()['choices'][0]['message']['content']
token_usage = int(response.json()['usage']['total_tokens'])
return text, token_usage
except Exception as e:
print(f"Error in interleaved openAI: {e}. This may be due to an invalid API key. Please check the response: {response.json()}")
return str(e), 0
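
A hedged usage sketch (model name and environment variable are illustrative, not mandated by this commit):

```python
import os

text, total_tokens = run_oai_interleaved(
    messages=[{"content": ["Describe the current screen.", "./tmp/outputs/screenshot_abc.png"]}],
    system="You are a GUI agent on Windows.",
    model_name="gpt-4o-2024-11-20",
    api_key=os.environ.get("OPENAI_API_KEY", ""),
    max_tokens=512,
)
```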

View File

@@ -0,0 +1,44 @@
import requests
import base64
from pathlib import Path
from tools.screen_capture import get_screenshot
from agent.llm_utils.utils import encode_image
OUTPUT_DIR = "./tmp/outputs"
class OmniParserClient:
def __init__(self, url: str) -> None:
self.url = url
def __call__(self):
screenshot, screenshot_path = get_screenshot()
screenshot_path = str(screenshot_path)
image_base64 = encode_image(screenshot_path)
response = requests.post(self.url, json={"base64_image": image_base64})
response_json = response.json()
print('omniparser latency:', response_json['latency'])
som_image_data = base64.b64decode(response_json['som_image_base64'])
screenshot_path_uuid = Path(screenshot_path).stem.replace("screenshot_", "")
som_screenshot_path = f"{OUTPUT_DIR}/screenshot_som_{screenshot_path_uuid}.png"
with open(som_screenshot_path, "wb") as f:
f.write(som_image_data)
response_json['width'] = screenshot.size[0]
response_json['height'] = screenshot.size[1]
response_json['original_screenshot_base64'] = image_base64
response_json['screenshot_uuid'] = screenshot_path_uuid
response_json = self.reformat_messages(response_json)
return response_json
def reformat_messages(self, response_json: dict):
screen_info = ""
for idx, element in enumerate(response_json["parsed_content_list"]):
element['idx'] = idx
if element['type'] == 'text':
screen_info += f'ID: {idx}, Text: {element["content"]}\n'
elif element['type'] == 'icon':
screen_info += f'ID: {idx}, Icon: {element["content"]}\n'
response_json['screen_info'] = screen_info
return response_json
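
A minimal sketch of calling the client (hypothetical: assumes an OmniParser server is reachable at this URL and that `./tmp/outputs` exists; the endpoint path is an assumption):

```python
parser = OmniParserClient(url="http://localhost:8000/parse/")  # hypothetical endpoint
parsed = parser()                       # captures a screenshot and parses it
print(parsed["screen_info"])            # "ID: 0, Text: ..." / "ID: 1, Icon: ..."
print(parsed["width"], parsed["height"])
```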

View File

@@ -0,0 +1,13 @@
import base64
def is_image_path(text):
"""Return True if the string ends with a known image file extension."""
image_extensions = (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".tif")
return text.endswith(image_extensions)
def encode_image(image_path):
"""Encode image file to base64."""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
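
Quick sanity checks on these helpers (paths are placeholders; note the extension match is case-sensitive):

```python
assert is_image_path("./tmp/outputs/screenshot_abc.png")
assert not is_image_path("./tmp/outputs/screenshot_abc.PNG")  # uppercase extension is not matched
assert not is_image_path("notes.txt")
b64 = encode_image("./tmp/outputs/screenshot_abc.png")  # assumes the file exists
```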

View File

@@ -0,0 +1,338 @@
import json
from collections.abc import Callable
from typing import cast
import uuid
from PIL import Image, ImageDraw
import base64
from io import BytesIO
from anthropic import APIResponse
from anthropic.types import ToolResultBlockParam
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock, BetaMessageParam, BetaUsage
from agent.llm_utils.oaiclient import run_oai_interleaved
from agent.llm_utils.groqclient import run_groq_interleaved
from agent.llm_utils.utils import is_image_path
import time
import re
OUTPUT_DIR = "./tmp/outputs"
def extract_data(input_string, data_type):
# Regular expression to extract a fenced ```{data_type} block; tolerate a missing closing fence
pattern = f"```{data_type}" + r"(.*?)(```|$)"
# Extract content
# re.DOTALL allows '.' to match newlines as well
matches = re.findall(pattern, input_string, re.DOTALL)
# Return the first match if exists, trimming whitespace and ignoring potential closing backticks
return matches[0][0].strip() if matches else input_string
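# Quick illustration (hypothetical input):
#   extract_data('Plan:\n```json\n{"Next Action": "None"}\n```', "json")
#   returns '{"Next Action": "None"}'; with no fenced block, the input
#   string is returned unchanged.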
class VLMAgent:
def __init__(
self,
model: str,
provider: str,
api_key: str,
output_callback: Callable,
api_response_callback: Callable,
max_tokens: int = 4096,
only_n_most_recent_images: int | None = None,
print_usage: bool = True,
):
if model == "omniparser + gpt-4o":
self.model = "gpt-4o-2024-11-20"
elif model == "omniparser + R1":
self.model = "deepseek-r1-distill-llama-70b"
elif model == "omniparser + qwen2.5vl":
self.model = "qwen2.5-vl-72b-instruct"
elif model == "omniparser + o1":
self.model = "o1"
elif model == "omniparser + o3-mini":
self.model = "o3-mini"
else:
raise ValueError(f"Model {model} not supported")
self.provider = provider
self.api_key = api_key
self.api_response_callback = api_response_callback
self.max_tokens = max_tokens
self.only_n_most_recent_images = only_n_most_recent_images
self.output_callback = output_callback
self.print_usage = print_usage
self.total_token_usage = 0
self.total_cost = 0
self.step_count = 0
self.system = ''
def __call__(self, messages: list, parsed_screen: dict):
self.step_count += 1
image_base64 = parsed_screen['original_screenshot_base64']
latency_omniparser = parsed_screen['latency']
self.output_callback(f'-- Step {self.step_count}: --', sender="bot")
screen_info = str(parsed_screen['screen_info'])
screenshot_uuid = parsed_screen['screenshot_uuid']
screen_width, screen_height = parsed_screen['width'], parsed_screen['height']
boxids_and_labels = parsed_screen["screen_info"]
system = self._get_system_prompt(boxids_and_labels)
# drop SOM images and stale screenshots from the history before planning
planner_messages = messages
_remove_som_images(planner_messages)
_maybe_filter_to_n_most_recent_images(planner_messages, self.only_n_most_recent_images)
if isinstance(planner_messages[-1], dict):
if not isinstance(planner_messages[-1]["content"], list):
planner_messages[-1]["content"] = [planner_messages[-1]["content"]]
planner_messages[-1]["content"].append(f"{OUTPUT_DIR}/screenshot_{screenshot_uuid}.png")
planner_messages[-1]["content"].append(f"{OUTPUT_DIR}/screenshot_som_{screenshot_uuid}.png")
start = time.time()
if "gpt" in self.model or "o1" in self.model or "o3-mini" in self.model:
vlm_response, token_usage = run_oai_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
provider_base_url="https://api.openai.com/v1",
temperature=0,
)
print(f"oai token usage: {token_usage}")
self.total_token_usage += token_usage
if 'gpt' in self.model:
self.total_cost += (token_usage * 2.5 / 1000000) # https://openai.com/api/pricing/
elif 'o1' in self.model:
self.total_cost += (token_usage * 15 / 1000000) # https://openai.com/api/pricing/
elif 'o3-mini' in self.model:
self.total_cost += (token_usage * 1.1 / 1000000) # https://openai.com/api/pricing/
elif "r1" in self.model:
vlm_response, token_usage = run_groq_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
)
print(f"groq token usage: {token_usage}")
self.total_token_usage += token_usage
self.total_cost += (token_usage * 0.99 / 1000000)
elif "qwen" in self.model:
vlm_response, token_usage = run_oai_interleaved(
messages=planner_messages,
system=system,
model_name=self.model,
api_key=self.api_key,
max_tokens=min(2048, self.max_tokens),
provider_base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
temperature=0,
)
print(f"qwen token usage: {token_usage}")
self.total_token_usage += token_usage
self.total_cost += (token_usage * 2.2 / 1000000) # https://help.aliyun.com/zh/model-studio/getting-started/models?spm=a2c4g.11186623.0.0.74b04823CGnPv7#fe96cfb1a422a
else:
raise ValueError(f"Model {self.model} not supported")
latency_vlm = time.time() - start
self.output_callback(f"LLM: {latency_vlm:.2f}s, OmniParser: {latency_omniparser:.2f}s", sender="bot")
print(f"{vlm_response}")
if self.print_usage:
print(f"Total token so far: {self.total_token_usage}. Total cost so far: $USD{self.total_cost:.5f}")
vlm_response_json = extract_data(vlm_response, "json")
vlm_response_json = json.loads(vlm_response_json)
img_to_show_base64 = parsed_screen["som_image_base64"]
if "Box ID" in vlm_response_json:
bbox = parsed_screen["parsed_content_list"][int(vlm_response_json["Box ID"])]["bbox"]
vlm_response_json["box_centroid_coordinate"] = [int((bbox[0] + bbox[2]) / 2 * screen_width), int((bbox[1] + bbox[3]) / 2 * screen_height)]
img_to_show_data = base64.b64decode(img_to_show_base64)
img_to_show = Image.open(BytesIO(img_to_show_data))
draw = ImageDraw.Draw(img_to_show)
x, y = vlm_response_json["box_centroid_coordinate"]
radius = 10
draw.ellipse((x - radius, y - radius, x + radius, y + radius), fill='red')
draw.ellipse((x - radius*3, y - radius*3, x + radius*3, y + radius*3), fill=None, outline='red', width=2)
buffered = BytesIO()
img_to_show.save(buffered, format="PNG")
img_to_show_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
self.output_callback(f'<img src="data:image/png;base64,{img_to_show_base64}">', sender="bot")
self.output_callback(
f'<details>'
f' <summary>Parsed screen elements by OmniParser</summary>'
f' <pre>{screen_info}</pre>'
f'</details>',
sender="bot"
)
vlm_plan_str = ""
for key, value in vlm_response_json.items():
if key == "Reasoning":
vlm_plan_str += f'{value}'
else:
vlm_plan_str += f'\n{key}: {value}'
# construct the response so that the AnthropicExecutor can execute the tool
response_content = [BetaTextBlock(text=vlm_plan_str, type='text')]
if 'box_centroid_coordinate' in vlm_response_json:
move_cursor_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': 'mouse_move', 'coordinate': vlm_response_json["box_centroid_coordinate"]},
name='computer', type='tool_use')
response_content.append(move_cursor_block)
if vlm_response_json["Next Action"] == "None":
print("Task paused/completed.")
elif vlm_response_json["Next Action"] == "type":
sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': vlm_response_json["Next Action"], 'text': vlm_response_json["value"]},
name='computer', type='tool_use')
response_content.append(sim_content_block)
else:
sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': vlm_response_json["Next Action"]},
name='computer', type='tool_use')
response_content.append(sim_content_block)
response_message = BetaMessage(id=f'toolu_{uuid.uuid4()}', content=response_content, model='', role='assistant', type='message', stop_reason='tool_use', usage=BetaUsage(input_tokens=0, output_tokens=0))
return response_message, vlm_response_json
def _api_response_callback(self, response: APIResponse):
self.api_response_callback(response)
def _get_system_prompt(self, screen_info: str = ""):
main_section = f"""
You are using a Windows device.
You are able to use a mouse and keyboard to interact with the computer based on the given task and screenshot.
You can only interact with the desktop GUI (no terminal or application menu access).
You may be given a history of plans and actions; this is the response from the previous loop.
You should carefully consider your plan based on the task, the screenshot, and the history of actions.
Here is the list of all detected bounding boxes by IDs on the screen and their description:{screen_info}
Your available "Next Action" only include:
- type: types a string of text.
- left_click: move mouse to box id and left clicks.
- right_click: move mouse to box id and right clicks.
- double_click: move mouse to box id and double clicks.
- hover: move mouse to box id.
- scroll_up: scrolls the screen up.
- scroll_down: scrolls the screen down.
- wait: waits for 1 second for the device to load or respond.
Based on the visual information from the screenshot image and the detected bounding boxes, please determine the next action, the Box ID you should operate on (if action is not 'type', 'hover', 'scroll_up', 'scroll_down', 'wait'), and the value (if the action is 'type') in order to complete the task.
Output format:
```json
{{
"Reasoning": str, # describe what is in the current screen, taking into account the history, then describe your step-by-step thoughts on how to achieve the task, choose one action from available actions at a time.
"Next Action": "action_type, action description" | "None" # one action at a time, describe it in short and precisely.
"Box ID": n,
"value": "xxx" # only provide value field if the action is type, else don't include value key
}}
```
One Example:
```json
{{
"Reasoning": "The current screen shows google result of amazon, in previous action I have searched amazon on google. Then I need to click on the first search results to go to amazon.com.",
"Next Action": "left_click",
"Box ID": m
}}
```
Another Example:
```json
{{
"Reasoning": "The current screen shows the front page of amazon. There is no previous action. Therefore I need to type "Apple watch" in the search bar.",
"Next Action": "type",
"Box ID": n,
"value": "Apple watch"
}}
```
IMPORTANT NOTES:
1. You should only give a single action at a time.
"""
thinking_model = "r1" in self.model
if not thinking_model:
main_section += """
2. You should give an analysis of the current screen, reflect on what has been done by looking at the history, and then describe your step-by-step thoughts on how to achieve the task.
"""
else:
main_section += """
2. In <think> XML tags, give an analysis of the current screen and reflect on what has been done by looking at the history, then describe your step-by-step thoughts on how to achieve the task. In <output> XML tags, put the next action prediction JSON.
"""
main_section += """
3. Attach the next action prediction in the "Next Action".
4. You should not include other actions, such as keyboard shortcuts.
5. When the task is completed, don't complete additional actions. You should say "Next Action": "None" in the json field.
"""
return main_section
def _remove_som_images(messages):
for msg in messages:
msg_content = msg["content"]
if isinstance(msg_content, list):
msg["content"] = [
cnt for cnt in msg_content
if not (isinstance(cnt, str) and 'som' in cnt and is_image_path(cnt))
]
def _maybe_filter_to_n_most_recent_images(
messages: list[BetaMessageParam],
images_to_keep: int,
min_removal_threshold: int = 10,
):
"""
With the assumption that images are screenshots that are of diminishing value as
the conversation progresses, remove all but the final `images_to_keep` tool_result
images in place
"""
if images_to_keep is None:
return messages
total_images = 0
for msg in messages:
for cnt in msg.get("content", []):
if isinstance(cnt, str) and is_image_path(cnt):
total_images += 1
elif isinstance(cnt, dict) and cnt.get("type") == "tool_result":
for content in cnt.get("content", []):
if isinstance(content, dict) and content.get("type") == "image":
total_images += 1
images_to_remove = total_images - images_to_keep
for msg in messages:
msg_content = msg["content"]
if isinstance(msg_content, list):
new_content = []
for cnt in msg_content:
# Remove images from SOM or screenshot as needed
if isinstance(cnt, str) and is_image_path(cnt):
if images_to_remove > 0:
images_to_remove -= 1
continue
# The VLM shouldn't use the Anthropic screenshot tool, so tool_result images shouldn't appear here; remove them anyway just in case
elif isinstance(cnt, dict) and cnt.get("type") == "tool_result":
new_tool_result_content = []
for tool_result_entry in cnt.get("content", []):
if isinstance(tool_result_entry, dict) and tool_result_entry.get("type") == "image":
if images_to_remove > 0:
images_to_remove -= 1
continue
new_tool_result_content.append(tool_result_entry)
cnt["content"] = new_tool_result_content
# Append fixed content to current message's content list
new_content.append(cnt)
msg["content"] = new_content