docker demo, migration, speed up inference using cv2

yadonglu
2025-01-04 20:06:33 -08:00
parent d0c163cd02
commit b9d3cb715b
36 changed files with 5842 additions and 2456 deletions

.gitignore (vendored, 16 lines changed)

@@ -1,8 +1,8 @@
weights/icon_caption_blip2
weights/icon_caption_florence
weights/icon_detect/
weights/icon_detect_v1_5/
weights/icon_detect_v1_5_2/
.gradio
__pycache__/
debug.ipynb

Dockerfile (new file, 203 lines)

@@ -0,0 +1,203 @@
# Dockerfile for OmniParser with GPU support and OpenGL libraries
#
# This Dockerfile is intended to create an environment with NVIDIA CUDA
# support and the necessary dependencies to run the OmniParser project.
# The configuration is designed to support applications that rely on
# Python 3.12, OpenCV, Hugging Face transformers, and Gradio. Additionally,
# it includes steps to pull large files from Git LFS and a script to
# convert model weights from .safetensor to .pt format. The container
# runs a Gradio server by default, exposed on port 7861.
#
# Base image: nvidia/cuda:12.3.1-devel-ubuntu22.04
#
# Key features:
# - System dependencies for OpenGL to support graphical libraries.
# - Miniconda for Python 3.12, allowing for environment management.
# - Git Large File Storage (LFS) setup for handling large model files.
# - Requirement file installation, including specific versions of
# OpenCV and Hugging Face Hub.
# - Entrypoint script execution with Gradio server configuration for
# external access.
# For a GPU environment, use nvidia/cuda:12.3.1-devel-ubuntu22.04 as the base image; otherwise use ubuntu:22.04
# FROM nvidia/cuda:12.3.1-devel-ubuntu22.04
FROM docker.io/ubuntu:22.04
# Install system dependencies with explicit OpenGL libraries
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
git \
git-lfs \
wget \
libgl1 \
libglib2.0-0 \
libsm6 \
libxext6 \
libxrender1 \
libglu1-mesa \
python3-opencv \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* \
&& git lfs install
# Install Miniconda for Python 3.12
RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh && \
bash miniconda.sh -b -p /opt/conda && \
rm miniconda.sh
ENV PATH="/opt/conda/bin:$PATH"
# Create and activate Conda environment with Python 3.12, and set it as the default
RUN conda create -y -n omni python=3.12 && \
echo "source activate omni" > ~/.bashrc
ENV CONDA_DEFAULT_ENV=omni
ENV PATH="/opt/conda/envs/omni/bin:$PATH"
# Set the working directory in the container
WORKDIR /usr/src/app
# Copy project files and requirements
COPY . .
COPY requirements.txt /usr/src/app/requirements.txt
# Initialize Git LFS and pull LFS files
RUN git lfs install && \
git lfs pull
# Install dependencies from requirements.txt with specific opencv-python-headless version
RUN . /opt/conda/etc/profile.d/conda.sh && conda activate omni && \
# pip uninstall -y opencv-python opencv-python-headless && \
# pip install --no-cache-dir opencv-python-headless==4.8.1.78 && \
pip install -r requirements.txt && \
pip install huggingface_hub
# Run download.py to fetch model weights and convert safetensors to .pt format
# RUN . /opt/conda/etc/profile.d/conda.sh && conda activate omni && \
# python download.py && \
# echo "Contents of weights directory:" && \
# ls -lR weights && \
# python weights/convert_safetensor_to_pt.py
# Expose the default Gradio port
EXPOSE 7861
# Configure Gradio to be accessible externally
ENV GRADIO_SERVER_NAME="0.0.0.0"
# Copy and set permissions for entrypoint script
# COPY entrypoint.sh /usr/src/app/entrypoint.sh
# RUN chmod +x /usr/src/app/entrypoint.sh
# To debug, keep the container running
# CMD ["tail", "-f", "/dev/null"]
################################################################################################
# virtual display related setup --> from anthropic-quickstarts/computer-use-demo/Dockerfile
ENV DEBIAN_FRONTEND=noninteractive
ENV DEBIAN_PRIORITY=high
RUN apt-get update && \
apt-get -y upgrade && \
apt-get -y install \
# UI Requirements
xvfb \
xterm \
xdotool \
scrot \
imagemagick \
sudo \
mutter \
x11vnc \
# Python/pyenv reqs
build-essential \
libssl-dev \
zlib1g-dev \
libbz2-dev \
libreadline-dev \
libsqlite3-dev \
curl \
git \
libncursesw5-dev \
xz-utils \
tk-dev \
libxml2-dev \
libxmlsec1-dev \
libffi-dev \
liblzma-dev \
# Network tools
net-tools \
netcat \
# PPA req
software-properties-common && \
# Userland apps
sudo add-apt-repository ppa:mozillateam/ppa && \
sudo apt-get install -y --no-install-recommends \
libreoffice \
firefox-esr \
x11-apps \
xpdf \
gedit \
xpaint \
tint2 \
galculator \
pcmanfm \
unzip && \
apt-get clean
# Install noVNC
RUN git clone --branch v1.5.0 https://github.com/novnc/noVNC.git /opt/noVNC && \
git clone --branch v0.12.0 https://github.com/novnc/websockify /opt/noVNC/utils/websockify && \
ln -s /opt/noVNC/vnc.html /opt/noVNC/index.html
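# noVNC provides a browser-based VNC client; websockify bridges the VNC TCP
# socket to WebSockets so the Xvfb virtual display (served by x11vnc) can be
# viewed in a browser. The symlink makes vnc.html the default landing page.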
# setup user
ENV USERNAME=computeruse
ENV HOME=/home/$USERNAME
RUN useradd -m -s /bin/bash -d $HOME $USERNAME
RUN echo "${USERNAME} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
USER computeruse
WORKDIR $HOME
# setup python
RUN git clone https://github.com/pyenv/pyenv.git ~/.pyenv && \
cd ~/.pyenv && src/configure && make -C src && cd .. && \
echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bashrc && \
echo 'command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bashrc && \
echo 'eval "$(pyenv init -)"' >> ~/.bashrc
ENV PYENV_ROOT="$HOME/.pyenv"
ENV PATH="$PYENV_ROOT/bin:$PATH"
ENV PYENV_VERSION_MAJOR=3
ENV PYENV_VERSION_MINOR=11
ENV PYENV_VERSION_PATCH=6
ENV PYENV_VERSION=$PYENV_VERSION_MAJOR.$PYENV_VERSION_MINOR.$PYENV_VERSION_PATCH
RUN eval "$(pyenv init -)" && \
pyenv install $PYENV_VERSION && \
pyenv global $PYENV_VERSION && \
pyenv rehash
ENV PATH="$HOME/.pyenv/shims:$HOME/.pyenv/bin:$PATH"
RUN python -m pip install --upgrade pip==23.1.2 setuptools==58.0.4 wheel==0.40.0 && \
python -m pip config set global.disable-pip-version-check true
# only reinstall if requirements.txt changes
# COPY --chown=$USERNAME:$USERNAME computer_use_demo/requirements.txt $HOME/computer_use_demo/requirements.txt
# RUN python -m pip install -r $HOME/computer_use_demo/requirements.txt
# setup desktop env & app
# COPY --chown=$USERNAME:$USERNAME image/ $HOME
# COPY --chown=$USERNAME:$USERNAME computer_use_demo/ $HOME/computer_use_demo/
ARG DISPLAY_NUM=1
ARG HEIGHT=768
ARG WIDTH=1024
ENV DISPLAY_NUM=$DISPLAY_NUM
ENV HEIGHT=$HEIGHT
ENV WIDTH=$WIDTH
# Set the entrypoint
# ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
# sudo docker build . -t omniparser-x-demo:local # manually build the docker image (optional)
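# Hypothetical run commands, assuming the image tag from the build line above
# (port 7861 matches the EXPOSE/GRADIO_SERVER_NAME settings):
# sudo docker run --rm -p 7861:7861 omniparser-x-demo:local
# sudo docker run --rm --gpus all -p 7861:7861 omniparser-x-demo:local # GPU variant; needs the NVIDIA Container Toolkit and the CUDA base image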

LICENSE (790 lines changed)

@@ -1,395 +1,395 @@
Attribution 4.0 International

=======================================================================

Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.

Using Creative Commons Public Licenses

Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.

Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors

Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees

=======================================================================

Creative Commons Attribution 4.0 International Public License

By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution 4.0 International Public License ("Public License"). To the
extent this Public License may be interpreted as a contract, You are
granted the Licensed Rights in consideration of Your acceptance of
these terms and conditions, and the Licensor grants You such rights in
consideration of benefits the Licensor receives from making the
Licensed Material available under these terms and conditions.

Section 1 -- Definitions.

a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.

b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.

c. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.

d. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.

e. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.

f. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.

g. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.

h. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.

i. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.

j. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.

k. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.

Section 2 -- Scope.

a. License grant.

1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:

a. reproduce and Share the Licensed Material, in whole or
in part; and

b. produce, reproduce, and Share Adapted Material.

2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.

3. Term. The term of this Public License is specified in Section
6(a).

4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.

5. Downstream recipients.

a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.

b. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.

6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).

b. Other rights.

1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.

2. Patent and trademark rights are not licensed under this
Public License.

3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.

Section 3 -- License Conditions.

Your exercise of the Licensed Rights is expressly made subject to the
following conditions.

a. Attribution.

1. If You Share the Licensed Material (including in modified
form), You must:

a. retain the following if it is supplied by the Licensor
with the Licensed Material:

i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);

ii. a copyright notice;

iii. a notice that refers to this Public License;

iv. a notice that refers to the disclaimer of
warranties;

v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;

b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and

c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.

2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.

3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.

4. If You Share Adapted Material You produce, the Adapter's
License You apply must not prevent recipients of the Adapted
Material from complying with this Public License.

Section 4 -- Sui Generis Database Rights.

Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:

a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;

b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material; and

c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.

For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.

Section 5 -- Disclaimer of Warranties and Limitation of Liability.

a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.

b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.

c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.

Section 6 -- Term and Termination.

a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.

b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:

1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or

2. upon express reinstatement by the Licensor.

For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.

c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.

d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.

Section 7 -- Other Terms and Conditions.

a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.

b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.

Section 8 -- Interpretation.

a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.

b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.

c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.

d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.

=======================================================================

Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.

Creative Commons may be contacted at creativecommons.org.

README.md (122 lines changed)

@@ -1,61 +1,61 @@
# OmniParser: Screen Parsing tool for Pure Vision Based GUI Agent
<p align="center">
<img src="imgs/logo.png" alt="Logo">
</p>

[![arXiv](https://img.shields.io/badge/Paper-green)](https://arxiv.org/abs/2408.00203)
[![License](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

📢 [[Project Page](https://microsoft.github.io/OmniParser/)] [[Blog Post](https://www.microsoft.com/en-us/research/articles/omniparser-for-pure-vision-based-gui-agent/)] [[Models](https://huggingface.co/microsoft/OmniParser)] [[Hugging Face Space](https://huggingface.co/spaces/microsoft/OmniParser)]

**OmniParser** is a comprehensive method for parsing user interface screenshots into structured and easy-to-understand elements, which significantly enhances the ability of GPT-4V to generate actions that can be accurately grounded in the corresponding regions of the interface.

## News
- [2024/11/26] We released an updated version, OmniParser V1.5, which features 1) more fine-grained/small icon detection and 2) prediction of whether each screen element is interactable. See examples in demo.ipynb.
- [2024/10] OmniParser was the #1 trending model on the Hugging Face model hub (starting 10/29/2024).
- [2024/10] Feel free to check out our demo on [Hugging Face Space](https://huggingface.co/spaces/microsoft/OmniParser)! (stay tuned for OmniParser + Claude Computer Use)
- [2024/10] Both the Interactive Region Detection model and the Icon Functional Description model are released! [Hugging Face models](https://huggingface.co/microsoft/OmniParser)
- [2024/09] OmniParser achieves the best performance on [Windows Agent Arena](https://microsoft.github.io/WindowsAgentArena/)!

## Install
Install the environment:
```python
conda create -n "omni" python==3.12
conda activate omni
pip install -r requirements.txt
```

Then download the model checkpoint files from https://huggingface.co/microsoft/OmniParser and put them under weights/. The default folder structure is: weights/icon_detect, weights/icon_caption_florence, weights/icon_caption_blip2.
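
For reference, a minimal sketch of scripting that download with the `huggingface_hub` package (the call below is illustrative; the Dockerfile in this commit also references a `download.py` helper for the same step):

```python
# Illustrative sketch: fetch the OmniParser checkpoints into weights/.
# Assumes huggingface_hub is installed (the Dockerfile pip-installs it).
from huggingface_hub import snapshot_download

snapshot_download(repo_id="microsoft/OmniParser", local_dir="weights")
```
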
Finally, convert the safetensors checkpoint to a .pt file:
```python
python weights/convert_safetensor_to_pt.py
```
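
The conversion script's internals are not shown in this diff; as a rough sketch of the general idea (assuming the checkpoint is a plain state dict, which may differ from what `convert_safetensor_to_pt.py` actually does):

```python
# Rough sketch only: load a .safetensors checkpoint and re-save it in torch's
# .pt format. Paths are illustrative; the real script may wrap the weights in
# a model object before saving.
import torch
from safetensors.torch import load_file

state_dict = load_file("weights/icon_detect/model.safetensors")
torch.save(state_dict, "weights/icon_detect/model.pt")
```
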
## Examples:
We put together a few simple examples in demo.ipynb.

## Gradio Demo
To run the Gradio demo, simply run:
```python
python gradio_demo.py
```
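
Note that the Dockerfile in this commit makes the demo reachable from outside the container by setting `GRADIO_SERVER_NAME=0.0.0.0` and exposing port 7861. A hedged sketch of the in-code equivalent (the toy app below is a stand-in, not the actual demo):

```python
# Illustrative: serve a Gradio app on all interfaces, mirroring the
# Dockerfile's GRADIO_SERVER_NAME=0.0.0.0 and EXPOSE 7861 settings.
import gradio as gr

demo = gr.Interface(fn=lambda s: s, inputs="text", outputs="text")
demo.launch(server_name="0.0.0.0", server_port=7861)
```
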
## Model Weights License
For the model checkpoints on the Hugging Face model hub, please note that the icon_detect model is under the AGPL license, inherited from the original YOLO model, while icon_caption_blip2 and icon_caption_florence are under the MIT license. Please refer to the LICENSE file in each model's folder: https://huggingface.co/microsoft/OmniParser.

## 📚 Citation
Our technical report can be found [here](https://arxiv.org/abs/2408.00203).
If you find our work useful, please consider citing our work:
```
@misc{lu2024omniparserpurevisionbased,
      title={OmniParser for Pure Vision Based GUI Agent},
      author={Yadong Lu and Jianwei Yang and Yelong Shen and Ahmed Awadallah},
      year={2024},
      eprint={2408.00203},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2408.00203},
}
```

SECURITY.md

@@ -1,41 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.9 BLOCK -->

## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin).

If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below.

## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**

Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report).

If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp).

You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).

Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:

* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs.

## Preferred Languages
We prefer all communications to be in English.

## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd).

<!-- END MICROSOFT SECURITY.MD BLOCK -->

Binary file not shown.

File diff suppressed because one or more lines are too long

demo/__init__.py (new file, empty)

demo/app.py (new file, 479 lines)

@@ -0,0 +1,479 @@
"""
Entrypoint for Gradio, see https://gradio.app/
"""
import platform
import asyncio
import base64
import os
import io
import json
from datetime import datetime
from enum import StrEnum
from functools import partial
from pathlib import Path
from typing import cast, Dict
from PIL import Image
import gradio as gr
from anthropic import APIResponse
from anthropic.types import TextBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
from anthropic.types.tool_use_block import ToolUseBlock
from screeninfo import get_monitors
screens = get_monitors()
print(screens)
from loop import (
PROVIDER_TO_DEFAULT_MODEL_NAME,
APIProvider,
sampling_loop_sync,
)
from tools import ToolResult
from tools.computer import get_screen_details
SCREEN_NAMES, SELECTED_SCREEN_INDEX = get_screen_details()
# SELECTED_SCREEN_INDEX = None
# SCREEN_NAMES = None
CONFIG_DIR = Path("~/.anthropic").expanduser()
API_KEY_FILE = CONFIG_DIR / "api_key"

INTRO_TEXT = '''
🚀🤖✨ It's Play Time!

Welcome to the OmniParser+X Demo! X = [GPT-4o/4o-mini, Claude, Phi, Llama]. Let OmniParser turn your general-purpose vision-language model into an AI agent. Type a message to play with your beloved assistant.
'''

class Sender(StrEnum):
    USER = "user"
    BOT = "assistant"
    TOOL = "tool"
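
# setup_state seeds the per-session Gradio state with defaults: message
# history, model/provider selection, API keys read from the environment,
# the target screen, and UI flags such as hide_images.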
def setup_state(state):
    if "messages" not in state:
        state["messages"] = []
    if "model" not in state:
        # state["model"] = "gpt-4o + ShowUI"
        state["model"] = "omniparser + gpt-4o"
        # _reset_model(state)
    if "provider" not in state:
        if state["model"] == "qwen2vl + ShowUI":
            state["provider"] = "DashScopeAPI"
        elif state["model"] == "gpt-4o + ShowUI":
            state["provider"] = "openai"
        else:
            state["provider"] = os.getenv("API_PROVIDER", "anthropic") or "anthropic"
    if "provider_radio" not in state:
        state["provider_radio"] = state["provider"]
    if "openai_api_key" not in state:  # Fetch API keys from environment variables
        state["openai_api_key"] = os.getenv("OPENAI_API_KEY", "")
    if "anthropic_api_key" not in state:
        state["anthropic_api_key"] = os.getenv("ANTHROPIC_API_KEY", "")
    if "qwen_api_key" not in state:
        state["qwen_api_key"] = os.getenv("QWEN_API_KEY", "")
    # Set the initial api_key based on the provider
    if "api_key" not in state:
        if state["provider"] == "openai":
            state["api_key"] = state["openai_api_key"]
        elif state["provider"] == "anthropic":
            state["api_key"] = state["anthropic_api_key"]
        elif state["provider"] == "qwen":
            state["api_key"] = state["qwen_api_key"]
        else:
            state["api_key"] = ""
        # print(f"state['api_key']: {state['api_key']}")
    if not state["api_key"]:
        print("API key not found. Please set it in the environment or paste it in the textbox.")
    if "selected_screen" not in state:
        state["selected_screen"] = SELECTED_SCREEN_INDEX if SCREEN_NAMES else 0
    if "auth_validated" not in state:
        state["auth_validated"] = False
    if "responses" not in state:
        state["responses"] = {}
    if "tools" not in state:
        state["tools"] = {}
    if "only_n_most_recent_images" not in state:
        state["only_n_most_recent_images"] = 10
    if "custom_system_prompt" not in state:
        state["custom_system_prompt"] = load_from_storage("system_prompt") or ""
        # remove if you want to use the default system prompt
        device_os_name = "Windows" if platform.system() == "Windows" else "Mac" if platform.system() == "Darwin" else "Linux"
        state["custom_system_prompt"] += f"\n\nNOTE: you are operating a {device_os_name} machine"
    if "hide_images" not in state:
        state["hide_images"] = False
    if "chatbot_messages" not in state:
        state["chatbot_messages"] = []

def _reset_model(state):
    state["model"] = PROVIDER_TO_DEFAULT_MODEL_NAME[cast(APIProvider, state["provider"])]


async def main(state):
    """Render loop for Gradio"""
    setup_state(state)
    return "Setup completed"

def validate_auth(provider: APIProvider, api_key: str | None):
    if provider == APIProvider.ANTHROPIC:
        if not api_key:
            return "Enter your Anthropic API key to continue."
    if provider == APIProvider.BEDROCK:
        import boto3

        if not boto3.Session().get_credentials():
            return "You must have AWS credentials set up to use the Bedrock API."
    if provider == APIProvider.VERTEX:
        import google.auth
        from google.auth.exceptions import DefaultCredentialsError

        if not os.environ.get("CLOUD_ML_REGION"):
            return "Set the CLOUD_ML_REGION environment variable to use the Vertex API."
        try:
            google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
        except DefaultCredentialsError:
            return "Your google cloud credentials are not set up correctly."

def load_from_storage(filename: str) -> str | None:
    """Load data from a file in the storage directory."""
    try:
        file_path = CONFIG_DIR / filename
        if file_path.exists():
            data = file_path.read_text().strip()
            if data:
                return data
    except Exception as e:
        print(f"Debug: Error loading {filename}: {e}")
    return None
def save_to_storage(filename: str, data: str) -> None:
"""Save data to a file in the storage directory."""
try:
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
file_path = CONFIG_DIR / filename
file_path.write_text(data)
# Ensure only user can read/write the file
file_path.chmod(0o600)
except Exception as e:
print(f"Debug: Error saving {filename}: {e}")
def _api_response_callback(response: APIResponse[BetaMessage], response_state: dict):
response_id = datetime.now().isoformat()
response_state[response_id] = response
def _tool_output_callback(tool_output: ToolResult, tool_id: str, tool_state: dict):
tool_state[tool_id] = tool_output
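# chatbot_output_callback renders one agent or tool message to a display string
# (plain text, an error, or an inline base64 <img> tag) and appends it to the
# Gradio chatbot history as a (user, bot) pair.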
def chatbot_output_callback(message, chatbot_state, hide_images=False, sender="bot"):
def _render_message(message: str | BetaTextBlock | BetaToolUseBlock | ToolResult, hide_images=False):
print(f"_render_message: {str(message)[:100]}")
if isinstance(message, str):
return message
is_tool_result = not isinstance(message, str) and (
isinstance(message, ToolResult)
or message.__class__.__name__ == "ToolResult"
or message.__class__.__name__ == "CLIResult"
)
if not message or (
is_tool_result
and hide_images
and not hasattr(message, "error")
and not hasattr(message, "output")
): # return None if hide_images is True
return
# render tool result
if is_tool_result:
message = cast(ToolResult, message)
if message.output:
return message.output
if message.error:
return f"Error: {message.error}"
if message.base64_image and not hide_images:
# somehow can't display via gr.Image
# image_data = base64.b64decode(message.base64_image)
# return gr.Image(value=Image.open(io.BytesIO(image_data)))
return f'<img src="data:image/png;base64,{message.base64_image}">'
elif isinstance(message, BetaTextBlock) or isinstance(message, TextBlock):
return f"Analysis: {message.text}"
elif isinstance(message, BetaToolUseBlock) or isinstance(message, ToolUseBlock):
# return f"Tool Use: {message.name}\nInput: {message.input}"
return f"Next I will perform the following action: {message.input}"
else:
return message
def _truncate_string(s, max_length=500):
"""Truncate long strings for concise printing."""
if isinstance(s, str) and len(s) > max_length:
return s[:max_length] + "..."
return s
# processing Anthropic messages
message = _render_message(message, hide_images)
if sender == "bot":
chatbot_state.append((None, message))
else:
chatbot_state.append((message, None))
# Create a concise version of the chatbot state for printing
concise_state = [(_truncate_string(user_msg), _truncate_string(bot_msg))
for user_msg, bot_msg in chatbot_state]
# print(f"chatbot_output_callback chatbot_state: {concise_state} (truncated)")
def process_input(user_input, state):
setup_state(state)
# Append the user message to state["messages"]
if state["model"] == "gpt-4o + ShowUI" or state["model"] == "qwen2vl + ShowUI":
state["messages"].append(
{
"role": "user",
"content": [TextBlock(type="text", text=user_input)],
}
)
elif state["model"] == "claude-3-5-sonnet-20241022":
state["messages"].append(
{
"role": Sender.USER,
"content": [TextBlock(type="text", text=user_input)],
}
)
elif state["model"] == "omniparser + gpt-4o" or state["model"] == "omniparser + phi35v":
state["messages"].append(
{
"role": "user",
"content": [TextBlock(type="text", text=user_input)],
}
)
# Append the user's message to chatbot_messages with None for the assistant's reply
state['chatbot_messages'].append((user_input, None))
yield state['chatbot_messages'] # Yield to update the chatbot UI with the user's message
# Run sampling_loop_sync with the chatbot_output_callback
for loop_msg in sampling_loop_sync(
system_prompt_suffix=state["custom_system_prompt"],
model=state["model"],
provider=state["provider"],
messages=state["messages"],
output_callback=partial(chatbot_output_callback, chatbot_state=state['chatbot_messages'], hide_images=state["hide_images"]),
tool_output_callback=partial(_tool_output_callback, tool_state=state["tools"]),
api_response_callback=partial(_api_response_callback, response_state=state["responses"]),
api_key=state["api_key"],
only_n_most_recent_images=state["only_n_most_recent_images"],
selected_screen=state['selected_screen']
):
if loop_msg is None:
yield state['chatbot_messages']
print("End of task. Close the loop.")
break
yield state['chatbot_messages'] # Yield the updated chatbot_messages to update the chatbot UI
# with gr.Blocks(theme=gr.themes.Default()) as demo:
with gr.Blocks(theme='YTheme/Minecraft') as demo:
state = gr.State({}) # Use Gradio's state management
setup_state(state.value) # Initialize the state
# Retrieve screen details
gr.Markdown("# OmniParser + ✖️ Demo")
if not os.getenv("HIDE_WARNING", False):
gr.Markdown(INTRO_TEXT)
with gr.Accordion("Settings", open=True):
with gr.Row():
with gr.Column():
model = gr.Dropdown(
label="Model",
choices=["omniparser + gpt-4o", "omniparser + phi35v", "claude-3-5-sonnet-20241022"],
value="omniparser + gpt-4o", # Set to one of the choices
interactive=True,
)
with gr.Column():
provider = gr.Dropdown(
label="API Provider",
choices=[option.value for option in APIProvider],
value="openai",
interactive=False,
)
with gr.Column():
api_key = gr.Textbox(
label="API Key",
type="password",
value=state.value.get("api_key", ""),
placeholder="Paste your API key here",
interactive=True,
)
with gr.Column():
custom_prompt = gr.Textbox(
label="System Prompt Suffix",
value="",
interactive=True,
)
with gr.Column():
screen_options, primary_index = get_screen_details()
SCREEN_NAMES = screen_options
SELECTED_SCREEN_INDEX = primary_index
screen_selector = gr.Dropdown(
label="Select Screen",
choices=screen_options,
value=screen_options[primary_index] if screen_options else None,
interactive=True,
)
with gr.Column():
only_n_images = gr.Slider(
label="N most recent screenshots",
minimum=0,
maximum=10,
step=1,
value=2,
interactive=True,
)
# hide_images = gr.Checkbox(label="Hide screenshots", value=False)
# Define the merged dictionary with task mappings
# merged_dict = json.load(open("examples/ootb_examples.json", "r"))
merged_dict = {}
def update_only_n_images(only_n_images_value, state):
state["only_n_most_recent_images"] = only_n_images_value
# Callback to update the second dropdown based on the first selection
def update_second_menu(selected_category):
return gr.update(choices=list(merged_dict.get(selected_category, {}).keys()))
# Callback to update the third dropdown based on the second selection
def update_third_menu(selected_category, selected_option):
return gr.update(choices=list(merged_dict.get(selected_category, {}).get(selected_option, {}).keys()))
# Callback to update the textbox based on the third selection
def update_textbox(selected_category, selected_option, selected_task):
task_data = merged_dict.get(selected_category, {}).get(selected_option, {}).get(selected_task, {})
prompt = task_data.get("prompt", "")
preview_image = task_data.get("initial_state", "")
task_hint = "Task Hint: " + task_data.get("hint", "")
return prompt, preview_image, task_hint
# Function to update the global variable when the dropdown changes
def update_selected_screen(selected_screen_name, state):
global SCREEN_NAMES
global SELECTED_SCREEN_INDEX
SELECTED_SCREEN_INDEX = SCREEN_NAMES.index(selected_screen_name)
print(f"Selected screen updated to: {SELECTED_SCREEN_INDEX}")
state['selected_screen'] = SELECTED_SCREEN_INDEX
def update_model(model_selection, state):
state["model"] = model_selection
print(f"Model updated to: {state['model']}")
if model_selection == "claude-3-5-sonnet-20241022":
# Provider can be any of the current choices except 'openai'
provider_choices = [option.value for option in APIProvider if option.value != "openai"]
provider_value = "anthropic" # Set default to 'anthropic'
provider_interactive = True
api_key_placeholder = "claude API key"
elif model_selection == "omniparser + gpt-4o" or model_selection == "omniparser + phi35v":
# Provider can be any of the current choices except 'openai'
provider_choices = ["openai"]
provider_value = "openai"
provider_interactive = False
api_key_placeholder = "openai API key"
else:
# Default case
provider_choices = [option.value for option in APIProvider]
provider_value = state.get("provider", provider_choices[0])
provider_interactive = True
api_key_placeholder = ""
# Update the provider in state
state["provider"] = provider_value
# Update api_key in state based on the provider
if provider_value == "openai":
state["api_key"] = state.get("openai_api_key", "")
elif provider_value == "anthropic":
state["api_key"] = state.get("anthropic_api_key", "")
elif provider_value == "qwen":
state["api_key"] = state.get("qwen_api_key", "")
else:
state["api_key"] = ""
# Use gr.update() instead of gr.Dropdown.update()
provider_update = gr.update(
choices=provider_choices,
value=provider_value,
interactive=provider_interactive
)
# Update the API Key textbox
api_key_update = gr.update(
placeholder=api_key_placeholder,
value=state["api_key"]
)
return provider_update, api_key_update
def update_api_key_placeholder(provider_value, model_selection):
if model_selection == "claude-3-5-sonnet-20241022":
if provider_value == "anthropic":
return gr.update(placeholder="anthropic API key")
elif provider_value == "bedrock":
return gr.update(placeholder="bedrock API key")
elif provider_value == "vertex":
return gr.update(placeholder="vertex API key")
else:
return gr.update(placeholder="")
elif model_selection == "gpt-4o + ShowUI":
return gr.update(placeholder="openai API key")
else:
return gr.update(placeholder="")
def update_system_prompt_suffix(system_prompt_suffix, state):
state["custom_system_prompt"] = system_prompt_suffix
api_key.change(fn=lambda key: save_to_storage(API_KEY_FILE, key), inputs=api_key)
with gr.Row():
# submit_button = gr.Button("Submit") # Add submit button
with gr.Column(scale=8):
chat_input = gr.Textbox(show_label=False, placeholder="Type a message to send to Computer Use OOTB...", container=False)
with gr.Column(scale=1, min_width=50):
submit_button = gr.Button(value="Send", variant="primary")
chatbot = gr.Chatbot(label="Chatbot History", autoscroll=True, height=580)
model.change(fn=update_model, inputs=[model, state], outputs=[provider, api_key])
provider.change(fn=update_api_key_placeholder, inputs=[provider, model], outputs=api_key)
screen_selector.change(fn=update_selected_screen, inputs=[screen_selector, state], outputs=None)
only_n_images.change(fn=update_only_n_images, inputs=[only_n_images, state], outputs=None)
# chat_input.submit(process_input, [chat_input, state], chatbot)
submit_button.click(process_input, [chat_input, state], chatbot)
demo.launch(share=True, server_port=7861, server_name='0.0.0.0') # TODO: allowed_paths

View File

@@ -0,0 +1,136 @@
import asyncio
from typing import Any, Dict, cast
from collections.abc import Callable
from anthropic.types.beta import (
BetaContentBlock,
BetaContentBlockParam,
BetaImageBlockParam,
BetaMessage,
BetaMessageParam,
BetaTextBlockParam,
BetaToolResultBlockParam,
)
from anthropic.types import TextBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
from tools import BashTool, ComputerTool, EditTool, ToolCollection, ToolResult
class AnthropicExecutor:
def __init__(
self,
output_callback: Callable[[BetaContentBlockParam], None],
tool_output_callback: Callable[[Any, str], None],
selected_screen: int = 0
):
self.tool_collection = ToolCollection(
ComputerTool(selected_screen=selected_screen),
BashTool(),
EditTool(),
)
self.output_callback = output_callback
self.tool_output_callback = tool_output_callback
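    # __call__ appends the assistant turn to the shared message list, streams
    # each content block to the UI via output_callback, runs any tool_use
    # blocks through the tool collection, and yields once per rendered message.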
def __call__(self, response: BetaMessage, messages: list[BetaMessageParam]):
new_message = {
"role": "assistant",
"content": cast(list[BetaContentBlockParam], response.content),
}
if new_message not in messages:
messages.append(new_message)
else:
print("new_message already in messages, there are duplicates.")
tool_result_content: list[BetaToolResultBlockParam] = []
for content_block in cast(list[BetaContentBlock], response.content):
self.output_callback(content_block, sender="bot")
# Execute the tool
if content_block.type == "tool_use":
# Run the asynchronous tool execution in a synchronous context
result = asyncio.run(self.tool_collection.run(
name=content_block.name,
tool_input=cast(dict[str, Any], content_block.input),
))
self.output_callback(result, sender="bot")
tool_result_content.append(
_make_api_tool_result(result, content_block.id)
)
self.tool_output_callback(result, content_block.id)
# Craft messages based on the content_block
        # Note: to display messages in the Gradio UI, organize them as (user message, bot message) pairs
display_messages = _message_display_callback(messages)
# display_messages = []
# Send the messages to the gradio
for user_msg, bot_msg in display_messages:
# yield [user_msg, bot_msg], tool_result_content
yield [None, None], tool_result_content
if not tool_result_content:
return messages
return tool_result_content
def _message_display_callback(messages):
display_messages = []
for msg in messages:
try:
if isinstance(msg["content"][0], TextBlock):
display_messages.append((msg["content"][0].text, None)) # User message
elif isinstance(msg["content"][0], BetaTextBlock):
display_messages.append((None, msg["content"][0].text)) # Bot message
elif isinstance(msg["content"][0], BetaToolUseBlock):
display_messages.append((None, f"Tool Use: {msg['content'][0].name}\nInput: {msg['content'][0].input}")) # Bot message
elif isinstance(msg["content"][0], Dict) and msg["content"][0]["content"][-1]["type"] == "image":
display_messages.append((None, f'<img src="data:image/png;base64,{msg["content"][0]["content"][-1]["source"]["data"]}">')) # Bot message
else:
print(msg["content"][0])
except Exception as e:
print("error", e)
pass
return display_messages
def _make_api_tool_result(
result: ToolResult, tool_use_id: str
) -> BetaToolResultBlockParam:
"""Convert an agent ToolResult to an API ToolResultBlockParam."""
tool_result_content: list[BetaTextBlockParam | BetaImageBlockParam] | str = []
is_error = False
if result.error:
is_error = True
tool_result_content = _maybe_prepend_system_tool_result(result, result.error)
else:
if result.output:
tool_result_content.append(
{
"type": "text",
"text": _maybe_prepend_system_tool_result(result, result.output),
}
)
if result.base64_image:
tool_result_content.append(
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/png",
"data": result.base64_image,
},
}
)
return {
"type": "tool_result",
"content": tool_result_content,
"tool_use_id": tool_use_id,
"is_error": is_error,
}
def _maybe_prepend_system_tool_result(result: ToolResult, result_text: str):
if result.system:
result_text = f"<system>{result.system}</system>\n{result_text}"
return result_text
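# Example: a ToolResult with system="focus changed" and text "done" is rendered
# as "<system>focus changed</system>\ndone".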

View File

@@ -0,0 +1,207 @@
"""
Agentic sampling loop that calls the Anthropic API and a local implementation of the Anthropic-defined computer use tools.
"""
import asyncio
import platform
from collections.abc import Callable
from datetime import datetime
from enum import StrEnum
from typing import Any, cast
from anthropic import Anthropic, AnthropicBedrock, AnthropicVertex, APIResponse
from anthropic.types import (
ToolResultBlockParam,
)
from anthropic.types.beta import (
BetaContentBlock,
BetaContentBlockParam,
BetaImageBlockParam,
BetaMessage,
BetaMessageParam,
BetaTextBlockParam,
BetaToolResultBlockParam,
)
from anthropic.types import TextBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
from tools import BashTool, ComputerTool, EditTool, ToolCollection, ToolResult
from PIL import Image
from io import BytesIO
import gradio as gr
from typing import Dict
BETA_FLAG = "computer-use-2024-10-22"
class APIProvider(StrEnum):
ANTHROPIC = "anthropic"
BEDROCK = "bedrock"
VERTEX = "vertex"
PROVIDER_TO_DEFAULT_MODEL_NAME: dict[APIProvider, str] = {
APIProvider.ANTHROPIC: "claude-3-5-sonnet-20241022",
APIProvider.BEDROCK: "anthropic.claude-3-5-sonnet-20241022-v2:0",
APIProvider.VERTEX: "claude-3-5-sonnet-v2@20241022",
}
# Check OS
SYSTEM_PROMPT = f"""<SYSTEM_CAPABILITY>
* You are utilizing a Windows system with internet access.
* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
</SYSTEM_CAPABILITY>
"""
class AnthropicActor:
def __init__(
self,
model: str,
provider: APIProvider,
system_prompt_suffix: str,
api_key: str,
api_response_callback: Callable[[APIResponse[BetaMessage]], None],
max_tokens: int = 4096,
only_n_most_recent_images: int | None = None,
selected_screen: int = 0,
print_usage: bool = True,
):
self.model = model
self.provider = provider
self.system_prompt_suffix = system_prompt_suffix
self.api_key = api_key
self.api_response_callback = api_response_callback
self.max_tokens = max_tokens
self.only_n_most_recent_images = only_n_most_recent_images
self.selected_screen = selected_screen
self.tool_collection = ToolCollection(
ComputerTool(selected_screen=selected_screen),
BashTool(),
EditTool(),
)
self.system = (
f"{SYSTEM_PROMPT}{' ' + system_prompt_suffix if system_prompt_suffix else ''}"
)
self.total_token_usage = 0
self.total_cost = 0
self.print_usage = print_usage
# Instantiate the appropriate API client based on the provider
if provider == APIProvider.ANTHROPIC:
self.client = Anthropic(api_key=api_key)
elif provider == APIProvider.VERTEX:
self.client = AnthropicVertex()
elif provider == APIProvider.BEDROCK:
self.client = AnthropicBedrock()
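        # Providers outside the Anthropic family never reach this actor: the
        # demo routes OpenAI/Qwen-backed models through VLMAgent instead.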
def __call__(
self,
*,
messages: list[BetaMessageParam]
):
"""
Generate a response given history messages.
"""
if self.only_n_most_recent_images:
_maybe_filter_to_n_most_recent_images(messages, self.only_n_most_recent_images)
# Call the API synchronously
raw_response = self.client.beta.messages.with_raw_response.create(
max_tokens=self.max_tokens,
messages=messages,
model=self.model,
system=self.system,
tools=self.tool_collection.to_params(),
betas=["computer-use-2024-10-22"],
)
self.api_response_callback(cast(APIResponse[BetaMessage], raw_response))
response = raw_response.parse()
print(f"AnthropicActor response: {response}")
self.total_token_usage += response.usage.input_tokens + response.usage.output_tokens
self.total_cost += (response.usage.input_tokens * 3 / 1000000 + response.usage.output_tokens * 15 / 1000000)
if self.print_usage:
print(f"Claude total token usage so far: {self.total_token_usage}, total cost so far: $USD{self.total_cost}")
return response
def _maybe_filter_to_n_most_recent_images(
messages: list[BetaMessageParam],
images_to_keep: int,
min_removal_threshold: int = 10,
):
"""
With the assumption that images are screenshots that are of diminishing value as
the conversation progresses, remove all but the final `images_to_keep` tool_result
images in place, with a chunk of min_removal_threshold to reduce the amount we
break the implicit prompt cache.
"""
if images_to_keep is None:
return messages
tool_result_blocks = cast(
list[ToolResultBlockParam],
[
item
for message in messages
for item in (
message["content"] if isinstance(message["content"], list) else []
)
if isinstance(item, dict) and item.get("type") == "tool_result"
],
)
total_images = sum(
1
for tool_result in tool_result_blocks
for content in tool_result.get("content", [])
if isinstance(content, dict) and content.get("type") == "image"
)
images_to_remove = total_images - images_to_keep
# for better cache behavior, we want to remove in chunks
images_to_remove -= images_to_remove % min_removal_threshold
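    # e.g. with 27 surplus images and a threshold of 10, only 20 are removed,
    # so the cached prompt prefix shifts at most once per 10 screenshots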
for tool_result in tool_result_blocks:
if isinstance(tool_result.get("content"), list):
new_content = []
for content in tool_result.get("content", []):
if isinstance(content, dict) and content.get("type") == "image":
if images_to_remove > 0:
images_to_remove -= 1
continue
new_content.append(content)
tool_result["content"] = new_content
if __name__ == "__main__":
pass
# client = Anthropic(api_key="")
# response = client.beta.messages.with_raw_response.create(
# max_tokens=4096,
# model="claude-3-5-sonnet-20241022",
# system=SYSTEM_PROMPT,
# # tools=ToolCollection(
# # ComputerTool(selected_screen=0),
# # BashTool(),
# # EditTool(),
# # ).to_params(),
# betas=["computer-use-2024-10-22"],
# messages=[
# {"role": "user", "content": "click on (199, 199)."}
# ],
# )
# print(f"AnthropicActor response: {response.parse().usage.input_tokens+response.parse().usage.output_tokens}")

View File

@@ -0,0 +1,109 @@
import os
import re
import ast
import base64
def is_image_path(text):
# Checking if the input text ends with typical image file extensions
image_extensions = (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".tif")
if text.endswith(image_extensions):
return True
else:
return False
def encode_image(image_path):
"""Encode image file to base64."""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def is_url_or_filepath(input_string):
# Check if input_string is a URL
url_pattern = re.compile(
r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
if url_pattern.match(input_string):
return "URL"
# Check if input_string is a file path
file_path = os.path.abspath(input_string)
if os.path.exists(file_path):
return "File path"
return "Invalid"
def extract_data(input_string, data_type):
    # Regex to capture content after a ```{data_type} fence, through the closing fence or the end of the string
pattern = f"```{data_type}" + r"(.*?)(```|$)"
# Extract content
# re.DOTALL allows '.' to match newlines as well
matches = re.findall(pattern, input_string, re.DOTALL)
# Return the first match if exists, trimming whitespace and ignoring potential closing backticks
return matches[0][0].strip() if matches else input_string
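# Example: extract_data('```json\n{"a": 1}\n```', "json") returns '{"a": 1}';
# when no fenced block is found, the input string is returned unchanged.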
def parse_input(code):
"""Use AST to parse the input string and extract the function name, arguments, and keyword arguments."""
def get_target_names(target):
"""Recursively get all variable names from the assignment target."""
if isinstance(target, ast.Name):
return [target.id]
elif isinstance(target, ast.Tuple):
names = []
for elt in target.elts:
names.extend(get_target_names(elt))
return names
return []
def extract_value(node):
"""提取 AST 节点的实际值"""
if isinstance(node, ast.Constant):
return node.value
elif isinstance(node, ast.Name):
# TODO: a better way to handle variables
raise ValueError(
f"Arguments should be a Constant, got a variable {node.id} instead."
)
        # Handle other AST node types here as needed
return None
    # Defaults so the final return is defined even when no call is found
    targets, func_name, args, kwargs = [], None, [], {}
    try:
tree = ast.parse(code)
for node in ast.walk(tree):
if isinstance(node, ast.Assign):
targets = []
for t in node.targets:
targets.extend(get_target_names(t))
if isinstance(node.value, ast.Call):
func_name = node.value.func.id
args = [ast.dump(arg) for arg in node.value.args]
kwargs = {
kw.arg: extract_value(kw.value) for kw in node.value.keywords
}
print(f"Input: {code.strip()}")
print(f"Output Variables: {targets}")
print(f"Function Name: {func_name}")
print(f"Arguments: {args}")
print(f"Keyword Arguments: {kwargs}")
elif isinstance(node, ast.Expr) and isinstance(node.value, ast.Call):
targets = []
func_name = extract_value(node.value.func)
args = [extract_value(arg) for arg in node.value.args]
kwargs = {kw.arg: extract_value(kw.value) for kw in node.value.keywords}
except SyntaxError:
print(f"Input: {code.strip()}")
print("No match found")
return targets, func_name, args, kwargs
if __name__ == "__main__":
import json
s='{"Reasoning": "The Docker icon has been successfully clicked, and the Docker application should now be opening. No further actions are required.", "Next Action": None}'
json_str = json.loads(s)
print(json_str)

View File

@@ -0,0 +1,117 @@
import os
import logging
import base64
import requests
# from computer_use_demo.gui_agent.llm_utils import is_image_path, encode_image
def is_image_path(text):
# Checking if the input text ends with typical image file extensions
image_extensions = (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".tif")
if text.endswith(image_extensions):
return True
else:
return False
def encode_image(image_path):
"""Encode image file to base64."""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
# from openai import OpenAI
# client = OpenAI(
# api_key=os.environ.get("OPENAI_API_KEY")
# )
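# run_oai_interleaved flattens a mixed list of text strings and local image
# paths into the OpenAI chat-completions schema (images as base64 data URLs)
# and returns (response_text, total_token_usage) on success.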
def run_oai_interleaved(messages: list, system: str, llm: str, api_key: str, max_tokens=256, temperature=0):
api_key = api_key or os.environ.get("OPENAI_API_KEY")
if not api_key:
raise ValueError("OPENAI_API_KEY is not set")
headers = {"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"}
final_messages = [{"role": "system", "content": system}]
# image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
    if isinstance(messages, list):
for item in messages:
contents = []
if isinstance(item, dict):
for cnt in item["content"]:
if isinstance(cnt, str):
if is_image_path(cnt):
base64_image = encode_image(cnt)
content = {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
else:
content = {"type": "text", "text": cnt}
else:
# in this case it is a text block from anthropic
content = {"type": "text", "text": str(cnt)}
contents.append(content)
message = {"role": 'user', "content": contents}
else: # str
contents.append({"type": "text", "text": item})
message = {"role": "user", "content": contents}
final_messages.append(message)
elif isinstance(messages, str):
final_messages = [{"role": "user", "content": messages}]
# import pdb; pdb.set_trace()
print("[oai] sending messages:", {"role": "user", "content": messages})
payload = {
"model": llm,
"messages": final_messages,
"max_tokens": max_tokens,
"temperature": temperature,
# "stop": stop,
}
# from IPython.core.debugger import Pdb; Pdb().set_trace()
response = requests.post(
"https://api.openai.com/v1/chat/completions", headers=headers, json=payload
)
try:
text = response.json()['choices'][0]['message']['content']
token_usage = int(response.json()['usage']['total_tokens'])
return text, token_usage
# return error message if the response is not successful
except Exception as e:
print(f"Error in interleaved openAI: {e}. This may due to your invalid OPENAI_API_KEY. Please check the response: {response.json()} ")
return response.json()
if __name__ == "__main__":
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
raise ValueError("OPENAI_API_KEY is not set")
text, token_usage = run_oai_interleaved(
messages= [{"content": [
"What is in the screenshot?",
"./tmp/outputs/screenshot_0b04acbb783d4706bc93873d17ba8c05.png"],
"role": "user"
}],
llm="gpt-4o-mini",
system="You are a helpful assistant",
api_key=api_key,
max_tokens=256,
temperature=0)
print(text, token_usage)
# There is an introduction describing the Calyx... 36986

View File

@@ -0,0 +1,107 @@
import os
import logging
import base64
import requests
import dashscope
# from computer_use_demo.gui_agent.llm_utils import is_image_path, encode_image
def is_image_path(text):
    # Check if the input text ends with a typical image file extension;
    # the original stubs always returned False/"" and silently dropped images,
    # so this restores the implementations from the sibling oai module.
    image_extensions = (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".tif")
    return text.endswith(image_extensions)
def encode_image(image_path):
    """Encode an image file to base64."""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
def run_qwen(messages: list, system: str, llm: str, api_key: str, max_tokens=256, temperature=0):
api_key = api_key or os.environ.get("QWEN_API_KEY")
if not api_key:
raise ValueError("QWEN_API_KEY is not set")
dashscope.api_key = api_key
# from IPython.core.debugger import Pdb; Pdb().set_trace()
final_messages = [{"role": "system", "content": [{"text": system}]}]
# image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
    if isinstance(messages, list):
for item in messages:
contents = []
if isinstance(item, dict):
for cnt in item["content"]:
if isinstance(cnt, str):
if is_image_path(cnt):
# base64_image = encode_image(cnt)
content = [{"image": cnt}]
# content = {"type": "image_url", "image_url": {"url": image_url}}
else:
content = {"text": cnt}
contents.append(content)
message = {"role": item["role"], "content": contents}
else: # str
contents.append({"text": item})
message = {"role": "user", "content": contents}
final_messages.append(message)
print("[qwen-vl] sending messages:", final_messages)
response = dashscope.MultiModalConversation.call(
model='qwen-vl-max-0809',
messages=final_messages
)
# from IPython.core.debugger import Pdb; Pdb().set_trace()
try:
text = response.output.choices[0].message.content[0]['text']
usage = response.usage
if "total_tokens" not in usage:
token_usage = int(usage["input_tokens"] + usage["output_tokens"])
else:
token_usage = int(usage["total_tokens"])
return text, token_usage
# return response.json()['choices'][0]['message']['content']
# return error message if the response is not successful
except Exception as e:
print(f"Error in interleaved openAI: {e}. This may due to your invalid OPENAI_API_KEY. Please check the response: {response.json()} ")
return response.json()
if __name__ == "__main__":
api_key = os.environ.get("QWEN_API_KEY")
if not api_key:
raise ValueError("QWEN_API_KEY is not set")
dashscope.api_key = api_key
final_messages = [{"role": "user",
"content": [
{"text": "What is in the screenshot?"},
{"image": "./tmp/outputs/screenshot_0b04acbb783d4706bc93873d17ba8c05.png"}
]
}
]
response = dashscope.MultiModalConversation.call(model='qwen-vl-max-0809', messages=final_messages)
print(response)
text = response.output.choices[0].message.content[0]['text']
usage = response.usage
if "total_tokens" not in usage:
if "image_tokens" in usage:
token_usage = usage["input_tokens"] + usage["output_tokens"] + usage["image_tokens"]
else:
token_usage = usage["input_tokens"] + usage["output_tokens"]
else:
token_usage = usage["total_tokens"]
print(text, token_usage)
# The screenshot is from a video game... 1387

View File

@@ -0,0 +1,44 @@
import os
import base64
import logging
from .oai import run_oai_interleaved
from .gemini import run_gemini_interleaved
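# run_llm dispatches a text-only prompt to the matching backend by model-name
# prefix ("gpt*" -> OpenAI, "gemini*" -> Gemini) and logs both prompt and output.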
def run_llm(prompt, llm="gpt-4o-mini", max_tokens=256, temperature=0, stop=None):
log_prompt(prompt)
# turn string prompt into list
if isinstance(prompt, str):
prompt = [prompt]
elif isinstance(prompt, list):
pass
else:
raise ValueError(f"Invalid prompt type: {type(prompt)}")
if llm.startswith("gpt"): # gpt series
out = run_oai_interleaved(
prompt,
llm,
max_tokens,
temperature,
stop
)
elif llm.startswith("gemini"): # gemini series
out = run_gemini_interleaved(
prompt,
llm,
max_tokens,
temperature,
stop
)
else:
raise ValueError(f"Invalid llm: {llm}")
logging.info(
f"========Output for {llm}=======\n{out}\n============================")
return out
def log_prompt(prompt):
prompt_display = [prompt] if isinstance(prompt, str) else prompt
prompt_display = "\n\n".join(prompt_display)
logging.info(
f"========Prompt=======\n{prompt_display}\n============================")

236
demo/loop.py Normal file
View File

@@ -0,0 +1,236 @@
"""
Agentic sampling loop that calls the Anthropic API and a local implementation of the Anthropic-defined computer use tools.
"""
import time
import json
import asyncio
import platform
from collections.abc import Callable
from datetime import datetime
from enum import StrEnum
from typing import Any, cast, Dict
from anthropic import Anthropic, AnthropicBedrock, AnthropicVertex, APIResponse
from anthropic.types import (
ToolResultBlockParam,
TextBlock,
)
from anthropic.types.beta import (
BetaContentBlock,
BetaContentBlockParam,
BetaImageBlockParam,
BetaMessage,
BetaMessageParam,
BetaTextBlockParam,
BetaToolResultBlockParam,
)
from tools import BashTool, ComputerTool, EditTool, ToolCollection, ToolResult
import torch
from gui_agent.anthropic_agent import AnthropicActor
from executor.anthropic_executor import AnthropicExecutor
from omniparser_agent.vlm_agent import OmniParser, VLMAgent
from tools.colorful_text import colorful_text_showui, colorful_text_vlm
from tools.screen_capture import get_screenshot
from gui_agent.llm_utils.oai import encode_image
BETA_FLAG = "computer-use-2024-10-22"
class APIProvider(StrEnum):
ANTHROPIC = "anthropic"
BEDROCK = "bedrock"
VERTEX = "vertex"
OPENAI = "openai"
QWEN = "qwen"
PROVIDER_TO_DEFAULT_MODEL_NAME: dict[APIProvider, str] = {
APIProvider.ANTHROPIC: "claude-3-5-sonnet-20241022",
APIProvider.BEDROCK: "anthropic.claude-3-5-sonnet-20241022-v2:0",
APIProvider.VERTEX: "claude-3-5-sonnet-v2@20241022",
# APIProvider.OPENAI: "gpt-4o",
# APIProvider.QWEN: "qwen2vl",
}
# This system prompt is optimized for the Docker environment in this repository and
# specific tool combinations enabled.
# We encourage modifying this system prompt to ensure the model has context for the
# environment it is running in, and to provide any additional information that may be
# helpful for the task at hand.
SYSTEM_PROMPT = f"""<SYSTEM_CAPABILITY>
* You are utilizing a Windows system with internet access.
* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
</SYSTEM_CAPABILITY>
"""
import base64
from PIL import Image
from io import BytesIO
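# sampling_loop_sync wires three pieces together for each step: OmniParser
# (screen parsing), an actor (the planner for the selected model), and an
# executor that performs the tool calls; chatbot messages are yielded as they
# are produced so the UI can update incrementally.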
def sampling_loop_sync(
*,
model: str,
provider: APIProvider | None,
system_prompt_suffix: str,
messages: list[BetaMessageParam],
output_callback: Callable[[BetaContentBlock], None],
tool_output_callback: Callable[[ToolResult, str], None],
api_response_callback: Callable[[APIResponse[BetaMessage]], None],
api_key: str,
only_n_most_recent_images: int | None = 2,
max_tokens: int = 4096,
selected_screen: int = 0
):
"""
Synchronous agentic sampling loop for the assistant/tool interaction of computer use.
"""
print('in sampling_loop_sync, model:', model)
if model == "claude-3-5-sonnet-20241022":
omniparser = OmniParser(url="http://127.0.0.1:8000/send_text/",
selected_screen=selected_screen,)
# Register Actor and Executor
actor = AnthropicActor(
model=model,
provider=provider,
system_prompt_suffix=system_prompt_suffix,
api_key=api_key,
api_response_callback=api_response_callback,
max_tokens=max_tokens,
only_n_most_recent_images=only_n_most_recent_images,
selected_screen=selected_screen
)
# from IPython.core.debugger import Pdb; Pdb().set_trace()
executor = AnthropicExecutor(
output_callback=output_callback,
tool_output_callback=tool_output_callback,
selected_screen=selected_screen
)
elif model == "omniparser + gpt-4o" or model == "omniparser + phi35v":
omniparser = OmniParser(url="http://127.0.0.1:8000/send_text/",
selected_screen=selected_screen,)
actor = VLMAgent(
model=model,
provider=provider,
system_prompt_suffix=system_prompt_suffix,
api_key=api_key,
api_response_callback=api_response_callback,
selected_screen=selected_screen,
output_callback=output_callback,
)
executor = AnthropicExecutor(
output_callback=output_callback,
tool_output_callback=tool_output_callback,
selected_screen=selected_screen
)
# elif model == "gpt-4o + ShowUI" or model == "qwen2vl + ShowUI":
# planner = VLMPlanner(
# model=model,
# provider=provider,
# system_prompt_suffix=system_prompt_suffix,
# api_key=api_key,
# api_response_callback=api_response_callback,
# selected_screen=selected_screen,
# output_callback=output_callback,
# )
# if torch.cuda.is_available(): device = torch.device("cuda")
# elif torch.backends.mps.is_available(): device = torch.device("mps")
# else: device = torch.device("cpu") # support: 'cpu', 'mps', 'cuda'
# print(f"showUI-2B inited on device: {device}.")
# actor = ShowUIActor(
# model_path="./showui-2b/",
# # Replace with your local path, e.g., "C:\\code\\ShowUI-2B", "/Users/your_username/ShowUI-2B/".
# device=device,
# split='web', # 'web' or 'phone'
# selected_screen=selected_screen,
# output_callback=output_callback,
# )
# executor = ShowUIExecutor(
# output_callback=output_callback,
# tool_output_callback=tool_output_callback,
# selected_screen=selected_screen
# )
else:
raise ValueError(f"Model {model} not supported")
print(f"Model Inited: {model}, Provider: {provider}")
tool_result_content = None
print(f"Start the message loop. User messages: {messages}")
if model == "claude-3-5-sonnet-20241022": # Anthropic loop
while True:
parsed_screen = omniparser() # parsed_screen: {"som_image_base64": dino_labled_img, "parsed_content_list": parsed_content_list, "screen_info"}
            # import pdb; pdb.set_trace()  # leftover debug breakpoint, disabled so the loop can run unattended
            screen_info_block = TextBlock(text='Below is the structured accessibility information of the current UI screen, which includes text and icons you can operate on. Take this information into account when predicting the next action. Note that you will still need to take a screenshot to get the image: \n' + parsed_screen['screen_info'], type='text')
# # messages[-1]['content'].append(screen_info_block)
screen_info_dict = {"role": "user", "content": [screen_info_block]}
messages.append(screen_info_dict)
response = actor(messages=messages)
for message, tool_result_content in executor(response, messages):
yield message
if not tool_result_content:
return messages
messages.append({"content": tool_result_content, "role": "user"})
elif model == "omniparser + gpt-4o" or model == "omniparser + phi35v":
while True:
parsed_screen = omniparser()
response, vlm_response_json = actor(messages=messages, parsed_screen=parsed_screen)
for message, tool_result_content in executor(response, messages):
yield message
if not tool_result_content:
return messages
# import pdb; pdb.set_trace()
# messages.append({"role": "user",
# "content": ["History plan:\n" + str(vlm_response_json['Reasoning'])]})
# messages.append({"content": tool_result_content, "role": "user"})
elif model == "gpt-4o + ShowUI" or model == "qwen2vl + ShowUI": # ShowUI loop
while True:
vlm_response = planner(messages=messages)
next_action = json.loads(vlm_response).get("Next Action")
yield next_action
            if next_action in (None, "", "None"):
final_sc, final_sc_path = get_screenshot(selected_screen=selected_screen)
output_callback(f'No more actions from {colorful_text_vlm}. End of task. Final State:\n<img src="data:image/png;base64,{encode_image(str(final_sc_path))}">',
sender="bot")
yield None
output_callback(f"{colorful_text_vlm} sending action to {colorful_text_showui}:\n{next_action}", sender="bot")
actor_response = actor(messages=next_action)
yield actor_response
for message, tool_result_content in executor(actor_response, messages):
time.sleep(1)
yield message
# since showui executor has no feedback for now, we use "actor_response" to represent its response
# update messages for the next loop
messages.append({"role": "user",
"content": ["History plan:\n" + str(json.loads(vlm_response)) +
"\nHistory actions:\n" + str(actor_response["content"])]})
print(f"End of loop. Messages: {str(messages)[:100000]}. Total cost: $USD{planner.total_cost:.5f}")

View File

@@ -0,0 +1,417 @@
import json
import asyncio
import platform
from collections.abc import Callable
from datetime import datetime
from enum import StrEnum
from typing import Any, cast, Dict, Callable
import uuid
import requests
from PIL import Image, ImageDraw
import base64
from io import BytesIO
from anthropic import Anthropic, AnthropicBedrock, AnthropicVertex, APIResponse
from anthropic.types import TextBlock, ToolResultBlockParam
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock, BetaMessageParam, BetaUsage
from tools.screen_capture import get_screenshot
from gui_agent.llm_utils.oai import run_oai_interleaved, encode_image
from gui_agent.llm_utils.qwen import run_qwen
from gui_agent.llm_utils.llm_utils import extract_data
from tools.colorful_text import colorful_text_showui, colorful_text_vlm
SYSTEM_PROMPT = f"""<SYSTEM_CAPABILITY>
* You are utilizing a Windows system with internet access.
* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
</SYSTEM_CAPABILITY>
"""
class OmniParser:
def __init__(self,
url: str,
selected_screen: int = 0) -> None:
self.url = url
self.selected_screen = selected_screen
def __call__(self,):
screenshot, screenshot_path = get_screenshot(selected_screen=self.selected_screen)
screenshot_path = str(screenshot_path)
image_base64 = encode_image(screenshot_path)
# response = requests.post(self.url, json={"base64_image": image_base64, 'prompt': 'omniparser process'})
# response_json = response.json()
# example response_json: {"som_image_base64": dino_labled_img, "parsed_content_list": parsed_content_list, "latency": 0.1}
# Debug
response_json = {"som_image_base64": image_base64, "parsed_content_list": ['debug1', 'debug2'], "latency": 0.1}
print('omniparser latency:', response_json['latency'])
response_json = self.reformat_messages(response_json)
return response_json
def reformat_messages(self, response_json: dict):
parsed_content_list = response_json["parsed_content_list"]
screen_info = ""
# Debug
# for idx, element in enumerate(parsed_content_list):
# element['idx'] = idx
# if element['type'] == 'text':
# # screen_info += f'''<p id={idx} class="text" alt="{element['content']}"> </p>\n'''
# screen_info += f'ID: {idx}, Text: {element["content"]}\n'
# elif element['type'] == 'icon':
# # screen_info += f'''<img id={idx} class="icon" alt="{element['content']}"> </img>\n'''
# screen_info += f'ID: {idx}, Icon: {element["content"]}\n'
response_json['screen_info'] = screen_info
return response_json
class VLMAgent:
def __init__(
self,
model: str,
provider: str,
system_prompt_suffix: str,
api_key: str,
output_callback: Callable,
api_response_callback: Callable,
max_tokens: int = 4096,
only_n_most_recent_images: int | None = None,
selected_screen: int = 0,
print_usage: bool = True,
):
if model == "gpt-4o + ShowUI":
self.model = "gpt-4o-2024-11-20"
elif model == "gpt-4o-mini + ShowUI":
self.model = "gpt-4o-mini" # "gpt-4o-mini"
elif model == "qwen2vl + ShowUI":
self.model = "qwen2vl"
elif model == "omniparser + gpt-4o":
self.model = "gpt-4o-2024-11-20"
else:
raise ValueError(f"Model {model} not supported")
self.provider = provider
self.system_prompt_suffix = system_prompt_suffix
self.api_key = api_key
self.api_response_callback = api_response_callback
self.max_tokens = max_tokens
self.only_n_most_recent_images = only_n_most_recent_images
self.selected_screen = selected_screen
self.output_callback = output_callback
self.print_usage = print_usage
self.total_token_usage = 0
self.total_cost = 0
self.system = (
# f"{SYSTEM_PROMPT}{' ' + system_prompt_suffix if system_prompt_suffix else ''}"
f"{system_prompt_suffix}"
)
    def __call__(self, messages: list, parsed_screen: dict):
# example parsed_screen: {"som_image_base64": dino_labled_img, "parsed_content_list": parsed_content_list, "screen_info"}
screen_info = parsed_screen["screen_info"]
# drop looping actions msg, byte image etc
planner_messages = messages
# planner_messages = _message_filter_callback(messages)
print(f"filtered_messages: {planner_messages}\n\n", "full messages:", messages)
# import pdb; pdb.set_trace()
planner_messages = _keep_latest_images(planner_messages)
# if self.only_n_most_recent_images:
# _maybe_filter_to_n_most_recent_images(planner_messages, self.only_n_most_recent_images)
system = self._get_system_prompt(screen_info) + self.system_prompt_suffix
# Take a screenshot
screenshot, screenshot_path = get_screenshot(selected_screen=self.selected_screen)
screen_width, screen_height = screenshot.size
screenshot_path = str(screenshot_path)
image_base64 = encode_image(screenshot_path)
som_image_data = base64.b64decode(parsed_screen['som_image_base64'])
som_screenshot_path = f"./tmp/outputs/screenshot_som_{uuid.uuid4().hex}.png"
with open(som_screenshot_path, "wb") as f:
f.write(som_image_data)
self.output_callback(f'Screenshot for {colorful_text_vlm}:\n<img src="data:image/png;base64,{image_base64}">',
sender="bot")
        som_image_base64 = parsed_screen["som_image_base64"]
        self.output_callback(f'Set of Marks Screenshot for {colorful_text_vlm}:\n<img src="data:image/png;base64,{som_image_base64}">', sender="bot")
if isinstance(planner_messages[-1], dict):
if not isinstance(planner_messages[-1]["content"], list):
planner_messages[-1]["content"] = [planner_messages[-1]["content"]]
planner_messages[-1]["content"].append(screenshot_path)
planner_messages[-1]["content"].append(som_screenshot_path)
print(f"Sending messages to VLMPlanner : {planner_messages}")
if "gpt" in self.model:
vlm_response, token_usage = run_oai_interleaved(
messages=planner_messages,
system=system,
llm=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
temperature=0,
)
print(f"oai token usage: {token_usage}")
self.total_token_usage += token_usage
self.total_cost += (token_usage * 0.15 / 1000000) # https://openai.com/api/pricing/
elif "qwen" in self.model:
vlm_response, token_usage = run_qwen(
messages=planner_messages,
system=system,
llm=self.model,
api_key=self.api_key,
max_tokens=self.max_tokens,
temperature=0,
)
print(f"qwen token usage: {token_usage}")
self.total_token_usage += token_usage
self.total_cost += (token_usage * 0.02 / 7.25 / 1000) # 1USD=7.25CNY, https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-qianwen-vl-plus-api
elif "phi" in self.model:
pass # TODO
else:
raise ValueError(f"Model {self.model} not supported")
print(f"VLMPlanner response: {vlm_response}")
if self.print_usage:
print(f"VLMPlanner total token usage so far: {self.total_token_usage}. Total cost so far: $USD{self.total_cost:.5f}")
vlm_response_json = extract_data(vlm_response, "json")
vlm_response_json = json.loads(vlm_response_json)
# map "box_id" to "idx" in parsed_screen, and output the xy coordinate of bbox
        # Map the "Box ID" in the response to its bbox in parsed_screen and
        # compute the click coordinate at the bbox center
try:
bbox = parsed_screen["parsed_content_list"][int(vlm_response_json["Box ID"])]["bbox"]
vlm_response_json["coordinate"] = [int((bbox[0] + bbox[2]) / 2 * screen_width), int((bbox[1] + bbox[3]) / 2 * screen_height)]
# draw a circle on the screenshot image to indicate the action
self.draw_action(vlm_response_json, image_base64)
        except (KeyError, IndexError, TypeError, ValueError):
            print("No valid Box ID in the response.")
# vlm_plan_str = '\n'.join([f'{key}: {value}' for key, value in json.loads(response).items()])
vlm_plan_str = ""
for key, value in vlm_response_json.items():
if key == "Reasoning":
vlm_plan_str += f'{value}'
else:
vlm_plan_str += f'\n{key}: {value}'
# self.output_callback(f"{colorful_text_vlm}:\n{vlm_plan_str}", sender="bot")
# construct the response so that anthropicExcutor can execute the tool
analysis = BetaTextBlock(text=vlm_plan_str, type='text')
if 'coordinate' in vlm_response_json:
move_cursor_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': 'mouse_move', 'coordinate': vlm_response_json["coordinate"]},
name='computer', type='tool_use')
response_content = [analysis, move_cursor_block]
else:
response_content = [analysis]
if vlm_response_json["Next Action"] == "type":
click_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}', input={'action': 'left_click'}, name='computer', type='tool_use')
sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': vlm_response_json["Next Action"], 'text': vlm_response_json["value"]},
name='computer', type='tool_use')
response_content.extend([click_block, sim_content_block])
elif vlm_response_json["Next Action"] == "None":
print("Task paused/completed.")
else:
sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}',
input={'action': vlm_response_json["Next Action"]},
name='computer', type='tool_use')
response_content.append(sim_content_block)
response = BetaMessage(id=f'toolu_{uuid.uuid4()}', content=response_content, model='', role='assistant', type='message', stop_reason='tool_use', usage=BetaUsage(input_tokens=0, output_tokens=0))
return response, vlm_response_json
def _api_response_callback(self, response: APIResponse):
self.api_response_callback(response)
def reformat_messages(self, messages: list):
pass
def _get_system_prompt(self, screen_info: str = ""):
datetime_str = datetime.now().strftime("%A, %B %d, %Y")
os_name = platform.system()
return f"""
You are using an {os_name} device.
You are able to use a mouse and keyboard to interact with the computer based on the given task and screenshot.
You can only interact with the desktop GUI (no terminal or application menu access).
You may be given a history of plans and actions; this is the response from the previous loop.
You should carefully consider your plan based on the task, the screenshot, and the history of actions.
Here is the list of all detected bounding boxes on the screen, by ID, with their descriptions: {screen_info}
Your available "Next Action" options are:
- type: type a string of text.
- left_click: Describe the ui element to be clicked.
- enter: Press an enter key.
- escape: Press an ESCAPE key.
- hover: Describe the ui element to be hovered.
- scroll: Scroll the screen, you must specify up or down.
- press: Describe the ui element to be pressed.
Based on the visual information from the screenshot image and the detected bounding boxes, please determine the next action, the Box ID you should operate on, and the value (if the action is 'type') in order to complete the task.
Output format:
```json
{{
    "Reasoning": str, # describe what is in the current screen, taking the history into account; then describe your step-by-step thoughts on how to achieve the task, choosing one action from the available actions at a time.
    "Next Action": "action_type, action description" | "None", # one action at a time; describe it briefly and precisely.
    "Box ID": n,
    "value": "xxx" # only provide the text to type when the action is "type".
}}
```
One Example:
```json
{{
    "Reasoning": "The current screen shows the Google search results for amazon; in the previous action I searched for amazon on Google. Now I need to click on the first search result to go to amazon.com.",
    "Next Action": "left_click",
    "Box ID": m
}}
```
Another Example:
```json
{{
    "Reasoning": "The current screen shows the front page of amazon. There is no previous action, so I need to type 'Apple watch' in the search bar.",
    "Next Action": "type",
    "Box ID": n,
    "value": "Apple watch"
}}
```
IMPORTANT NOTES:
1. You should only give a single action at a time.
2. You should give an analysis of the current screen, reflect on what has been done by looking at the history, and then describe your step-by-step thoughts on how to achieve the task.
3. Attach the next action prediction in the "Next Action".
4. You should not include other actions, such as keyboard shortcuts.
5. When the task is completed, you should say "Next Action": "None" in the json field.
"""
def draw_action(self, vlm_response_json, image_base64):
# draw a circle using the coordinate in parsed_screen['som_image_base64']
image_data = base64.b64decode(image_base64)
image = Image.open(BytesIO(image_data))
draw = ImageDraw.Draw(image)
x, y = vlm_response_json["coordinate"]
radius = 10
draw.ellipse((x - radius, y - radius, x + radius, y + radius), outline='red', width=3)
buffered = BytesIO()
image.save('demo.png')
image.save(buffered, format="PNG")
image_with_circle_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
self.output_callback(f'Action performed on the Screenshot (red circle), for {colorful_text_vlm}:\n<img src="data:image/png;base64,{image_with_circle_base64}">', sender="bot")
def _keep_latest_images(messages):
    # Drop image-path strings from every message except the most recent one.
    # Rebuild each content list instead of calling remove() while iterating,
    # which would skip elements.
    for i in range(len(messages) - 1):
        if isinstance(messages[i]["content"], list):
            messages[i]["content"] = [
                cnt for cnt in messages[i]["content"]
                if not (isinstance(cnt, str) and cnt.endswith(
                    (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".tif")
                ))
            ]
    return messages
def _maybe_filter_to_n_most_recent_images(
messages: list[BetaMessageParam],
images_to_keep: int,
min_removal_threshold: int = 10,
):
"""
With the assumption that images are screenshots that are of diminishing value as
the conversation progresses, remove all but the final `images_to_keep` tool_result
images in place, with a chunk of min_removal_threshold to reduce the amount we
break the implicit prompt cache.
"""
if images_to_keep is None:
return messages
tool_result_blocks = cast(
list[ToolResultBlockParam],
[
item
for message in messages
for item in (
message["content"] if isinstance(message["content"], list) else []
)
if isinstance(item, dict) and item.get("type") == "tool_result"
],
)
total_images = sum(
1
for tool_result in tool_result_blocks
for content in tool_result.get("content", [])
if isinstance(content, dict) and content.get("type") == "image"
)
images_to_remove = total_images - images_to_keep
# for better cache behavior, we want to remove in chunks
images_to_remove -= images_to_remove % min_removal_threshold
for tool_result in tool_result_blocks:
if isinstance(tool_result.get("content"), list):
new_content = []
for content in tool_result.get("content", []):
if isinstance(content, dict) and content.get("type") == "image":
if images_to_remove > 0:
images_to_remove -= 1
continue
new_content.append(content)
tool_result["content"] = new_content
def _message_filter_callback(messages):
filtered_list = []
try:
for msg in messages:
if msg.get('role') in ['user']:
if not isinstance(msg["content"], list):
msg["content"] = [msg["content"]]
if isinstance(msg["content"][0], TextBlock):
filtered_list.append(str(msg["content"][0].text)) # User message
elif isinstance(msg["content"][0], str):
filtered_list.append(msg["content"][0]) # User message
else:
print("[_message_filter_callback]: drop message", msg)
continue
# elif msg.get('role') in ['assistant']:
# if isinstance(msg["content"][0], TextBlock):
# msg["content"][0] = str(msg["content"][0].text)
# elif isinstance(msg["content"][0], BetaTextBlock):
# msg["content"][0] = str(msg["content"][0].text)
# elif isinstance(msg["content"][0], BetaToolUseBlock):
# msg["content"][0] = str(msg['content'][0].input)
# elif isinstance(msg["content"][0], Dict) and msg["content"][0]["content"][-1]["type"] == "image":
# msg["content"][0] = f'<img src="data:image/png;base64,{msg["content"][0]["content"][-1]["source"]["data"]}">'
# else:
# print("[_message_filter_callback]: drop message", msg)
# continue
# filtered_list.append(msg["content"][0]) # User message
else:
print("[_message_filter_callback]: drop message", msg)
continue
except Exception as e:
print("[_message_filter_callback]: error", e)
return filtered_list

View File

@@ -1,78 +1,78 @@
# uvicorn remote_request:app --host 0.0.0.0 --port 8000 --reload
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import get_som_labeled_img, check_ocr_box, get_caption_model_processor, get_yolo_model
import torch
from PIL import Image
from typing import Dict, Tuple, List
import base64

config = {
    'som_model_path': '../weights/icon_detect_v1_5/model_v1_5.pt',
    'device': 'cpu',
    'caption_model_name': 'florence2',
    'caption_model_path': '../weights/icon_caption_florence',
    'BOX_TRESHOLD': 0.05
}

class Omniparser(object):
    def __init__(self, config: Dict):
        self.config = config
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.som_model = get_yolo_model(model_path=config['som_model_path'])
        self.caption_model_processor = get_caption_model_processor(model_name=config['caption_model_name'], model_name_or_path=config['caption_model_path'], device=device)
        print('Omniparser initialized!!!')

    def parse(self, image_base64: str):
        image_path = '../imgs/demo_image.jpg'
        with open(image_path, "wb") as fh:
            fh.write(base64.b64decode(image_base64))
        print('Parsing image:', image_path)
        image = Image.open(image_path)
        print('image size:', image.size)
        box_overlay_ratio = max(image.size) / 3200
        draw_bbox_config = {
            'text_scale': 0.8 * box_overlay_ratio,
            'text_thickness': max(int(2 * box_overlay_ratio), 1),
            'text_padding': max(int(3 * box_overlay_ratio), 1),
            'thickness': max(int(3 * box_overlay_ratio), 1),
        }
        BOX_TRESHOLD = config['BOX_TRESHOLD']
        ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_path, display_img=False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold': 0.8}, use_paddleocr=True)
        text, ocr_bbox = ocr_bbox_rslt
        dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_path, self.som_model, BOX_TRESHOLD=BOX_TRESHOLD, output_coord_in_ratio=True, ocr_bbox=ocr_bbox, draw_bbox_config=draw_bbox_config, caption_model_processor=self.caption_model_processor, ocr_text=text, use_local_semantics=True, iou_threshold=0.7, scale_img=False, batch_size=128)
        with open('../imgs/demo_image_som.jpg', "wb") as fh:
            fh.write(base64.b64decode(dino_labled_img))
        return dino_labled_img, parsed_content_list

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class Item(BaseModel):
    base64_image: str
    prompt: str

Omniparser = Omniparser(config)

@app.post("/send_text/")
async def send_text(item: Item):
    print('start parsing...')
    import time
    start = time.time()
    dino_labled_img, parsed_content_list = Omniparser.parse(item.base64_image)
    latency = time.time() - start
    print('time:', latency)
    return {"som_image_base64": dino_labled_img, "parsed_content_list": parsed_content_list, 'latency': latency}

Binary file not shown (image, 1.4 MiB)

16
demo/tools/__init__.py Normal file
View File

@@ -0,0 +1,16 @@
from .base import CLIResult, ToolResult
from .bash import BashTool
from .collection import ToolCollection
from .computer import ComputerTool
from .edit import EditTool
from .screen_capture import get_screenshot
# `__all__` must be a list of strings for `from tools import *` to work
__all__ = [
    "BashTool",
    "CLIResult",
    "ComputerTool",
    "EditTool",
    "ToolCollection",
    "ToolResult",
    "get_screenshot",
]

69
demo/tools/base.py Normal file
View File

@@ -0,0 +1,69 @@
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, fields, replace
from typing import Any
from anthropic.types.beta import BetaToolUnionParam
class BaseAnthropicTool(metaclass=ABCMeta):
"""Abstract base class for Anthropic-defined tools."""
@abstractmethod
def __call__(self, **kwargs) -> Any:
"""Executes the tool with the given arguments."""
...
@abstractmethod
def to_params(
self,
) -> BetaToolUnionParam:
raise NotImplementedError
@dataclass(kw_only=True, frozen=True)
class ToolResult:
"""Represents the result of a tool execution."""
output: str | None = None
error: str | None = None
base64_image: str | None = None
system: str | None = None
def __bool__(self):
return any(getattr(self, field.name) for field in fields(self))
def __add__(self, other: "ToolResult"):
def combine_fields(
field: str | None, other_field: str | None, concatenate: bool = True
):
if field and other_field:
if concatenate:
return field + other_field
raise ValueError("Cannot combine tool results")
return field or other_field
return ToolResult(
output=combine_fields(self.output, other.output),
error=combine_fields(self.error, other.error),
base64_image=combine_fields(self.base64_image, other.base64_image, False),
system=combine_fields(self.system, other.system),
)
def replace(self, **kwargs):
"""Returns a new ToolResult with the given fields replaced."""
return replace(self, **kwargs)
class CLIResult(ToolResult):
"""A ToolResult that can be rendered as a CLI output."""
class ToolFailure(ToolResult):
"""A ToolResult that represents a failure."""
class ToolError(Exception):
"""Raised when a tool encounters an error."""
def __init__(self, message):
self.message = message
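
To illustrate the __add__ semantics defined above, a quick sketch (not part of the diff): string fields concatenate, while base64_image is combined with concatenate=False, so two images cannot be merged:

a = ToolResult(output="ls: ", system="note")
b = ToolResult(output="done", error="warn")
c = a + b
assert c.output == "ls: done"   # both sides set: concatenated
assert c.error == "warn"        # only one side set: that one wins
assert c.system == "note"
# ToolResult(base64_image="x") + ToolResult(base64_image="y") raises
# ValueError("Cannot combine tool results"), since concatenate=False for images.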

136
demo/tools/bash.py Normal file
View File

@@ -0,0 +1,136 @@
import asyncio
import os
from typing import ClassVar, Literal
from anthropic.types.beta import BetaToolBash20241022Param
from .base import BaseAnthropicTool, CLIResult, ToolError, ToolResult
class _BashSession:
"""A session of a bash shell."""
_started: bool
_process: asyncio.subprocess.Process
command: str = "/bin/bash"
_output_delay: float = 0.2 # seconds
_timeout: float = 120.0 # seconds
_sentinel: str = "<<exit>>"
def __init__(self):
self._started = False
self._timed_out = False
async def start(self):
if self._started:
return
self._process = await asyncio.create_subprocess_shell(
self.command,
            # note: create_subprocess_shell always runs the command through a
            # shell; passing shell=False raises ValueError, so it is omitted here
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
self._started = True
def stop(self):
"""Terminate the bash shell."""
if not self._started:
raise ToolError("Session has not started.")
if self._process.returncode is not None:
return
self._process.terminate()
async def run(self, command: str):
"""Execute a command in the bash shell."""
if not self._started:
raise ToolError("Session has not started.")
if self._process.returncode is not None:
return ToolResult(
system="tool must be restarted",
error=f"bash has exited with returncode {self._process.returncode}",
)
if self._timed_out:
raise ToolError(
f"timed out: bash has not returned in {self._timeout} seconds and must be restarted",
)
# we know these are not None because we created the process with PIPEs
assert self._process.stdin
assert self._process.stdout
assert self._process.stderr
# send command to the process
self._process.stdin.write(
command.encode() + f"; echo '{self._sentinel}'\n".encode()
)
await self._process.stdin.drain()
# read output from the process, until the sentinel is found
output = ""
try:
async with asyncio.timeout(self._timeout):
while True:
await asyncio.sleep(self._output_delay)
data = await self._process.stdout.readline()
if not data:
break
line = data.decode()
output += line
if self._sentinel in line:
output = output.replace(self._sentinel, "")
break
except asyncio.TimeoutError:
self._timed_out = True
raise ToolError(
f"timed out: bash has not returned in {self._timeout} seconds and must be restarted",
) from None
        # read whatever is currently buffered on stderr; a plain stderr.read()
        # would block until EOF since the shell is still running
        error = self._process.stderr._buffer.decode()  # type: ignore[attr-defined]
        self._process.stderr._buffer.clear()  # type: ignore[attr-defined]
return CLIResult(output=output.strip(), error=error.strip())
class BashTool(BaseAnthropicTool):
"""
A tool that allows the agent to run bash commands.
The tool parameters are defined by Anthropic and are not editable.
"""
_session: _BashSession | None
name: ClassVar[Literal["bash"]] = "bash"
api_type: ClassVar[Literal["bash_20241022"]] = "bash_20241022"
def __init__(self):
self._session = None
super().__init__()
async def __call__(
self, command: str | None = None, restart: bool = False, **kwargs
):
if restart:
if self._session:
self._session.stop()
self._session = _BashSession()
await self._session.start()
return ToolResult(system="tool has been restarted.")
if self._session is None:
self._session = _BashSession()
await self._session.start()
if command is not None:
return await self._session.run(command)
raise ToolError("no command provided.")
def to_params(self) -> BetaToolBash20241022Param:
return {
"type": self.api_type,
"name": self.name,
}
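
A usage sketch for BashTool (assumes a POSIX shell is available; note that asyncio.timeout, used in _BashSession.run, requires Python 3.11+):

import asyncio

async def main():
    tool = BashTool()
    result = await tool(command="echo hello")  # first call lazily starts a session
    print(result.output)                       # -> "hello"
    await tool(restart=True)                   # terminate and restart the shell

asyncio.run(main())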

34
demo/tools/collection.py Normal file
View File

@@ -0,0 +1,34 @@
"""Collection classes for managing multiple tools."""
from typing import Any
from anthropic.types.beta import BetaToolUnionParam
from .base import (
BaseAnthropicTool,
ToolError,
ToolFailure,
ToolResult,
)
class ToolCollection:
"""A collection of anthropic-defined tools."""
def __init__(self, *tools: BaseAnthropicTool):
self.tools = tools
self.tool_map = {tool.to_params()["name"]: tool for tool in tools}
def to_params(
self,
) -> list[BetaToolUnionParam]:
return [tool.to_params() for tool in self.tools]
async def run(self, *, name: str, tool_input: dict[str, Any]) -> ToolResult:
tool = self.tool_map.get(name)
if not tool:
return ToolFailure(error=f"Tool {name} is invalid")
try:
return await tool(**tool_input)
except ToolError as e:
return ToolFailure(error=e.message)
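
A sketch of how the collection is typically wired up (hypothetical import paths; the to_params() list is what would be passed as tools= in an Anthropic API request):

import asyncio
from tools.bash import BashTool            # adjust to wherever the package is importable
from tools.collection import ToolCollection

async def main():
    tools = ToolCollection(BashTool())
    print(tools.to_params())  # one schema dict per tool, keyed by its name
    result = await tools.run(name="bash", tool_input={"command": "pwd"})
    print(result.error or result.output)

asyncio.run(main())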

View File

@@ -0,0 +1,27 @@
"""
Define some colorful stuffs for better visualization in the chat.
"""
# Define the RGB colors for each letter
colors = {
'S': 'rgb(106, 158, 210)',
'h': 'rgb(111, 163, 82)',
'o': 'rgb(209, 100, 94)',
'w': 'rgb(238, 171, 106)',
'U': 'rgb(0, 0, 0)',
'I': 'rgb(0, 0, 0)',
}
# Construct the colorful "ShowUI" word
colorful_text_showui = "**"+''.join(
f'<span style="color:{colors.get(letter, "black")}">{letter}</span>'
for letter in "ShowUI"
)+"**"
colorful_text_vlm = "**OmniParser Agent**"
colorful_text_user = "**User**"
# print(f"colorful_text_showui: {colorful_text_showui}")
# **<span style="color:rgb(106, 158, 210)">S</span><span style="color:rgb(111, 163, 82)">h</span><span style="color:rgb(209, 100, 94)">o</span><span style="color:rgb(238, 171, 106)">w</span><span style="color:rgb(0, 0, 0)">U</span><span style="color:rgb(0, 0, 0)">I</span>**

519
demo/tools/computer.py Normal file
View File

@@ -0,0 +1,519 @@
import subprocess
import platform
import pyautogui
import asyncio
import base64
import os
import time
if platform.system() == "Darwin":
    import Quartz  # macOS-only dependency (pyobjc); already guarded by the platform check
from enum import StrEnum
from pathlib import Path
from typing import Literal, TypedDict
from uuid import uuid4
from screeninfo import get_monitors
from PIL import ImageGrab, Image
from functools import partial
from anthropic.types.beta import BetaToolComputerUse20241022Param
from .base import BaseAnthropicTool, ToolError, ToolResult
from .run import run
OUTPUT_DIR = "./tmp/outputs"
TYPING_DELAY_MS = 12
TYPING_GROUP_SIZE = 50
Action = Literal[
"key",
"type",
"mouse_move",
"left_click",
"left_click_drag",
"right_click",
"middle_click",
"double_click",
"screenshot",
"cursor_position",
]
class Resolution(TypedDict):
width: int
height: int
MAX_SCALING_TARGETS: dict[str, Resolution] = {
"XGA": Resolution(width=1024, height=768), # 4:3
"WXGA": Resolution(width=1280, height=800), # 16:10
"FWXGA": Resolution(width=1366, height=768), # ~16:9
}
class ScalingSource(StrEnum):
COMPUTER = "computer"
API = "api"
class ComputerToolOptions(TypedDict):
display_height_px: int
display_width_px: int
display_number: int | None
def chunks(s: str, chunk_size: int) -> list[str]:
return [s[i : i + chunk_size] for i in range(0, len(s), chunk_size)]
def get_screen_details():
screens = get_monitors()
screen_details = []
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s.x)
# Loop through sorted screens and assign positions
primary_index = 0
for i, screen in enumerate(sorted_screens):
if i == 0:
layout = "Left"
elif i == len(sorted_screens) - 1:
layout = "Right"
else:
layout = "Center"
if screen.is_primary:
position = "Primary"
primary_index = i
else:
position = "Secondary"
screen_info = f"Screen {i + 1}: {screen.width}x{screen.height}, {layout}, {position}"
screen_details.append(screen_info)
return screen_details, primary_index
class ComputerTool(BaseAnthropicTool):
"""
A tool that allows the agent to interact with the screen, keyboard, and mouse of the current computer.
Adapted for Windows using 'pyautogui'.
"""
name: Literal["computer"] = "computer"
api_type: Literal["computer_20241022"] = "computer_20241022"
width: int
height: int
display_num: int | None
_screenshot_delay = 2.0
_scaling_enabled = True
@property
def options(self) -> ComputerToolOptions:
width, height = self.scale_coordinates(
ScalingSource.COMPUTER, self.width, self.height
)
return {
"display_width_px": width,
"display_height_px": height,
"display_number": self.display_num,
}
def to_params(self) -> BetaToolComputerUse20241022Param:
return {"name": self.name, "type": self.api_type, **self.options}
def __init__(self, selected_screen: int = 0, is_scaling: bool = False):
super().__init__()
# Get screen width and height using Windows command
self.display_num = None
self.offset_x = 0
self.offset_y = 0
self.selected_screen = selected_screen
self.is_scaling = is_scaling
self.width, self.height = self.get_screen_size()
# Path to cliclick
self.cliclick = "cliclick"
self.key_conversion = {"Page_Down": "pagedown",
"Page_Up": "pageup",
"Super_L": "win",
"Escape": "esc"}
system = platform.system() # Detect platform
if system == "Windows":
screens = get_monitors()
sorted_screens = sorted(screens, key=lambda s: s.x)
if self.selected_screen < 0 or self.selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
screen = sorted_screens[self.selected_screen]
bbox = (screen.x, screen.y, screen.x + screen.width, screen.y + screen.height)
elif system == "Darwin": # macOS
max_displays = 32 # Maximum number of displays to handle
active_displays = Quartz.CGGetActiveDisplayList(max_displays, None, None)[1]
screens = []
for display_id in active_displays:
bounds = Quartz.CGDisplayBounds(display_id)
screens.append({
'id': display_id, 'x': int(bounds.origin.x), 'y': int(bounds.origin.y),
'width': int(bounds.size.width), 'height': int(bounds.size.height),
'is_primary': Quartz.CGDisplayIsMain(display_id) # Check if this is the primary display
})
sorted_screens = sorted(screens, key=lambda s: s['x'])
if self.selected_screen < 0 or self.selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
screen = sorted_screens[self.selected_screen]
bbox = (screen['x'], screen['y'], screen['x'] + screen['width'], screen['y'] + screen['height'])
else: # Linux or other OS
cmd = "xrandr | grep ' primary' | awk '{print $4}'"
try:
# output = subprocess.check_output(cmd, shell=True).decode()
# resolution = output.strip().split()[0]
# width, height = map(int, resolution.split('x'))
# bbox = (0, 0, width, height) # Assuming single primary screen for simplicity
screen = get_monitors()[0]
bbox = (screen.x, screen.y, screen.x + screen.width, screen.y + screen.height)
except subprocess.CalledProcessError:
raise RuntimeError("Failed to get screen resolution on Linux.")
self.offset_x = screen['x'] if system == "Darwin" else screen.x
self.offset_y = screen['y'] if system == "Darwin" else screen.y
self.bbox = bbox
async def __call__(
self,
*,
action: Action,
text: str | None = None,
coordinate: tuple[int, int] | None = None,
**kwargs,
):
print(f"action: {action}, text: {text}, coordinate: {coordinate}, is_scaling: {self.is_scaling}")
if action in ("mouse_move", "left_click_drag"):
if coordinate is None:
raise ToolError(f"coordinate is required for {action}")
if text is not None:
raise ToolError(f"text is not accepted for {action}")
if not isinstance(coordinate, (list, tuple)) or len(coordinate) != 2:
raise ToolError(f"{coordinate} must be a tuple of length 2")
# if not all(isinstance(i, int) and i >= 0 for i in coordinate):
if not all(isinstance(i, int) for i in coordinate):
raise ToolError(f"{coordinate} must be a tuple of non-negative ints")
if self.is_scaling:
x, y = self.scale_coordinates(
ScalingSource.API, coordinate[0], coordinate[1]
)
else:
x, y = coordinate
# print(f"scaled_coordinates: {x}, {y}")
# print(f"offset: {self.offset_x}, {self.offset_y}")
# x += self.offset_x # TODO - check if this is needed
# y += self.offset_y
print(f"mouse move to {x}, {y}")
if action == "mouse_move":
pyautogui.moveTo(x, y)
return ToolResult(output=f"Moved mouse to ({x}, {y})")
elif action == "left_click_drag":
current_x, current_y = pyautogui.position()
pyautogui.dragTo(x, y, duration=0.5) # Adjust duration as needed
return ToolResult(output=f"Dragged mouse from ({current_x}, {current_y}) to ({x}, {y})")
if action in ("key", "type"):
if text is None:
raise ToolError(f"text is required for {action}")
if coordinate is not None:
raise ToolError(f"coordinate is not accepted for {action}")
if not isinstance(text, str):
                raise ToolError(f"{text} must be a string")
if action == "key":
# Handle key combinations
keys = text.split('+')
for key in keys:
key = self.key_conversion.get(key.strip(), key.strip())
key = key.lower()
pyautogui.keyDown(key) # Press down each key
for key in reversed(keys):
key = self.key_conversion.get(key.strip(), key.strip())
key = key.lower()
pyautogui.keyUp(key) # Release each key in reverse order
return ToolResult(output=f"Pressed keys: {text}")
elif action == "type":
pyautogui.typewrite(text, interval=TYPING_DELAY_MS / 1000) # Convert ms to seconds
pyautogui.press('enter')
screenshot_base64 = (await self.screenshot()).base64_image
return ToolResult(output=text, base64_image=screenshot_base64)
if action in (
"left_click",
"right_click",
"double_click",
"middle_click",
"screenshot",
"cursor_position",
"left_press",
):
if text is not None:
raise ToolError(f"text is not accepted for {action}")
if coordinate is not None:
raise ToolError(f"coordinate is not accepted for {action}")
if action == "screenshot":
return await self.screenshot()
elif action == "cursor_position":
x, y = pyautogui.position()
x, y = self.scale_coordinates(ScalingSource.COMPUTER, x, y)
return ToolResult(output=f"X={x},Y={y}")
else:
if action == "left_click":
pyautogui.click()
elif action == "right_click":
pyautogui.rightClick()
elif action == "middle_click":
pyautogui.middleClick()
elif action == "double_click":
pyautogui.doubleClick()
elif action == "left_press":
pyautogui.mouseDown()
time.sleep(1)
pyautogui.mouseUp()
return ToolResult(output=f"Performed {action}")
raise ToolError(f"Invalid action: {action}")
    async def screenshot(self):
        """Take a screenshot of the current screen and return a ToolResult with the base64 encoded image."""
        await asyncio.sleep(1)  # brief pause to let the UI settle, without blocking the event loop
output_dir = Path(OUTPUT_DIR)
output_dir.mkdir(parents=True, exist_ok=True)
path = output_dir / f"screenshot_{uuid4().hex}.png"
ImageGrab.grab = partial(ImageGrab.grab, all_screens=True)
# Detect platform
system = platform.system()
if system == "Windows":
# Windows: Use screeninfo to get monitor details
screens = get_monitors()
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s.x)
if self.selected_screen < 0 or self.selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
screen = sorted_screens[self.selected_screen]
bbox = (screen.x, screen.y, screen.x + screen.width, screen.y + screen.height)
elif system == "Darwin": # macOS
# macOS: Use Quartz to get monitor details
max_displays = 32 # Maximum number of displays to handle
active_displays = Quartz.CGGetActiveDisplayList(max_displays, None, None)[1]
# Get the display bounds (resolution) for each active display
screens = []
for display_id in active_displays:
bounds = Quartz.CGDisplayBounds(display_id)
screens.append({
'id': display_id,
'x': int(bounds.origin.x),
'y': int(bounds.origin.y),
'width': int(bounds.size.width),
'height': int(bounds.size.height),
'is_primary': Quartz.CGDisplayIsMain(display_id) # Check if this is the primary display
})
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s['x'])
if self.selected_screen < 0 or self.selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
screen = sorted_screens[self.selected_screen]
bbox = (screen['x'], screen['y'], screen['x'] + screen['width'], screen['y'] + screen['height'])
else: # Linux or other OS
cmd = "xrandr | grep ' primary' | awk '{print $4}'"
try:
# output = subprocess.check_output(cmd, shell=True).decode()
# resolution = output.strip().split()[0]
# width, height = map(int, resolution.split('x'))
# bbox = (0, 0, width, height) # Assuming single primary screen for simplicity
screen = get_monitors()[0]
bbox = (screen.x, screen.y, screen.x + screen.width, screen.y + screen.height)
except subprocess.CalledProcessError:
raise RuntimeError("Failed to get screen resolution on Linux.")
# Take screenshot using the bounding box
screenshot = ImageGrab.grab(bbox=bbox)
# Set offsets (for potential future use)
self.offset_x = screen['x'] if system == "Darwin" else screen.x
self.offset_y = screen['y'] if system == "Darwin" else screen.y
print(f"target_dimension {self.target_dimension}")
if not hasattr(self, 'target_dimension'):
screenshot = self.padding_image(screenshot)
self.target_dimension = MAX_SCALING_TARGETS["WXGA"]
# Resize if target_dimensions are specified
print(f"offset is {self.offset_x}, {self.offset_y}")
print(f"target_dimension is {self.target_dimension}")
screenshot = screenshot.resize((self.target_dimension["width"], self.target_dimension["height"]))
# Save the screenshot
screenshot.save(str(path))
if path.exists():
# Return a ToolResult instance instead of a dictionary
return ToolResult(base64_image=base64.b64encode(path.read_bytes()).decode())
raise ToolError(f"Failed to take screenshot: {path} does not exist.")
def padding_image(self, screenshot):
"""Pad the screenshot to 16:10 aspect ratio, when the aspect ratio is not 16:10."""
_, height = screenshot.size
new_width = height * 16 // 10
padding_image = Image.new("RGB", (new_width, height), (255, 255, 255))
# padding to top left
padding_image.paste(screenshot, (0, 0))
return padding_image
async def shell(self, command: str, take_screenshot=True) -> ToolResult:
"""Run a shell command and return the output, error, and optionally a screenshot."""
_, stdout, stderr = await run(command)
base64_image = None
if take_screenshot:
# delay to let things settle before taking a screenshot
await asyncio.sleep(self._screenshot_delay)
base64_image = (await self.screenshot()).base64_image
return ToolResult(output=stdout, error=stderr, base64_image=base64_image)
def scale_coordinates(self, source: ScalingSource, x: int, y: int):
"""Scale coordinates to a target maximum resolution."""
if not self._scaling_enabled:
return x, y
ratio = self.width / self.height
target_dimension = None
for target_name, dimension in MAX_SCALING_TARGETS.items():
            # allow some error in the aspect ratio - not all ratios are exactly 16:9
if abs(dimension["width"] / dimension["height"] - ratio) < 0.02:
if dimension["width"] < self.width:
target_dimension = dimension
self.target_dimension = target_dimension
# print(f"target_dimension: {target_dimension}")
break
if target_dimension is None:
# TODO: currently we force the target to be WXGA (16:10), when it cannot find a match
target_dimension = MAX_SCALING_TARGETS["WXGA"]
self.target_dimension = MAX_SCALING_TARGETS["WXGA"]
# should be less than 1
x_scaling_factor = target_dimension["width"] / self.width
y_scaling_factor = target_dimension["height"] / self.height
if source == ScalingSource.API:
if x > self.width or y > self.height:
raise ToolError(f"Coordinates {x}, {y} are out of bounds")
# scale up
return round(x / x_scaling_factor), round(y / y_scaling_factor)
# scale down
return round(x * x_scaling_factor), round(y * y_scaling_factor)
def get_screen_size(self):
if platform.system() == "Windows":
# Use screeninfo to get primary monitor on Windows
screens = get_monitors()
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s.x)
if self.selected_screen is None:
primary_monitor = next((m for m in get_monitors() if m.is_primary), None)
return primary_monitor.width, primary_monitor.height
elif self.selected_screen < 0 or self.selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
else:
screen = sorted_screens[self.selected_screen]
return screen.width, screen.height
elif platform.system() == "Darwin":
# macOS part using Quartz to get screen information
max_displays = 32 # Maximum number of displays to handle
active_displays = Quartz.CGGetActiveDisplayList(max_displays, None, None)[1]
# Get the display bounds (resolution) for each active display
screens = []
for display_id in active_displays:
bounds = Quartz.CGDisplayBounds(display_id)
screens.append({
'id': display_id,
'x': int(bounds.origin.x),
'y': int(bounds.origin.y),
'width': int(bounds.size.width),
'height': int(bounds.size.height),
'is_primary': Quartz.CGDisplayIsMain(display_id) # Check if this is the primary display
})
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s['x'])
if self.selected_screen is None:
# Find the primary monitor
primary_monitor = next((screen for screen in screens if screen['is_primary']), None)
if primary_monitor:
return primary_monitor['width'], primary_monitor['height']
else:
raise RuntimeError("No primary monitor found.")
elif self.selected_screen < 0 or self.selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
else:
# Return the resolution of the selected screen
screen = sorted_screens[self.selected_screen]
return screen['width'], screen['height']
else: # Linux or other OS
cmd = "xrandr | grep ' primary' | awk '{print $4}'"
try:
# output = subprocess.check_output(cmd, shell=True).decode()
# resolution = output.strip().split()[0]
# width, height = map(int, resolution.split('x'))
# return width, height
screen = get_monitors()[0]
return screen.width, screen.height
except subprocess.CalledProcessError:
raise RuntimeError("Failed to get screen resolution on Linux.")
def get_mouse_position(self):
# TODO: enhance this func
from AppKit import NSEvent
from Quartz import CGEventSourceCreate, kCGEventSourceStateCombinedSessionState
loc = NSEvent.mouseLocation()
# Adjust for different coordinate system
return int(loc.x), int(self.height - loc.y)
def map_keys(self, text: str):
"""Map text to cliclick key codes if necessary."""
# For simplicity, return text as is
# Implement mapping if special keys are needed
return text
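
To make scale_coordinates concrete, a worked example (assuming a 2560x1600 physical screen, whose 16:10 ratio selects the WXGA target of 1280x800):

# x_scaling_factor = 1280 / 2560 = 0.5; y_scaling_factor = 800 / 1600 = 0.5
# COMPUTER -> API (scale down): (1000, 500) -> (round(1000 * 0.5), round(500 * 0.5)) = (500, 250)
# API -> COMPUTER (scale up):   (500, 250)  -> (round(500 / 0.5),  round(250 / 0.5)) = (1000, 500)
# Screenshots are resized to the same 1280x800 target, so coordinates returned
# by the model line up with what it actually saw.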

290
demo/tools/edit.py Normal file
View File

@@ -0,0 +1,290 @@
from collections import defaultdict
from pathlib import Path
from typing import Literal, get_args
from anthropic.types.beta import BetaToolTextEditor20241022Param
from .base import BaseAnthropicTool, CLIResult, ToolError, ToolResult
from .run import maybe_truncate, run
Command = Literal[
"view",
"create",
"str_replace",
"insert",
"undo_edit",
]
SNIPPET_LINES: int = 4
class EditTool(BaseAnthropicTool):
"""
    A filesystem editor tool that allows the agent to view, create, and edit files.
The tool parameters are defined by Anthropic and are not editable.
"""
api_type: Literal["text_editor_20241022"] = "text_editor_20241022"
name: Literal["str_replace_editor"] = "str_replace_editor"
_file_history: dict[Path, list[str]]
def __init__(self):
self._file_history = defaultdict(list)
super().__init__()
def to_params(self) -> BetaToolTextEditor20241022Param:
return {
"name": self.name,
"type": self.api_type,
}
async def __call__(
self,
*,
command: Command,
path: str,
file_text: str | None = None,
view_range: list[int] | None = None,
old_str: str | None = None,
new_str: str | None = None,
insert_line: int | None = None,
**kwargs,
):
_path = Path(path)
self.validate_path(command, _path)
if command == "view":
return await self.view(_path, view_range)
elif command == "create":
if not file_text:
raise ToolError("Parameter `file_text` is required for command: create")
self.write_file(_path, file_text)
self._file_history[_path].append(file_text)
return ToolResult(output=f"File created successfully at: {_path}")
elif command == "str_replace":
if not old_str:
raise ToolError(
"Parameter `old_str` is required for command: str_replace"
)
return self.str_replace(_path, old_str, new_str)
elif command == "insert":
if insert_line is None:
raise ToolError(
"Parameter `insert_line` is required for command: insert"
)
if not new_str:
raise ToolError("Parameter `new_str` is required for command: insert")
return self.insert(_path, insert_line, new_str)
elif command == "undo_edit":
return self.undo_edit(_path)
raise ToolError(
f'Unrecognized command {command}. The allowed commands for the {self.name} tool are: {", ".join(get_args(Command))}'
)
def validate_path(self, command: str, path: Path):
"""
Check that the path/command combination is valid.
"""
# Check if its an absolute path
if not path.is_absolute():
suggested_path = Path("") / path
raise ToolError(
f"The path {path} is not an absolute path, it should start with `/`. Maybe you meant {suggested_path}?"
)
# Check if path exists
if not path.exists() and command != "create":
raise ToolError(
f"The path {path} does not exist. Please provide a valid path."
)
if path.exists() and command == "create":
raise ToolError(
f"File already exists at: {path}. Cannot overwrite files using command `create`."
)
# Check if the path points to a directory
if path.is_dir():
if command != "view":
raise ToolError(
f"The path {path} is a directory and only the `view` command can be used on directories"
)
async def view(self, path: Path, view_range: list[int] | None = None):
"""Implement the view command"""
if path.is_dir():
if view_range:
raise ToolError(
"The `view_range` parameter is not allowed when `path` points to a directory."
)
_, stdout, stderr = await run(
rf"find {path} -maxdepth 2 -not -path '*/\.*'"
)
if not stderr:
stdout = f"Here's the files and directories up to 2 levels deep in {path}, excluding hidden items:\n{stdout}\n"
return CLIResult(output=stdout, error=stderr)
file_content = self.read_file(path)
init_line = 1
if view_range:
if len(view_range) != 2 or not all(isinstance(i, int) for i in view_range):
raise ToolError(
"Invalid `view_range`. It should be a list of two integers."
)
file_lines = file_content.split("\n")
n_lines_file = len(file_lines)
init_line, final_line = view_range
if init_line < 1 or init_line > n_lines_file:
raise ToolError(
f"Invalid `view_range`: {view_range}. It's first element `{init_line}` should be within the range of lines of the file: {[1, n_lines_file]}"
)
if final_line > n_lines_file:
raise ToolError(
f"Invalid `view_range`: {view_range}. It's second element `{final_line}` should be smaller than the number of lines in the file: `{n_lines_file}`"
)
if final_line != -1 and final_line < init_line:
raise ToolError(
f"Invalid `view_range`: {view_range}. It's second element `{final_line}` should be larger or equal than its first `{init_line}`"
)
if final_line == -1:
file_content = "\n".join(file_lines[init_line - 1 :])
else:
file_content = "\n".join(file_lines[init_line - 1 : final_line])
return CLIResult(
output=self._make_output(file_content, str(path), init_line=init_line)
)
def str_replace(self, path: Path, old_str: str, new_str: str | None):
"""Implement the str_replace command, which replaces old_str with new_str in the file content"""
# Read the file content
file_content = self.read_file(path).expandtabs()
old_str = old_str.expandtabs()
new_str = new_str.expandtabs() if new_str is not None else ""
# Check if old_str is unique in the file
occurrences = file_content.count(old_str)
if occurrences == 0:
raise ToolError(
f"No replacement was performed, old_str `{old_str}` did not appear verbatim in {path}."
)
elif occurrences > 1:
file_content_lines = file_content.split("\n")
lines = [
idx + 1
for idx, line in enumerate(file_content_lines)
if old_str in line
]
raise ToolError(
f"No replacement was performed. Multiple occurrences of old_str `{old_str}` in lines {lines}. Please ensure it is unique"
)
# Replace old_str with new_str
new_file_content = file_content.replace(old_str, new_str)
# Write the new content to the file
self.write_file(path, new_file_content)
# Save the content to history
self._file_history[path].append(file_content)
# Create a snippet of the edited section
replacement_line = file_content.split(old_str)[0].count("\n")
start_line = max(0, replacement_line - SNIPPET_LINES)
end_line = replacement_line + SNIPPET_LINES + new_str.count("\n")
snippet = "\n".join(new_file_content.split("\n")[start_line : end_line + 1])
# Prepare the success message
success_msg = f"The file {path} has been edited. "
success_msg += self._make_output(
snippet, f"a snippet of {path}", start_line + 1
)
success_msg += "Review the changes and make sure they are as expected. Edit the file again if necessary."
return CLIResult(output=success_msg)
def insert(self, path: Path, insert_line: int, new_str: str):
"""Implement the insert command, which inserts new_str at the specified line in the file content."""
file_text = self.read_file(path).expandtabs()
new_str = new_str.expandtabs()
file_text_lines = file_text.split("\n")
n_lines_file = len(file_text_lines)
if insert_line < 0 or insert_line > n_lines_file:
raise ToolError(
f"Invalid `insert_line` parameter: {insert_line}. It should be within the range of lines of the file: {[0, n_lines_file]}"
)
new_str_lines = new_str.split("\n")
new_file_text_lines = (
file_text_lines[:insert_line]
+ new_str_lines
+ file_text_lines[insert_line:]
)
snippet_lines = (
file_text_lines[max(0, insert_line - SNIPPET_LINES) : insert_line]
+ new_str_lines
+ file_text_lines[insert_line : insert_line + SNIPPET_LINES]
)
new_file_text = "\n".join(new_file_text_lines)
snippet = "\n".join(snippet_lines)
self.write_file(path, new_file_text)
self._file_history[path].append(file_text)
success_msg = f"The file {path} has been edited. "
success_msg += self._make_output(
snippet,
"a snippet of the edited file",
max(1, insert_line - SNIPPET_LINES + 1),
)
success_msg += "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary."
return CLIResult(output=success_msg)
def undo_edit(self, path: Path):
"""Implement the undo_edit command."""
if not self._file_history[path]:
raise ToolError(f"No edit history found for {path}.")
old_text = self._file_history[path].pop()
self.write_file(path, old_text)
return CLIResult(
output=f"Last edit to {path} undone successfully. {self._make_output(old_text, str(path))}"
)
def read_file(self, path: Path):
"""Read the content of a file from a given path; raise a ToolError if an error occurs."""
try:
return path.read_text()
except Exception as e:
raise ToolError(f"Ran into {e} while trying to read {path}") from None
def write_file(self, path: Path, file: str):
"""Write the content of a file to a given path; raise a ToolError if an error occurs."""
try:
path.write_text(file)
except Exception as e:
raise ToolError(f"Ran into {e} while trying to write to {path}") from None
def _make_output(
self,
file_content: str,
file_descriptor: str,
init_line: int = 1,
expand_tabs: bool = True,
):
"""Generate output for the CLI based on the content of a file."""
file_content = maybe_truncate(file_content)
if expand_tabs:
file_content = file_content.expandtabs()
file_content = "\n".join(
[
f"{i + init_line:6}\t{line}"
for i, line in enumerate(file_content.split("\n"))
]
)
return (
f"Here's the result of running `cat -n` on {file_descriptor}:\n"
+ file_content
+ "\n"
)
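
A usage sketch for EditTool (hypothetical path; create raises if the file already exists, per validate_path):

import asyncio

async def main():
    editor = EditTool()
    await editor(command="create", path="/tmp/demo.txt",
                 file_text="hello old world\n")
    res = await editor(command="str_replace", path="/tmp/demo.txt",
                       old_str="old", new_str="new")
    print(res.output)  # snippet of the edited region, `cat -n` style
    await editor(command="undo_edit", path="/tmp/demo.txt")  # restores "old"

asyncio.run(main())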

42
demo/tools/run.py Normal file
View File

@@ -0,0 +1,42 @@
"""Utility to run shell commands asynchronously with a timeout."""
import asyncio
TRUNCATED_MESSAGE: str = "<response clipped><NOTE>To save on context only part of this file has been shown to you. You should retry this tool after you have searched inside the file with `grep -n` in order to find the line numbers of what you are looking for.</NOTE>"
MAX_RESPONSE_LEN: int = 16000
def maybe_truncate(content: str, truncate_after: int | None = MAX_RESPONSE_LEN):
"""Truncate content and append a notice if content exceeds the specified length."""
return (
content
if not truncate_after or len(content) <= truncate_after
else content[:truncate_after] + TRUNCATED_MESSAGE
)
async def run(
cmd: str,
timeout: float | None = 120.0, # seconds
truncate_after: int | None = MAX_RESPONSE_LEN,
):
"""Run a shell command asynchronously with a timeout."""
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
try:
stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=timeout)
return (
process.returncode or 0,
maybe_truncate(stdout.decode(), truncate_after=truncate_after),
maybe_truncate(stderr.decode(), truncate_after=truncate_after),
)
except asyncio.TimeoutError as exc:
try:
process.kill()
except ProcessLookupError:
pass
raise TimeoutError(
f"Command '{cmd}' timed out after {timeout} seconds"
) from exc
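
A usage sketch for run() (any shell command works; stdout/stderr are truncated past MAX_RESPONSE_LEN):

import asyncio

async def main():
    returncode, stdout, stderr = await run("echo hi")
    print(returncode, stdout.strip())  # -> 0 hi

asyncio.run(main())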

View File

@@ -0,0 +1,185 @@
import subprocess
import base64
from pathlib import Path
from PIL import ImageGrab
from uuid import uuid4
from screeninfo import get_monitors
import platform
if platform.system() == "Darwin":
    import Quartz  # macOS-only dependency (pyobjc); already guarded by the platform check
from PIL import ImageGrab
from functools import partial
from .base import BaseAnthropicTool, ToolError, ToolResult
OUTPUT_DIR = "./tmp/outputs"
def get_screenshot(selected_screen: int = 0, resize: bool = True, target_width: int = 1920, target_height: int = 1080):
    """Take a screenshot of the selected screen and return the PIL image together with the path it was saved to."""
    # print(f"get_screenshot selected_screen: {selected_screen}")
    display_num = None
    offset_x = 0
    offset_y = 0
    width, height = _get_screen_size(selected_screen)
output_dir = Path(OUTPUT_DIR)
output_dir.mkdir(parents=True, exist_ok=True)
path = output_dir / f"screenshot_{uuid4().hex}.png"
ImageGrab.grab = partial(ImageGrab.grab, all_screens=True)
# Detect platform
system = platform.system()
if system == "Windows":
# Windows: Use screeninfo to get monitor details
screens = get_monitors()
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s.x)
if selected_screen < 0 or selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
screen = sorted_screens[selected_screen]
bbox = (screen.x, screen.y, screen.x + screen.width, screen.y + screen.height)
elif system == "Darwin": # macOS
# macOS: Use Quartz to get monitor details
max_displays = 32 # Maximum number of displays to handle
active_displays = Quartz.CGGetActiveDisplayList(max_displays, None, None)[1]
# Get the display bounds (resolution) for each active display
screens = []
for display_id in active_displays:
bounds = Quartz.CGDisplayBounds(display_id)
screens.append({
'id': display_id,
'x': int(bounds.origin.x),
'y': int(bounds.origin.y),
'width': int(bounds.size.width),
'height': int(bounds.size.height),
'is_primary': Quartz.CGDisplayIsMain(display_id) # Check if this is the primary display
})
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s['x'])
# print(f"Darwin sorted_screens: {sorted_screens}")
if selected_screen < 0 or selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
screen = sorted_screens[selected_screen]
bbox = (screen['x'], screen['y'], screen['x'] + screen['width'], screen['y'] + screen['height'])
else: # Linux or other OS
cmd = "xrandr | grep ' primary' | awk '{print $4}'"
try:
# output = subprocess.check_output(cmd, shell=True).decode()
# resolution = output.strip().split()[0]
# width, height = map(int, resolution.split('x'))
screen = get_monitors()[0]
width, height = screen.width, screen.height
bbox = (0, 0, width, height) # Assuming single primary screen for simplicity
except subprocess.CalledProcessError:
raise RuntimeError("Failed to get screen resolution on Linux.")
# Take screenshot using the bounding box
screenshot = ImageGrab.grab(bbox=bbox)
    # Optional X11 capture via `scrot` (left over from experimentation, with a
    # stray pdb.set_trace() removed; the ImageGrab result below is what actually
    # gets resized, saved, and returned):
    # import os
    # if (display_num := os.getenv("DISPLAY_NUM")) is not None:
    #     subprocess.run(f"DISPLAY=:{int(display_num)} scrot -p {path}", shell=True, capture_output=True)
# Set offsets (for potential future use)
offset_x = screen['x'] if system == "Darwin" else screen.x
offset_y = screen['y'] if system == "Darwin" else screen.y
    # Resize if requested
if resize:
screenshot = screenshot.resize((target_width, target_height))
    # Save the screenshot so that `path` exists for the check below
    screenshot.save(str(path))
if path.exists():
# Return a ToolResult instance instead of a dictionary
return screenshot, path
raise ToolError(f"Failed to take screenshot: {path} does not exist.")
def _get_screen_size(selected_screen: int = 0):
if platform.system() == "Windows":
# Use screeninfo to get primary monitor on Windows
screens = get_monitors()
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s.x)
if selected_screen is None:
primary_monitor = next((m for m in get_monitors() if m.is_primary), None)
return primary_monitor.width, primary_monitor.height
elif selected_screen < 0 or selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
else:
screen = sorted_screens[selected_screen]
return screen.width, screen.height
elif platform.system() == "Darwin":
# macOS part using Quartz to get screen information
max_displays = 32 # Maximum number of displays to handle
active_displays = Quartz.CGGetActiveDisplayList(max_displays, None, None)[1]
# Get the display bounds (resolution) for each active display
screens = []
for display_id in active_displays:
bounds = Quartz.CGDisplayBounds(display_id)
screens.append({
'id': display_id,
'x': int(bounds.origin.x),
'y': int(bounds.origin.y),
'width': int(bounds.size.width),
'height': int(bounds.size.height),
'is_primary': Quartz.CGDisplayIsMain(display_id) # Check if this is the primary display
})
# Sort screens by x position to arrange from left to right
sorted_screens = sorted(screens, key=lambda s: s['x'])
if selected_screen is None:
# Find the primary monitor
primary_monitor = next((screen for screen in screens if screen['is_primary']), None)
if primary_monitor:
return primary_monitor['width'], primary_monitor['height']
else:
raise RuntimeError("No primary monitor found.")
elif selected_screen < 0 or selected_screen >= len(screens):
raise IndexError("Invalid screen index.")
else:
# Return the resolution of the selected screen
screen = sorted_screens[selected_screen]
return screen['width'], screen['height']
else: # Linux or other OS
cmd = "xrandr | grep ' primary' | awk '{print $4}'"
try:
# output = subprocess.check_output(cmd, shell=True).decode()
# resolution = output.strip().split()[0]
# width, height = map(int, resolution.split('x'))
# return width, height
screen = get_monitors()[0]
return screen.width, screen.height
except subprocess.CalledProcessError:
raise RuntimeError("Failed to get screen resolution on Linux.")

View File

@@ -1,105 +1,105 @@
from typing import Optional
import gradio as gr
import numpy as np
import torch
from PIL import Image
import io
import base64, os
from utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img
import torch
from PIL import Image

# yolo_model = get_yolo_model(model_path='weights/icon_detect/best.pt')
yolo_model = get_yolo_model(model_path='weights/icon_detect_v1_5/best.pt')
caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption_florence")
# caption_model_processor = get_caption_model_processor(model_name="blip2", model_name_or_path="weights/icon_caption_blip2")

MARKDOWN = """
# OmniParser for Pure Vision Based General GUI Agent 🔥
<div>
    <a href="https://arxiv.org/pdf/2408.00203">
        <img src="https://img.shields.io/badge/arXiv-2408.00203-b31b1b.svg" alt="Arxiv" style="display:inline-block;">
    </a>
</div>
OmniParser is a screen parsing tool to convert general GUI screen to structured elements.
"""

DEVICE = torch.device('cuda')

# @spaces.GPU
# @torch.inference_mode()
# @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
def process(
    image_input,
    box_threshold,
    iou_threshold,
    use_paddleocr,
    imgsz
) -> Optional[Image.Image]:
    image_save_path = 'imgs/saved_image_demo.png'
    image_input.save(image_save_path)
    image = Image.open(image_save_path)
    box_overlay_ratio = image.size[0] / 3200
    draw_bbox_config = {
        'text_scale': 0.8 * box_overlay_ratio,
        'text_thickness': max(int(2 * box_overlay_ratio), 1),
        'text_padding': max(int(3 * box_overlay_ratio), 1),
        'thickness': max(int(3 * box_overlay_ratio), 1),
    }
    # import pdb; pdb.set_trace()

    ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_save_path, display_img=False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold': 0.9}, use_paddleocr=use_paddleocr)
    text, ocr_bbox = ocr_bbox_rslt
    # print('prompt:', prompt)
    dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_save_path, yolo_model, BOX_TRESHOLD=box_threshold, output_coord_in_ratio=True, ocr_bbox=ocr_bbox, draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor, ocr_text=text, iou_threshold=iou_threshold, imgsz=imgsz,)
    image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img)))
    print('finish processing')
    parsed_content_list = '\n'.join([f'icon {i}: ' + str(v) for i, v in enumerate(parsed_content_list)])
    # parsed_content_list = str(parsed_content_list)
    return image, str(parsed_content_list)

with gr.Blocks() as demo:
    gr.Markdown(MARKDOWN)
    with gr.Row():
        with gr.Column():
            image_input_component = gr.Image(
                type='pil', label='Upload image')
            # set the threshold for removing the bounding boxes with low confidence, default is 0.05
            box_threshold_component = gr.Slider(
                label='Box Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.05)
            # set the threshold for removing the bounding boxes with large overlap, default is 0.1
            iou_threshold_component = gr.Slider(
                label='IOU Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.1)
            use_paddleocr_component = gr.Checkbox(
                label='Use PaddleOCR', value=True)
            imgsz_component = gr.Slider(
                label='Icon Detect Image Size', minimum=640, maximum=1920, step=32, value=640)
            submit_button_component = gr.Button(
                value='Submit', variant='primary')
        with gr.Column():
            image_output_component = gr.Image(type='pil', label='Image Output')
            text_output_component = gr.Textbox(label='Parsed screen elements', placeholder='Text Output')

    submit_button_component.click(
        fn=process,
        inputs=[
            image_input_component,
            box_threshold_component,
            iou_threshold_component,
            use_paddleocr_component,
            imgsz_component
        ],
        outputs=[image_output_component, text_output_component]
    )

# demo.launch(debug=False, show_error=True, share=True)
demo.launch(share=True, server_port=7861, server_name='0.0.0.0')

View File

@@ -1,60 +1,60 @@
from utils import get_som_labeled_img, check_ocr_box, get_caption_model_processor, get_dino_model, get_yolo_model
import torch
from ultralytics import YOLO
from PIL import Image
from typing import Dict, Tuple, List
import io
import base64

config = {
    'som_model_path': 'finetuned_icon_detect.pt',
    'device': 'cpu',
    'caption_model_path': 'Salesforce/blip2-opt-2.7b',
    'draw_bbox_config': {
        'text_scale': 0.8,
        'text_thickness': 2,
        'text_padding': 3,
        'thickness': 3,
    },
    'BOX_TRESHOLD': 0.05
}

class Omniparser(object):
    def __init__(self, config: Dict):
        self.config = config
        self.som_model = get_yolo_model(model_path=config['som_model_path'])
        # self.caption_model_processor = get_caption_model_processor(config['caption_model_path'], device=config['device'])
        # self.caption_model_processor['model'].to(torch.float32)

    def parse(self, image_path: str):
        print('Parsing image:', image_path)
        ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_path, display_img=False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold': 0.9})
        text, ocr_bbox = ocr_bbox_rslt
        draw_bbox_config = self.config['draw_bbox_config']
        BOX_TRESHOLD = self.config['BOX_TRESHOLD']
        dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_path, self.som_model, BOX_TRESHOLD=BOX_TRESHOLD, output_coord_in_ratio=False, ocr_bbox=ocr_bbox, draw_bbox_config=draw_bbox_config, caption_model_processor=None, ocr_text=text, use_local_semantics=False)
        image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img)))

        # formatting output
        return_list = [{'from': 'omniparser', 'shape': {'x': coord[0], 'y': coord[1], 'width': coord[2], 'height': coord[3]},
                        'text': parsed_content_list[i].split(': ')[1], 'type': 'text'} for i, (k, coord) in enumerate(label_coordinates.items()) if i < len(parsed_content_list)]
        return_list.extend(
            [{'from': 'omniparser', 'shape': {'x': coord[0], 'y': coord[1], 'width': coord[2], 'height': coord[3]},
              'text': 'None', 'type': 'icon'} for i, (k, coord) in enumerate(label_coordinates.items()) if i >= len(parsed_content_list)]
        )
        return [image, return_list]

parser = Omniparser(config)
image_path = 'examples/pc_1.png'

# time the parser
import time
s = time.time()
image, parsed_content_list = parser.parse(image_path)
device = config['device']
print(f'Time taken for Omniparser on {device}:', time.time() - s)

View File

@@ -1,18 +1,31 @@
torch
easyocr
torchvision
supervision==0.18.0
openai==1.3.5
transformers
ultralytics==8.1.24
azure-identity
numpy
opencv-python
opencv-python-headless
gradio
dill
accelerate
timm
einops==0.8.0
paddlepaddle
paddleocr
ruff==0.6.7
pre-commit==3.8.0
pytest==8.3.3
pytest-asyncio==0.23.6
pyautogui==0.9.54
streamlit>=1.38.0
anthropic[bedrock,vertex]>=0.37.1
jsonschema==4.22.0
boto3>=1.28.57
google-auth<3,>=2
screeninfo
uiautomation
dashscope

View File

@@ -1,425 +1,425 @@
'''
Adapted from https://github.com/google-research/google-research/tree/master/android_in_the_wild
'''

import jax
import jax.numpy as jnp
import numpy as np
# import action_type as action_type_lib
import enum


class ActionType(enum.IntEnum):
    # Placeholders for unused enum values
    UNUSED_0 = 0
    UNUSED_1 = 1
    UNUSED_2 = 2
    UNUSED_8 = 8
    UNUSED_9 = 9

    ########### Agent actions ###########

    # A type action that sends text to the emulator. Note that this simply sends
    # text and does not perform any clicks for element focus or enter presses for
    # submitting text.
    TYPE = 3

    # The dual point action used to represent all gestures.
    DUAL_POINT = 4

    # These actions differentiate pressing the home and back button from touches.
    # They represent explicit presses of back and home performed using ADB.
    PRESS_BACK = 5
    PRESS_HOME = 6

    # An action representing that the ADB command for hitting enter was performed.
    PRESS_ENTER = 7

    ########### Episode status actions ###########

    # An action used to indicate the desired task has been completed and resets
    # the environment. This action should also be used in the case that the task
    # has already been completed and there is nothing to do,
    # e.g. the task is to turn on the Wi-Fi when it is already on.
    STATUS_TASK_COMPLETE = 10

    # An action used to indicate that the desired task is impossible to complete
    # and resets the environment. This can be a result of many different things
    # including UI changes, Android version differences, etc.
    STATUS_TASK_IMPOSSIBLE = 11


_TAP_DISTANCE_THRESHOLD = 0.14  # Fraction of the screen
ANNOTATION_WIDTH_AUGMENT_FRACTION = 1.4
ANNOTATION_HEIGHT_AUGMENT_FRACTION = 1.4

# Interval determining if an action is a tap or a swipe.
_SWIPE_DISTANCE_THRESHOLD = 0.04
def _yx_in_bounding_boxes(
    yx, bounding_boxes
):
    """Check if the (y, x) point is contained in each bounding box.

    Args:
        yx: The (y, x) coordinate in pixels of the point.
        bounding_boxes: A 2D int array of shape (num_bboxes, 4), where each row
            represents a bounding box: (y_top_left, x_top_left, box_height,
            box_width). Note: containment is inclusive of the bounding box edges.

    Returns:
        is_inside: A 1D bool array where each element specifies if the point is
            contained within the respective box.
    """
    y, x = yx
    # `bounding_boxes` has shape (n_elements, 4); we extract each array along the
    # last axis into shape (n_elements, 1), then squeeze the unneeded dimension.
    top, left, height, width = [
        jnp.squeeze(v, axis=-1) for v in jnp.split(bounding_boxes, 4, axis=-1)
    ]
    # The y-axis is inverted for AndroidEnv, so bottom = top + height.
    bottom, right = top + height, left + width

    return jnp.logical_and(y >= top, y <= bottom) & jnp.logical_and(
        x >= left, x <= right)


def _resize_annotation_bounding_boxes(
    annotation_positions, annotation_width_augment_fraction,
    annotation_height_augment_fraction):
    """Resize the bounding boxes by the given fractions.

    Args:
        annotation_positions: Array of shape (N, 4), where each row represents
            the (y, x, height, width) of the bounding boxes.
        annotation_width_augment_fraction: The fraction to augment the box
            widths, e.g. 1.4 adds 140% of the width, growing the box to 240% of
            its original width.
        annotation_height_augment_fraction: Same as described for width, but for
            box height.

    Returns:
        Resized bounding boxes.
    """
    height_change = (
        annotation_height_augment_fraction * annotation_positions[:, 2])
    width_change = (
        annotation_width_augment_fraction * annotation_positions[:, 3])

    # Limit bounding box positions to the screen.
    resized_annotations = jnp.stack([
        jnp.maximum(0, annotation_positions[:, 0] - (height_change / 2)),
        jnp.maximum(0, annotation_positions[:, 1] - (width_change / 2)),
        jnp.minimum(1, annotation_positions[:, 2] + height_change),
        jnp.minimum(1, annotation_positions[:, 3] + width_change),
    ], axis=1)
    return resized_annotations
def is_tap_action(normalized_start_yx,
                  normalized_end_yx):
    distance = jnp.linalg.norm(
        jnp.array(normalized_start_yx) - jnp.array(normalized_end_yx))
    return distance <= _SWIPE_DISTANCE_THRESHOLD


def _is_non_dual_point_action(action_type):
    return jnp.not_equal(action_type, ActionType.DUAL_POINT)


def _check_tap_actions_match(
    tap_1_yx,
    tap_2_yx,
    annotation_positions,
    matching_tap_distance_threshold_screen_percentage,
    annotation_width_augment_fraction,
    annotation_height_augment_fraction,
):
    """Determines if two tap actions are the same."""
    resized_annotation_positions = _resize_annotation_bounding_boxes(
        annotation_positions,
        annotation_width_augment_fraction,
        annotation_height_augment_fraction,
    )

    # Check if the ground truth tap action falls in an annotation's bounding box.
    tap1_in_box = _yx_in_bounding_boxes(tap_1_yx, resized_annotation_positions)
    tap2_in_box = _yx_in_bounding_boxes(tap_2_yx, resized_annotation_positions)
    both_in_box = jnp.max(tap1_in_box & tap2_in_box)

    # If the ground-truth tap action falls outside any of the annotation
    # bounding boxes, or one of the actions is inside a bounding box and the
    # other is outside, compare the points using Euclidean distance.
    within_threshold = (
        jnp.linalg.norm(jnp.array(tap_1_yx) - jnp.array(tap_2_yx))
        <= matching_tap_distance_threshold_screen_percentage
    )
    return jnp.logical_or(both_in_box, within_threshold)


def _check_drag_actions_match(
    drag_1_touch_yx,
    drag_1_lift_yx,
    drag_2_touch_yx,
    drag_2_lift_yx,
):
    """Determines if two drag actions are the same."""
    # Store drag deltas (the change in the y and x coordinates from touch to
    # lift), magnitudes, and the index of the main axis, which is the axis with
    # the greatest change in coordinate value (e.g. a drag starting at (0, 0) and
    # ending at (0.3, 0.5) has a main axis index of 1).
    drag_1_deltas = drag_1_lift_yx - drag_1_touch_yx
    drag_1_magnitudes = jnp.abs(drag_1_deltas)
    drag_1_main_axis = np.argmax(drag_1_magnitudes)
    drag_2_deltas = drag_2_lift_yx - drag_2_touch_yx
    drag_2_magnitudes = jnp.abs(drag_2_deltas)
    drag_2_main_axis = np.argmax(drag_2_magnitudes)

    return jnp.equal(drag_1_main_axis, drag_2_main_axis)
def check_actions_match(
    action_1_touch_yx,
    action_1_lift_yx,
    action_1_action_type,
    action_2_touch_yx,
    action_2_lift_yx,
    action_2_action_type,
    annotation_positions,
    tap_distance_threshold=_TAP_DISTANCE_THRESHOLD,
    annotation_width_augment_fraction=ANNOTATION_WIDTH_AUGMENT_FRACTION,
    annotation_height_augment_fraction=ANNOTATION_HEIGHT_AUGMENT_FRACTION,
):
    """Determines if two actions are considered to be the same.

    Two actions being "the same" is defined here as two actions that would
    result in a similar screen state.

    Args:
        action_1_touch_yx: The (y, x) coordinates of the first action's touch.
        action_1_lift_yx: The (y, x) coordinates of the first action's lift.
        action_1_action_type: The action type of the first action.
        action_2_touch_yx: The (y, x) coordinates of the second action's touch.
        action_2_lift_yx: The (y, x) coordinates of the second action's lift.
        action_2_action_type: The action type of the second action.
        annotation_positions: The positions of the UI annotations for the
            screen. It is a 2D int array of shape (num_bboxes, 4), where each
            row represents a bounding box: (y_top_left, x_top_left, box_height,
            box_width). Note that containment is inclusive of the bounding box
            edges.
        tap_distance_threshold: The threshold that determines if two taps result
            in a matching screen state if they don't fall in the same bounding
            boxes.
        annotation_width_augment_fraction: The fraction to increase the width of
            the bounding box by.
        annotation_height_augment_fraction: The fraction to increase the height
            of the bounding box by.

    Returns:
        A boolean representing whether the two given actions are the same or not.
    """
    action_1_touch_yx = jnp.asarray(action_1_touch_yx)
    action_1_lift_yx = jnp.asarray(action_1_lift_yx)
    action_2_touch_yx = jnp.asarray(action_2_touch_yx)
    action_2_lift_yx = jnp.asarray(action_2_lift_yx)

    # Checks if at least one of the actions is global (i.e. not DUAL_POINT),
    # because if that is the case, only the actions' types need to be compared.
    has_non_dual_point_action = jnp.logical_or(
        _is_non_dual_point_action(action_1_action_type),
        _is_non_dual_point_action(action_2_action_type),
    )
    # print("non dual point: " + str(has_non_dual_point_action))

    different_dual_point_types = jnp.logical_xor(
        is_tap_action(action_1_touch_yx, action_1_lift_yx),
        is_tap_action(action_2_touch_yx, action_2_lift_yx),
    )
    # print("different dual type: " + str(different_dual_point_types))

    is_tap = jnp.logical_and(
        is_tap_action(action_1_touch_yx, action_1_lift_yx),
        is_tap_action(action_2_touch_yx, action_2_lift_yx),
    )
    # print("is tap: " + str(is_tap))

    taps_match = _check_tap_actions_match(
        action_1_touch_yx,
        action_2_touch_yx,
        annotation_positions,
        tap_distance_threshold,
        annotation_width_augment_fraction,
        annotation_height_augment_fraction,
    )
    # print("tap match: " + str(taps_match))

    taps_match = jnp.logical_and(is_tap, taps_match)
    # print("tap match: " + str(taps_match))

    drags_match = _check_drag_actions_match(
        action_1_touch_yx, action_1_lift_yx, action_2_touch_yx, action_2_lift_yx
    )
    drags_match = jnp.where(is_tap, False, drags_match)
    # print("drag match: " + str(drags_match))

    return jnp.where(
        has_non_dual_point_action,
        jnp.equal(action_1_action_type, action_2_action_type),
        jnp.where(
            different_dual_point_types,
            False,
            jnp.logical_or(taps_match, drags_match),
        ),
    )
def action_2_format(step_data):
    # Convert an action from the test set into the format used to compute the
    # matching score.
    action_type = step_data["action_type_id"]

    if action_type == 4:
        if step_data["action_type_text"] == 'click':
            touch_point = step_data["touch"]
            lift_point = step_data["lift"]
        else:  # scroll up/down/left/right
            if step_data["action_type_text"] == 'scroll down':
                touch_point = [0.5, 0.8]
                lift_point = [0.5, 0.2]
            elif step_data["action_type_text"] == 'scroll up':
                touch_point = [0.5, 0.2]
                lift_point = [0.5, 0.8]
            elif step_data["action_type_text"] == 'scroll left':
                touch_point = [0.2, 0.5]
                lift_point = [0.8, 0.5]
            elif step_data["action_type_text"] == 'scroll right':
                touch_point = [0.8, 0.5]
                lift_point = [0.2, 0.5]
    else:
        touch_point = [-1.0, -1.0]
        lift_point = [-1.0, -1.0]

    if action_type == 3:
        typed_text = step_data["type_text"]
    else:
        typed_text = ""

    action = {"action_type": action_type, "touch_point": touch_point, "lift_point": lift_point,
              "typed_text": typed_text}
    action["touch_point"] = [action["touch_point"][1], action["touch_point"][0]]
    action["lift_point"] = [action["lift_point"][1], action["lift_point"][0]]
    action["typed_text"] = action["typed_text"].lower()
    return action
def pred_2_format(step_data):
    # Convert the model output into the format used for action matching.
    action_type = step_data["action_type"]

    if action_type == 4:  # click
        action_type_new = 4
        touch_point = step_data["click_point"]
        lift_point = step_data["click_point"]
        typed_text = ""
    elif action_type == 0:
        action_type_new = 4
        touch_point = [0.5, 0.8]
        lift_point = [0.5, 0.2]
        typed_text = ""
    elif action_type == 1:
        action_type_new = 4
        touch_point = [0.5, 0.2]
        lift_point = [0.5, 0.8]
        typed_text = ""
    elif action_type == 8:
        action_type_new = 4
        touch_point = [0.2, 0.5]
        lift_point = [0.8, 0.5]
        typed_text = ""
    elif action_type == 9:
        action_type_new = 4
        touch_point = [0.8, 0.5]
        lift_point = [0.2, 0.5]
        typed_text = ""
    else:
        action_type_new = action_type
        touch_point = [-1.0, -1.0]
        lift_point = [-1.0, -1.0]
        typed_text = ""

    if action_type_new == 3:
        typed_text = step_data["typed_text"]

    action = {"action_type": action_type_new, "touch_point": touch_point, "lift_point": lift_point,
              "typed_text": typed_text}
    action["touch_point"] = [action["touch_point"][1], action["touch_point"][0]]
    action["lift_point"] = [action["lift_point"][1], action["lift_point"][0]]
    action["typed_text"] = action["typed_text"].lower()
    return action
def pred_2_format_simplified(step_data):
    # Convert the simplified model output into the format used for action
    # matching.
    action_type = step_data["action_type"]

    if action_type == 'click':
        action_type_new = 4
        touch_point = step_data["click_point"]
        lift_point = step_data["click_point"]
        typed_text = ""
    elif action_type == 'scroll' and step_data["direction"] == 'down':
        action_type_new = 4
        touch_point = [0.5, 0.8]
        lift_point = [0.5, 0.2]
        typed_text = ""
    elif action_type == 'scroll' and step_data["direction"] == 'up':
        action_type_new = 4
        touch_point = [0.5, 0.2]
        lift_point = [0.5, 0.8]
        typed_text = ""
    elif action_type == 'scroll' and step_data["direction"] == 'left':
        action_type_new = 4
        touch_point = [0.2, 0.5]
        lift_point = [0.8, 0.5]
        typed_text = ""
    elif action_type == 'scroll' and step_data["direction"] == 'right':
        action_type_new = 4
        touch_point = [0.8, 0.5]
        lift_point = [0.2, 0.5]
        typed_text = ""
    elif action_type == 'type':
        action_type_new = 3
        touch_point = [-1.0, -1.0]
        lift_point = [-1.0, -1.0]
        typed_text = step_data["text"]
    elif action_type == 'navigate_back':
        action_type_new = 5
        touch_point = [-1.0, -1.0]
        lift_point = [-1.0, -1.0]
        typed_text = ""
    elif action_type == 'navigate_home':
        action_type_new = 6
        touch_point = [-1.0, -1.0]
        lift_point = [-1.0, -1.0]
        typed_text = ""
    else:
        action_type_new = action_type
        touch_point = [-1.0, -1.0]
        lift_point = [-1.0, -1.0]
        typed_text = ""
    # if action_type_new == 'type':
    #     typed_text = step_data["text"]

    action = {"action_type": action_type_new, "touch_point": touch_point, "lift_point": lift_point,
              "typed_text": typed_text}
    action["touch_point"] = [action["touch_point"][1], action["touch_point"][0]]
    action["lift_point"] = [action["lift_point"][1], action["lift_point"][0]]
    action["typed_text"] = action["typed_text"].lower()
    return action
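To make the matching logic concrete, here is a hypothetical call with toy normalized coordinates (values invented; two taps landing inside the same annotated box should match):

```python
# Sketch: two DUAL_POINT taps inside the same (augmented) box are "the same".
import jax.numpy as jnp

# One box: (y_top_left, x_top_left, height, width), normalized to [0, 1].
annotation_positions = jnp.array([[0.1, 0.1, 0.2, 0.2]])

match = check_actions_match(
    action_1_touch_yx=jnp.array([0.15, 0.15]),
    action_1_lift_yx=jnp.array([0.15, 0.15]),   # touch == lift -> tap
    action_1_action_type=ActionType.DUAL_POINT,
    action_2_touch_yx=jnp.array([0.2, 0.2]),
    action_2_lift_yx=jnp.array([0.2, 0.2]),
    action_2_action_type=ActionType.DUAL_POINT,
    annotation_positions=annotation_positions,
)
print(bool(match))  # expected: True
```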

View File

@@ -1,45 +1,45 @@
'''
Adapted from https://github.com/google-research/google-research/tree/master/android_in_the_wild
'''

import enum


class ActionType(enum.IntEnum):
    # Placeholders for unused enum values
    UNUSED_0 = 0
    UNUSED_1 = 1
    UNUSED_2 = 2
    UNUSED_8 = 8
    UNUSED_9 = 9

    ########### Agent actions ###########

    # A type action that sends text to the emulator. Note that this simply sends
    # text and does not perform any clicks for element focus or enter presses for
    # submitting text.
    TYPE = 3

    # The dual point action used to represent all gestures.
    DUAL_POINT = 4

    # These actions differentiate pressing the home and back button from touches.
    # They represent explicit presses of back and home performed using ADB.
    PRESS_BACK = 5
    PRESS_HOME = 6

    # An action representing that the ADB command for hitting enter was performed.
    PRESS_ENTER = 7

    ########### Episode status actions ###########

    # An action used to indicate the desired task has been completed and resets
    # the environment. This action should also be used in the case that the task
    # has already been completed and there is nothing to do,
    # e.g. the task is to turn on the Wi-Fi when it is already on.
    STATUS_TASK_COMPLETE = 10

    # An action used to indicate that the desired task is impossible to complete
    # and resets the environment. This can be a result of many different things
    # including UI changes, Android version differences, etc.
    STATUS_TASK_IMPOSSIBLE = 11
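Since ActionType is an IntEnum, its members interoperate with plain integers, which is how the converters above compare action_type values; a tiny illustration:

```python
# Illustrative only: IntEnum members compare equal to their integer values.
assert ActionType.DUAL_POINT == 4
assert ActionType(10) is ActionType.STATUS_TASK_COMPLETE
print([a.name for a in ActionType])  # includes the UNUSED_* placeholders
```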

View File

@@ -1,262 +1,262 @@
from typing import List, Optional, Union, Tuple

import cv2
import numpy as np
from supervision.detection.core import Detections
from supervision.draw.color import Color, ColorPalette


class BoxAnnotator:
    """
    A class for drawing bounding boxes on an image using detections provided.

    Attributes:
        color (Union[Color, ColorPalette]): The color to draw the bounding box,
            can be a single color or a color palette
        thickness (int): The thickness of the bounding box lines, default is 2
        text_color (Color): The color of the text on the bounding box, default is white
        text_scale (float): The scale of the text on the bounding box, default is 0.5
        text_thickness (int): The thickness of the text on the bounding box,
            default is 1
        text_padding (int): The padding around the text on the bounding box,
            default is 5
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 3,  # 1 for seeclick, 2 for mind2web, 3 for demo
        text_color: Color = Color.BLACK,
        text_scale: float = 0.5,  # 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
        text_thickness: int = 2,  # 1; 2 for demo
        text_padding: int = 10,
        avoid_overlap: bool = True,
    ):
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.text_color: Color = text_color
        self.text_scale: float = text_scale
        self.text_thickness: int = text_thickness
        self.text_padding: int = text_padding
        self.avoid_overlap: bool = avoid_overlap
    def annotate(
        self,
        scene: np.ndarray,
        detections: Detections,
        labels: Optional[List[str]] = None,
        skip_label: bool = False,
        image_size: Optional[Tuple[int, int]] = None,
    ) -> np.ndarray:
        """
        Draws bounding boxes on the frame using the detections provided.

        Args:
            scene (np.ndarray): The image on which the bounding boxes will be drawn
            detections (Detections): The detections for which the
                bounding boxes will be drawn
            labels (Optional[List[str]]): An optional list of labels
                corresponding to each detection. If `labels` are not provided,
                the corresponding `class_id` will be used as the label.
            skip_label (bool): If set to `True`, skips bounding box label annotation.
        Returns:
            np.ndarray: The image with the bounding boxes drawn on it

        Example:
            ```python
            import supervision as sv

            classes = ['person', ...]
            image = ...
            detections = sv.Detections(...)

            box_annotator = sv.BoxAnnotator()
            labels = [
                f"{classes[class_id]} {confidence:0.2f}"
                for _, _, confidence, class_id, _ in detections
            ]
            annotated_frame = box_annotator.annotate(
                scene=image.copy(),
                detections=detections,
                labels=labels
            )
            ```
        """
        font = cv2.FONT_HERSHEY_SIMPLEX
        for i in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[i].astype(int)
            class_id = (
                detections.class_id[i] if detections.class_id is not None else None
            )
            idx = class_id if class_id is not None else i
            color = (
                self.color.by_idx(idx)
                if isinstance(self.color, ColorPalette)
                else self.color
            )
            cv2.rectangle(
                img=scene,
                pt1=(x1, y1),
                pt2=(x2, y2),
                color=color.as_bgr(),
                thickness=self.thickness,
            )
            if skip_label:
                continue

            text = (
                f"{class_id}"
                if (labels is None or len(detections) != len(labels))
                else labels[i]
            )

            text_width, text_height = cv2.getTextSize(
                text=text,
                fontFace=font,
                fontScale=self.text_scale,
                thickness=self.text_thickness,
            )[0]

            if not self.avoid_overlap:
                text_x = x1 + self.text_padding
                text_y = y1 - self.text_padding
                text_background_x1 = x1
                text_background_y1 = y1 - 2 * self.text_padding - text_height
                text_background_x2 = x1 + 2 * self.text_padding + text_width
                text_background_y2 = y1
                # text_x = x1 - self.text_padding - text_width
                # text_y = y1 + self.text_padding + text_height
                # text_background_x1 = x1 - 2 * self.text_padding - text_width
                # text_background_y1 = y1
                # text_background_x2 = x1
                # text_background_y2 = y1 + 2 * self.text_padding + text_height
            else:
                text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2 = get_optimal_label_pos(self.text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size)

            cv2.rectangle(
                img=scene,
                pt1=(text_background_x1, text_background_y1),
                pt2=(text_background_x2, text_background_y2),
                color=color.as_bgr(),
                thickness=cv2.FILLED,
            )
            # import pdb; pdb.set_trace()
            box_color = color.as_rgb()
            luminance = 0.299 * box_color[0] + 0.587 * box_color[1] + 0.114 * box_color[2]
            text_color = (0, 0, 0) if luminance > 160 else (255, 255, 255)
            cv2.putText(
                img=scene,
                text=text,
                org=(text_x, text_y),
                fontFace=font,
                fontScale=self.text_scale,
                # color=self.text_color.as_rgb(),
                color=text_color,
                thickness=self.text_thickness,
                lineType=cv2.LINE_AA,
            )
        return scene
def box_area(box):
    return (box[2] - box[0]) * (box[3] - box[1])


def intersection_area(box1, box2):
    x1 = max(box1[0], box2[0])
    y1 = max(box1[1], box2[1])
    x2 = min(box1[2], box2[2])
    y2 = min(box1[3], box2[3])
    return max(0, x2 - x1) * max(0, y2 - y1)


def IoU(box1, box2, return_max=True):
    intersection = intersection_area(box1, box2)
    union = box_area(box1) + box_area(box2) - intersection
    if box_area(box1) > 0 and box_area(box2) > 0:
        ratio1 = intersection / box_area(box1)
        ratio2 = intersection / box_area(box2)
    else:
        ratio1, ratio2 = 0, 0
    if return_max:
        return max(intersection / union, ratio1, ratio2)
    else:
        return intersection / union
def get_optimal_label_pos(text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size):
    """Check overlap between the label text and the detection boxes, and pick an optimal label position.

    Candidate positions are tried in order: 'top left', 'outer left',
    'outer right', 'top right'. If every candidate overlaps a detection (IoU
    threshold 0.3) or falls outside the image, the last candidate (top right)
    is returned.
    """
    def get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size):
        is_overlap = False
        for i in range(len(detections)):
            detection = detections.xyxy[i].astype(int)
            if IoU([text_background_x1, text_background_y1, text_background_x2, text_background_y2], detection) > 0.3:
                is_overlap = True
                break
        # check if the text is out of the image
        if text_background_x1 < 0 or text_background_x2 > image_size[0] or text_background_y1 < 0 or text_background_y2 > image_size[1]:
            is_overlap = True
        return is_overlap

    # pos == 'top left'
    text_x = x1 + text_padding
    text_y = y1 - text_padding
    text_background_x1 = x1
    text_background_y1 = y1 - 2 * text_padding - text_height
    text_background_x2 = x1 + 2 * text_padding + text_width
    text_background_y2 = y1
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # pos == 'outer left'
    text_x = x1 - text_padding - text_width
    text_y = y1 + text_padding + text_height
    text_background_x1 = x1 - 2 * text_padding - text_width
    text_background_y1 = y1
    text_background_x2 = x1
    text_background_y2 = y1 + 2 * text_padding + text_height
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # pos == 'outer right'
    text_x = x2 + text_padding
    text_y = y1 + text_padding + text_height
    text_background_x1 = x2
    text_background_y1 = y1
    text_background_x2 = x2 + 2 * text_padding + text_width
    text_background_y2 = y1 + 2 * text_padding + text_height
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # pos == 'top right'
    text_x = x2 - text_padding - text_width
    text_y = y1 - text_padding
    text_background_x1 = x2 - 2 * text_padding - text_width
    text_background_y1 = y1 - 2 * text_padding - text_height
    text_background_x2 = x2
    text_background_y2 = y1
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
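A minimal usage sketch for this annotator, assuming supervision==0.18.0 as pinned in requirements.txt; the image path, box coordinates, and label are invented:

```python
# Sketch: draw one labeled box with the custom BoxAnnotator (illustrative only).
import cv2
import numpy as np
from supervision.detection.core import Detections

image = cv2.imread('examples/pc_1.png')  # BGR image
h, w = image.shape[:2]
detections = Detections(xyxy=np.array([[100.0, 100.0, 300.0, 200.0]]))

annotator = BoxAnnotator()
annotated = annotator.annotate(
    scene=image.copy(),
    detections=detections,
    labels=['0'],
    image_size=(w, h),  # (width, height), consumed by get_optimal_label_pos
)
cv2.imwrite('annotated.png', annotated)
```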

1076
utils.py

File diff suppressed because it is too large

View File

@@ -1,26 +1,26 @@
import torch
from ultralytics.nn.tasks import DetectionModel
from safetensors.torch import load_file
import argparse
import yaml
import os

# accept args to specify v1 or v1_5
parser = argparse.ArgumentParser(description='Specify version v1 or v1_5')
parser.add_argument('--weights_dir', type=str, required=True, help='Specify the path to the safetensor file', default='weights/icon_detect_v1_5')
args = parser.parse_args()

tensor_dict = load_file(os.path.join(args.weights_dir, "model.safetensors"))
model = DetectionModel(os.path.join(args.weights_dir, "model.yaml"))
# from ultralytics import YOLO
# som_model = YOLO("yolo11m.pt")
# model = som_model.model
model.load_state_dict(tensor_dict)
save_dict = {'model': model}

with open(os.path.join(args.weights_dir, "train_args.yaml"), 'r') as file:
    train_args = yaml.safe_load(file)
save_dict.update(train_args)
torch.save(save_dict, os.path.join(args.weights_dir, "best.pt"))
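After running the conversion (e.g. with --weights_dir weights/icon_detect_v1_5, matching the argparse default above), a quick sanity check on the produced checkpoint might look like this sketch:

```python
# Sketch: verify the converted best.pt loads and contains the DetectionModel.
import torch

# weights_only=False is needed on newer PyTorch, since the checkpoint pickles
# a full model object rather than just tensors (assumes torch >= 1.13).
ckpt = torch.load('weights/icon_detect_v1_5/best.pt', map_location='cpu',
                  weights_only=False)
print(type(ckpt['model']).__name__)  # DetectionModel
print(sorted(k for k in ckpt if k != 'model')[:5])  # merged train_args keys
```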