commit 309ff79434880ef155e9fea4d8c878662ba26235 Author: Re-bin <1718954198@qq.com> Date: Tue Feb 4 01:48:02 2025 +0800 initial upload diff --git a/ImageBind/.assets/bird_audio.wav b/ImageBind/.assets/bird_audio.wav new file mode 100755 index 0000000..a98fc72 Binary files /dev/null and b/ImageBind/.assets/bird_audio.wav differ diff --git a/ImageBind/.assets/bird_image.jpg b/ImageBind/.assets/bird_image.jpg new file mode 100755 index 0000000..78b10ab Binary files /dev/null and b/ImageBind/.assets/bird_image.jpg differ diff --git a/ImageBind/.assets/car_audio.wav b/ImageBind/.assets/car_audio.wav new file mode 100755 index 0000000..b71b42a Binary files /dev/null and b/ImageBind/.assets/car_audio.wav differ diff --git a/ImageBind/.assets/car_image.jpg b/ImageBind/.assets/car_image.jpg new file mode 100755 index 0000000..e33288e Binary files /dev/null and b/ImageBind/.assets/car_image.jpg differ diff --git a/ImageBind/.assets/dog_audio.wav b/ImageBind/.assets/dog_audio.wav new file mode 100755 index 0000000..71d69c7 Binary files /dev/null and b/ImageBind/.assets/dog_audio.wav differ diff --git a/ImageBind/.assets/dog_image.jpg b/ImageBind/.assets/dog_image.jpg new file mode 100755 index 0000000..a54bffa Binary files /dev/null and b/ImageBind/.assets/dog_image.jpg differ diff --git a/ImageBind/CODE_OF_CONDUCT.md b/ImageBind/CODE_OF_CONDUCT.md new file mode 100755 index 0000000..f913b6a --- /dev/null +++ b/ImageBind/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. 
+Examples of representing a project or community include using an official
+project e-mail address, posting via an official social media account, or acting
+as an appointed representative at an online or offline event. Representation of
+a project may be further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when there is a
+reasonable belief that an individual's behavior may have a negative impact on
+the project or its community.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at . All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
\ No newline at end of file
diff --git a/ImageBind/CONTRIBUTING.md b/ImageBind/CONTRIBUTING.md
new file mode 100755
index 0000000..63d0b75
--- /dev/null
+++ b/ImageBind/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to ImageBind
+We want to make contributing to this project as easy and transparent as
+possible.
+
+## Pull Requests
+We actively welcome your pull requests.
+
+1. Fork the repo and create your branch from `main`.
+2. If you've added code that should be tested, add tests.
+3. If you've changed APIs, update the documentation.
+4. Ensure the test suite passes.
+5. Make sure your code lints.
+6. If you haven't already, complete the Contributor License Agreement ("CLA").
+
+## Contributor License Agreement ("CLA")
+In order to accept your pull request, we need you to submit a CLA. You only need
+to do this once to work on any of Meta's open source projects.
+
+Complete your CLA here: <https://code.facebook.com/cla>
+
+## Issues
+We use GitHub issues to track public bugs. Please ensure your description is
+clear and has sufficient instructions to be able to reproduce the issue.
+
+Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe
+disclosure of security bugs. In those cases, please go through the process
+outlined on that page and do not file a public issue.
+
+## License
+By contributing to ImageBind, you agree that your contributions will be licensed
+under the [LICENSE](LICENSE) file in the root directory of this source tree.
diff --git a/ImageBind/LICENSE b/ImageBind/LICENSE
new file mode 100755
index 0000000..bfef380
--- /dev/null
+++ b/ImageBind/LICENSE
@@ -0,0 +1,437 @@
+Attribution-NonCommercial-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship.
Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International +Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial-ShareAlike 4.0 International Public License +("Public License"). To the extent this Public License may be +interpreted as a contract, You are granted the Licensed Rights in +consideration of Your acceptance of these terms and conditions, and the +Licensor grants You such rights in consideration of benefits the +Licensor receives from making the Licensed Material available under +these terms and conditions. + + +Section 1 -- Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-NC-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution, NonCommercial, and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + l. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + m. 
Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + n. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. 
Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-NC-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + including for purposes of Section 3(b); and + + c. 
You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. 
For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. \ No newline at end of file diff --git a/ImageBind/README.md b/ImageBind/README.md new file mode 100755 index 0000000..cacf3e9 --- /dev/null +++ b/ImageBind/README.md @@ -0,0 +1,155 @@ +# ImageBind: One Embedding Space To Bind Them All + +**[FAIR, Meta AI](https://ai.facebook.com/research/)** + +Rohit Girdhar*, +Alaaeldin El-Nouby*, +Zhuang Liu, +Mannat Singh, +Kalyan Vasudev Alwala, +Armand Joulin, +Ishan Misra* + +To appear at CVPR 2023 (*Highlighted paper*) + +[[`Paper`](https://facebookresearch.github.io/ImageBind/paper)] [[`Blog`](https://ai.facebook.com/blog/imagebind-six-modalities-binding-ai/)] [[`Demo`](https://imagebind.metademolab.com/)] [[`Supplementary Video`](https://dl.fbaipublicfiles.com/imagebind/imagebind_video.mp4)] [[`BibTex`](#citing-imagebind)] + +PyTorch implementation and pretrained models for ImageBind. For details, see the paper: **[ImageBind: One Embedding Space To Bind Them All](https://facebookresearch.github.io/ImageBind/paper)**. + +ImageBind learns a joint embedding across six different modalities - images, text, audio, depth, thermal, and IMU data. It enables novel emergent applications ‘out-of-the-box’ including cross-modal retrieval, composing modalities with arithmetic, cross-modal detection and generation. 
+ + + +![ImageBind](https://user-images.githubusercontent.com/8495451/236859695-ffa13364-3e39-4d99-a8da-fbfab17f9a6b.gif) + +## ImageBind model + +Emergent zero-shot classification performance. + + + + + + + + + + + + + + + + + + + + + + + +
+| Model | IN1k | K400 | NYU-D | ESC | LLVIP | Ego4D | download |
+| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| imagebind_huge | 77.7 | 50.0 | 54.0 | 66.9 | 63.4 | 25.0 | [checkpoint](https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth) |
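+
+*Columns, per the paper's benchmark suite: IN1k = ImageNet-1k (images), K400 = Kinetics-400 (video), NYU-D = NYU Depth (depth), ESC = ESC-50 (audio), LLVIP (thermal), Ego4D (IMU).*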
+
+## Usage
+
+Install PyTorch 1.13+ and other third-party dependencies.
+
+```shell
+conda create --name imagebind python=3.10 -y
+conda activate imagebind
+
+pip install .
+```
+
+For Windows users: you might need to install `soundfile` for reading/writing audio files. (Thanks @congyue1977)
+
+```
+pip install soundfile
+```
+
+Extract and compare features across modalities (e.g., image, text, and audio).
+
+```python
+from imagebind import data
+import torch
+from imagebind.models import imagebind_model
+from imagebind.models.imagebind_model import ModalityType
+
+text_list=["A dog.", "A car", "A bird"]
+image_paths=[".assets/dog_image.jpg", ".assets/car_image.jpg", ".assets/bird_image.jpg"]
+audio_paths=[".assets/dog_audio.wav", ".assets/car_audio.wav", ".assets/bird_audio.wav"]
+
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+# Instantiate model
+model = imagebind_model.imagebind_huge(pretrained=True)
+model.eval()
+model.to(device)
+
+# Load data
+inputs = {
+    ModalityType.TEXT: data.load_and_transform_text(text_list, device),
+    ModalityType.VISION: data.load_and_transform_vision_data(image_paths, device),
+    ModalityType.AUDIO: data.load_and_transform_audio_data(audio_paths, device),
+}
+
+with torch.no_grad():
+    embeddings = model(inputs)
+
+print(
+    "Vision x Text: ",
+    torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1),
+)
+print(
+    "Audio x Text: ",
+    torch.softmax(embeddings[ModalityType.AUDIO] @ embeddings[ModalityType.TEXT].T, dim=-1),
+)
+print(
+    "Vision x Audio: ",
+    torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.AUDIO].T, dim=-1),
+)
+
+# Expected output:
+#
+# Vision x Text:
+# tensor([[9.9761e-01, 2.3694e-03, 1.8612e-05],
+#         [3.3836e-05, 9.9994e-01, 2.4118e-05],
+#         [4.7997e-05, 1.3496e-02, 9.8646e-01]])
+#
+# Audio x Text:
+# tensor([[1., 0., 0.],
+#         [0., 1., 0.],
+#         [0., 0., 1.]])
+#
+# Vision x Audio:
+# tensor([[0.8070, 0.1088, 0.0842],
+#         [0.1036, 0.7884, 0.1079],
+#         [0.0018, 0.0022, 0.9960]])
+
+```
+
+## Model card
+Please see the [model card](model_card.md) for details.
+
+## License
+
+ImageBind code and model weights are released under the CC BY-NC-SA 4.0 license. See [LICENSE](LICENSE) for additional details.
+
+## Contributing
+
+See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md).
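+
+## Retrieval example
+
+Because every modality lands in one embedding space, the similarity tables in the Usage snippet double as retrieval scores: ranking candidates for a query is a single matrix product. A minimal sketch building on that snippet (it assumes `model`, `inputs`, and `ModalityType` are in scope; `retrieve` and `k` are illustrative names, not part of the package):
+
+```python
+import torch
+
+def retrieve(query_emb: torch.Tensor, candidate_embs: torch.Tensor, k: int = 3):
+    # The model heads L2-normalize embeddings, and some modalities then apply
+    # a fixed per-modality scale; a constant scale rescales all scores
+    # equally, so a plain dot product is enough for ranking.
+    scores = query_emb @ candidate_embs.T  # (num_queries, num_candidates)
+    topk = torch.topk(scores, k=min(k, candidate_embs.shape[0]), dim=-1)
+    return topk.values, topk.indices
+
+with torch.no_grad():
+    embeddings = model(inputs)
+
+# e.g., rank the three images for each audio clip
+values, indices = retrieve(embeddings[ModalityType.AUDIO],
+                           embeddings[ModalityType.VISION])
+print(indices[:, 0])  # best-matching image index per audio query
+```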
+ +## Citing ImageBind + +If you find this repository useful, please consider giving a star :star: and citation + +``` +@inproceedings{girdhar2023imagebind, + title={ImageBind: One Embedding Space To Bind Them All}, + author={Girdhar, Rohit and El-Nouby, Alaaeldin and Liu, Zhuang +and Singh, Mannat and Alwala, Kalyan Vasudev and Joulin, Armand and Misra, Ishan}, + booktitle={CVPR}, + year={2023} +} +``` diff --git a/ImageBind/build/lib/imagebind/__init__.py b/ImageBind/build/lib/imagebind/__init__.py new file mode 100644 index 0000000..559fc8d --- /dev/null +++ b/ImageBind/build/lib/imagebind/__init__.py @@ -0,0 +1,3 @@ +from imagebind import data +from imagebind.models import imagebind_model +from imagebind.models.imagebind_model import ModalityType \ No newline at end of file diff --git a/ImageBind/build/lib/imagebind/bpe/bpe_simple_vocab_16e6.txt.gz b/ImageBind/build/lib/imagebind/bpe/bpe_simple_vocab_16e6.txt.gz new file mode 100755 index 0000000..7b5088a Binary files /dev/null and b/ImageBind/build/lib/imagebind/bpe/bpe_simple_vocab_16e6.txt.gz differ diff --git a/ImageBind/build/lib/imagebind/data.py b/ImageBind/build/lib/imagebind/data.py new file mode 100644 index 0000000..6b774d6 --- /dev/null +++ b/ImageBind/build/lib/imagebind/data.py @@ -0,0 +1,343 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import logging +import math +import pkg_resources + +import torch +import torch.nn as nn +import torchaudio +from PIL import Image +from pytorchvideo import transforms as pv_transforms +from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler +from pytorchvideo.data.encoded_video import EncodedVideo +from torchvision import transforms +from torchvision.transforms._transforms_video import NormalizeVideo + +from imagebind.models.multimodal_preprocessors import SimpleTokenizer + +DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds + + +def return_bpe_path(): + return pkg_resources.resource_filename( + "imagebind", "bpe/bpe_simple_vocab_16e6.txt.gz" + ) + + +def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length): + # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102 + waveform -= waveform.mean() + fbank = torchaudio.compliance.kaldi.fbank( + waveform, + htk_compat=True, + sample_frequency=sample_rate, + use_energy=False, + window_type="hanning", + num_mel_bins=num_mel_bins, + dither=0.0, + frame_length=25, + frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS, + ) + # Convert to [mel_bins, num_frames] shape + fbank = fbank.transpose(0, 1) + # Pad to target_length + n_frames = fbank.size(1) + p = target_length - n_frames + # if p is too large (say >20%), flash a warning + if abs(p) / n_frames > 0.2: + logging.warning( + "Large gap between audio n_frames(%d) and " + "target_length (%d). 
Is the audio_target_length " + "setting correct?", + n_frames, + target_length, + ) + # cut and pad + if p > 0: + fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0) + elif p < 0: + fbank = fbank[:, 0:target_length] + # Convert to [1, mel_bins, num_frames] shape, essentially like a 1 + # channel image + fbank = fbank.unsqueeze(0) + return fbank + + +def get_clip_timepoints(clip_sampler, duration): + # Read out all clips in this video + all_clips_timepoints = [] + is_last_clip = False + end = 0.0 + while not is_last_clip: + start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) + all_clips_timepoints.append((start, end)) + return all_clips_timepoints + + +def load_and_transform_vision_data(image_paths, device): + if image_paths is None: + return None + + image_outputs = [] + + data_transform = transforms.Compose( + [ + transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + + for image_path in image_paths: + with open(image_path, "rb") as fopen: + image = Image.open(fopen).convert("RGB") + + image = data_transform(image).to(device) + image_outputs.append(image) + return torch.stack(image_outputs, dim=0) + + +def load_and_transform_text(text, device): + if text is None: + return None + tokenizer = SimpleTokenizer(bpe_path=return_bpe_path()) + tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text] + tokens = torch.cat(tokens, dim=0) + return tokens + + +def load_and_transform_audio_data( + audio_paths, + device, + num_mel_bins=128, + target_length=204, + sample_rate=16000, + clip_duration=2, + clips_per_video=3, + mean=-4.268, + std=9.138, +): + if audio_paths is None: + return None + + audio_outputs = [] + clip_sampler = ConstantClipsPerVideoSampler( + clip_duration=clip_duration, clips_per_video=clips_per_video + ) + + for audio_path in audio_paths: + waveform, sr = torchaudio.load(audio_path) + if sample_rate != sr: + waveform = torchaudio.functional.resample( + waveform, orig_freq=sr, new_freq=sample_rate + ) + all_clips_timepoints = get_clip_timepoints( + clip_sampler, waveform.size(1) / sample_rate + ) + all_clips = [] + for clip_timepoints in all_clips_timepoints: + waveform_clip = waveform[ + :, + int(clip_timepoints[0] * sample_rate) : int( + clip_timepoints[1] * sample_rate + ), + ] + waveform_melspec = waveform2melspec( + waveform_clip, sample_rate, num_mel_bins, target_length + ) + all_clips.append(waveform_melspec) + + normalize = transforms.Normalize(mean=mean, std=std) + all_clips = [normalize(ac).to(device) for ac in all_clips] + + all_clips = torch.stack(all_clips, dim=0) + audio_outputs.append(all_clips) + + return torch.stack(audio_outputs, dim=0) + + +def crop_boxes(boxes, x_offset, y_offset): + """ + Perform crop on the bounding boxes given the offsets. + Args: + boxes (ndarray or None): bounding boxes to perform crop. The dimension + is `num boxes` x 4. + x_offset (int): cropping offset in the x axis. + y_offset (int): cropping offset in the y axis. + Returns: + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. 
+    """
+    cropped_boxes = boxes.copy()
+    cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
+    cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
+
+    return cropped_boxes
+
+
+def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
+    """
+    Perform uniform spatial sampling on the images and corresponding boxes.
+    Args:
+        images (tensor): images to perform uniform crop. The dimension is
+            `num frames` x `channel` x `height` x `width`.
+        size (int): size of height and width to crop the images.
+        spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
+            is larger than height. Or 0, 1, or 2 for top, center, and bottom
+            crop if height is larger than width.
+        boxes (ndarray or None): optional. Corresponding boxes to images.
+            Dimension is `num boxes` x 4.
+        scale_size (int): optional. If not None, resize the images to scale_size before
+            performing any crop.
+    Returns:
+        cropped (tensor): images with dimension of
+            `num frames` x `channel` x `size` x `size`.
+        cropped_boxes (ndarray or None): the cropped boxes with dimension of
+            `num boxes` x 4.
+    """
+    assert spatial_idx in [0, 1, 2]
+    ndim = len(images.shape)
+    if ndim == 3:
+        images = images.unsqueeze(0)
+    height = images.shape[2]
+    width = images.shape[3]
+
+    if scale_size is not None:
+        if width <= height:
+            width, height = scale_size, int(height / width * scale_size)
+        else:
+            width, height = int(width / height * scale_size), scale_size
+        images = torch.nn.functional.interpolate(
+            images,
+            size=(height, width),
+            mode="bilinear",
+            align_corners=False,
+        )
+
+    y_offset = int(math.ceil((height - size) / 2))
+    x_offset = int(math.ceil((width - size) / 2))
+
+    if height > width:
+        if spatial_idx == 0:
+            y_offset = 0
+        elif spatial_idx == 2:
+            y_offset = height - size
+    else:
+        if spatial_idx == 0:
+            x_offset = 0
+        elif spatial_idx == 2:
+            x_offset = width - size
+    cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size]
+    cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
+    if ndim == 3:
+        cropped = cropped.squeeze(0)
+    return cropped, cropped_boxes
+
+
+class SpatialCrop(nn.Module):
+    """
+    Convert the video into 3 smaller clips spatially. Must be used after the
+    temporal crops to get spatial crops, and should be used with
+    -2 in the spatial crop at the slowfast augmentation stage (so full
+    frames are passed in here). Will return a larger list with the
+    3x spatial crops as well.
+    """
+
+    def __init__(self, crop_size: int = 224, num_crops: int = 3):
+        super().__init__()
+        self.crop_size = crop_size
+        if num_crops == 3:
+            self.crops_to_ext = [0, 1, 2]
+            self.flipped_crops_to_ext = []
+        elif num_crops == 1:
+            self.crops_to_ext = [1]
+            self.flipped_crops_to_ext = []
+        else:
+            raise NotImplementedError("Nothing else supported yet")
+
+    def forward(self, videos):
+        """
+        Args:
+            videos: A list of C, T, H, W videos.
+        Returns:
+            videos: A list with 3x the number of elements. Each video converted
+                to C, T, H', W' by spatial cropping.
+ """ + assert isinstance(videos, list), "Must be a list of videos after temporal crops" + assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)" + res = [] + for video in videos: + for spatial_idx in self.crops_to_ext: + res.append(uniform_crop(video, self.crop_size, spatial_idx)[0]) + if not self.flipped_crops_to_ext: + continue + flipped_video = transforms.functional.hflip(video) + for spatial_idx in self.flipped_crops_to_ext: + res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0]) + return res + + +def load_and_transform_video_data( + video_paths, + device, + clip_duration=2, + clips_per_video=5, + sample_rate=16000, +): + if video_paths is None: + return None + + video_outputs = [] + video_transform = transforms.Compose( + [ + pv_transforms.ShortSideScale(224), + NormalizeVideo( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + + clip_sampler = ConstantClipsPerVideoSampler( + clip_duration=clip_duration, clips_per_video=clips_per_video + ) + frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration) + + for video_path in video_paths: + video = EncodedVideo.from_path( + video_path, + decoder="decord", + decode_audio=False, + **{"sample_rate": sample_rate}, + ) + + all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration) + + all_video = [] + for clip_timepoints in all_clips_timepoints: + # Read the clip, get frames + clip = video.get_clip(clip_timepoints[0], clip_timepoints[1]) + if clip is None: + raise ValueError("No clip found") + video_clip = frame_sampler(clip["video"]) + video_clip = video_clip / 255.0 # since this is float, need 0-1 + + all_video.append(video_clip) + + all_video = [video_transform(clip) for clip in all_video] + all_video = SpatialCrop(224, num_crops=3)(all_video) + + all_video = torch.stack(all_video, dim=0) + video_outputs.append(all_video) + + return torch.stack(video_outputs, dim=0).to(device) diff --git a/ImageBind/build/lib/imagebind/models/__init__.py b/ImageBind/build/lib/imagebind/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ImageBind/build/lib/imagebind/models/helpers.py b/ImageBind/build/lib/imagebind/models/helpers.py new file mode 100644 index 0000000..71abe9b --- /dev/null +++ b/ImageBind/build/lib/imagebind/models/helpers.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
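+
+# Small building blocks shared by the ImageBind preprocessors, trunks, and
+# heads defined elsewhere in this package: L2 normalization, an (optionally
+# learnable) logit/temperature scale, einops-based reshaping, and token
+# selection/pooling utilities.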
+ + +import einops +import numpy as np +import torch +import torch.nn as nn + + +class Normalize(nn.Module): + def __init__(self, dim: int) -> None: + super().__init__() + self.dim = dim + + def forward(self, x): + return torch.nn.functional.normalize(x, dim=self.dim, p=2) + + +class LearnableLogitScaling(nn.Module): + def __init__( + self, + logit_scale_init: float = 1 / 0.07, + learnable: bool = True, + max_logit_scale: float = 100, + ) -> None: + super().__init__() + self.max_logit_scale = max_logit_scale + self.logit_scale_init = logit_scale_init + self.learnable = learnable + log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init) + if learnable: + self.log_logit_scale = nn.Parameter(log_logit_scale) + else: + self.register_buffer("log_logit_scale", log_logit_scale) + + def forward(self, x): + return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x + + def extra_repr(self): + st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}," \ + f" max_logit_scale={self.max_logit_scale}" + return st + + +class EinOpsRearrange(nn.Module): + def __init__(self, rearrange_expr: str, **kwargs) -> None: + super().__init__() + self.rearrange_expr = rearrange_expr + self.kwargs = kwargs + + def forward(self, x): + assert isinstance(x, torch.Tensor) + return einops.rearrange(x, self.rearrange_expr, **self.kwargs) + + +class VerboseNNModule(nn.Module): + """ + Wrapper around nn.Module that prints registered buffers and parameter names. + """ + + @staticmethod + def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str: + st = ( + "(" + + name + + "): " + + "tensor(" + + str(tuple(tensor[1].shape)) + + ", requires_grad=" + + str(tensor[1].requires_grad) + + ")\n" + ) + return st + + def extra_repr(self) -> str: + named_modules = set() + for p in self.named_modules(): + named_modules.update([p[0]]) + named_modules = list(named_modules) + + string_repr = "" + for p in self.named_parameters(): + name = p[0].split(".")[0] + if name not in named_modules: + string_repr += self.get_readable_tensor_repr(name, p) + + for p in self.named_buffers(): + name = p[0].split(".")[0] + string_repr += self.get_readable_tensor_repr(name, p) + + return string_repr + + +def cast_if_src_dtype( + tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype +): + updated = False + if tensor.dtype == src_dtype: + tensor = tensor.to(dtype=tgt_dtype) + updated = True + return tensor, updated + + +class QuickGELU(nn.Module): + # From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166 + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class SelectElement(nn.Module): + def __init__(self, index) -> None: + super().__init__() + self.index = index + + def forward(self, x): + assert x.ndim >= 3 + return x[:, self.index, ...] 
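+
+
+# A minimal usage sketch (illustrative only; `_demo_postprocess` is a
+# hypothetical helper, not part of the original module). It shows how the
+# modules above compose into a postprocessor of the kind that
+# imagebind_model.py builds for the audio modality.
+def _demo_postprocess():
+    x = torch.randn(4, 768)  # a batch of four raw 768-d embeddings
+    post = nn.Sequential(
+        Normalize(dim=-1),  # unit-normalize each embedding
+        LearnableLogitScaling(logit_scale_init=20.0, learnable=False),  # fixed scale
+    )
+    y = post(x)
+    # clip(exp(log 20), max=100) == 20, so every row now has L2 norm 20
+    assert torch.allclose(y.norm(dim=-1), torch.full((4,), 20.0), atol=1e-3)
+    return y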
+ + +class SelectEOSAndProject(nn.Module): + """ + Text Pooling used in OpenCLIP + """ + + def __init__(self, proj: nn.Module) -> None: + super().__init__() + self.proj = proj + + def forward(self, x, seq_len): + assert x.ndim == 3 + # x is of shape B x L x D + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), seq_len] + x = self.proj(x) + return x diff --git a/ImageBind/build/lib/imagebind/models/imagebind_model.py b/ImageBind/build/lib/imagebind/models/imagebind_model.py new file mode 100644 index 0000000..c560945 --- /dev/null +++ b/ImageBind/build/lib/imagebind/models/imagebind_model.py @@ -0,0 +1,506 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import os +from functools import partial +from types import SimpleNamespace + +import torch +import torch.nn as nn + +from imagebind.models.helpers import (EinOpsRearrange, LearnableLogitScaling, Normalize, + SelectElement, SelectEOSAndProject) +from imagebind.models.multimodal_preprocessors import (AudioPreprocessor, + IMUPreprocessor, PadIm2Video, + PatchEmbedGeneric, + RGBDTPreprocessor, + SpatioTemporalPosEmbeddingHelper, + TextPreprocessor, + ThermalPreprocessor) +from imagebind.models.transformer import MultiheadAttention, SimpleTransformer + +ModalityType = SimpleNamespace( + VISION="vision", + TEXT="text", + AUDIO="audio", + THERMAL="thermal", + DEPTH="depth", + IMU="imu", +) + + +class ImageBindModel(nn.Module): + def __init__( + self, + video_frames=2, + kernel_size=(2, 14, 14), + audio_kernel_size=16, + audio_stride=10, + out_embed_dim=768, + vision_embed_dim=1024, + vision_num_blocks=24, + vision_num_heads=16, + audio_embed_dim=768, + audio_num_blocks=12, + audio_num_heads=12, + audio_num_mel_bins=128, + audio_target_len=204, + audio_drop_path=0.1, + text_embed_dim=768, + text_num_blocks=12, + text_num_heads=12, + depth_embed_dim=384, + depth_kernel_size=16, + depth_num_blocks=12, + depth_num_heads=8, + depth_drop_path=0.0, + thermal_embed_dim=768, + thermal_kernel_size=16, + thermal_num_blocks=12, + thermal_num_heads=12, + thermal_drop_path=0.0, + imu_embed_dim=512, + imu_kernel_size=8, + imu_num_blocks=6, + imu_num_heads=8, + imu_drop_path=0.7, + ): + super().__init__() + + self.modality_preprocessors = self._create_modality_preprocessors( + video_frames, + vision_embed_dim, + kernel_size, + text_embed_dim, + audio_embed_dim, + audio_kernel_size, + audio_stride, + audio_num_mel_bins, + audio_target_len, + depth_embed_dim, + depth_kernel_size, + thermal_embed_dim, + thermal_kernel_size, + imu_embed_dim, + ) + + self.modality_trunks = self._create_modality_trunks( + vision_embed_dim, + vision_num_blocks, + vision_num_heads, + text_embed_dim, + text_num_blocks, + text_num_heads, + audio_embed_dim, + audio_num_blocks, + audio_num_heads, + audio_drop_path, + depth_embed_dim, + depth_num_blocks, + depth_num_heads, + depth_drop_path, + thermal_embed_dim, + thermal_num_blocks, + thermal_num_heads, + thermal_drop_path, + imu_embed_dim, + imu_num_blocks, + imu_num_heads, + imu_drop_path, + ) + + self.modality_heads = self._create_modality_heads( + out_embed_dim, + vision_embed_dim, + text_embed_dim, + audio_embed_dim, + depth_embed_dim, + thermal_embed_dim, + imu_embed_dim, + ) + + self.modality_postprocessors = self._create_modality_postprocessors( + out_embed_dim + ) + + 
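+
+    # Every modality flows through the same four stages assembled below and
+    # applied in forward(): preprocessor (patchify/tokenize + positional
+    # embeddings) -> trunk (SimpleTransformer) -> head (select a token and
+    # project to out_embed_dim) -> postprocessor (L2-normalize and, for all
+    # modalities except vision, scale the logits).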
def _create_modality_preprocessors( + self, + video_frames=2, + vision_embed_dim=1024, + kernel_size=(2, 14, 14), + text_embed_dim=768, + audio_embed_dim=768, + audio_kernel_size=16, + audio_stride=10, + audio_num_mel_bins=128, + audio_target_len=204, + depth_embed_dim=768, + depth_kernel_size=16, + thermal_embed_dim=768, + thermal_kernel_size=16, + imu_embed_dim=512, + ): + rgbt_stem = PatchEmbedGeneric( + proj_stem=[ + PadIm2Video(pad_type="repeat", ntimes=2), + nn.Conv3d( + in_channels=3, + kernel_size=kernel_size, + out_channels=vision_embed_dim, + stride=kernel_size, + bias=False, + ), + ] + ) + rgbt_preprocessor = RGBDTPreprocessor( + img_size=[3, video_frames, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + rgbt_stem=rgbt_stem, + depth_stem=None, + ) + + text_preprocessor = TextPreprocessor( + context_length=77, + vocab_size=49408, + embed_dim=text_embed_dim, + causal_masking=True, + ) + + audio_stem = PatchEmbedGeneric( + proj_stem=[ + nn.Conv2d( + in_channels=1, + kernel_size=audio_kernel_size, + stride=audio_stride, + out_channels=audio_embed_dim, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=audio_embed_dim), + ) + audio_preprocessor = AudioPreprocessor( + img_size=[1, audio_num_mel_bins, audio_target_len], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + audio_stem=audio_stem, + ) + + depth_stem = PatchEmbedGeneric( + [ + nn.Conv2d( + kernel_size=depth_kernel_size, + in_channels=1, + out_channels=depth_embed_dim, + stride=depth_kernel_size, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=depth_embed_dim), + ) + + depth_preprocessor = RGBDTPreprocessor( + img_size=[1, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + rgbt_stem=None, + depth_stem=depth_stem, + ) + + thermal_stem = PatchEmbedGeneric( + [ + nn.Conv2d( + kernel_size=thermal_kernel_size, + in_channels=1, + out_channels=thermal_embed_dim, + stride=thermal_kernel_size, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=thermal_embed_dim), + ) + thermal_preprocessor = ThermalPreprocessor( + img_size=[1, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + thermal_stem=thermal_stem, + ) + + imu_stem = PatchEmbedGeneric( + [ + nn.Linear( + in_features=48, + out_features=imu_embed_dim, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=imu_embed_dim), + ) + + imu_preprocessor = IMUPreprocessor( + img_size=[6, 2000], + num_cls_tokens=1, + kernel_size=8, + embed_dim=imu_embed_dim, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + imu_stem=imu_stem, + ) + + modality_preprocessors = { + ModalityType.VISION: rgbt_preprocessor, + ModalityType.TEXT: text_preprocessor, + ModalityType.AUDIO: audio_preprocessor, + ModalityType.DEPTH: depth_preprocessor, + ModalityType.THERMAL: thermal_preprocessor, + ModalityType.IMU: imu_preprocessor, + } + + return nn.ModuleDict(modality_preprocessors) + + def _create_modality_trunks( + self, + vision_embed_dim=1024, + vision_num_blocks=24, + vision_num_heads=16, + text_embed_dim=768, + text_num_blocks=12, + text_num_heads=12, + audio_embed_dim=768, + audio_num_blocks=12, + audio_num_heads=12, + audio_drop_path=0.0, + depth_embed_dim=768, + depth_num_blocks=12, + depth_num_heads=12, + depth_drop_path=0.0, + thermal_embed_dim=768, + thermal_num_blocks=12, + thermal_num_heads=12, + 
thermal_drop_path=0.0, + imu_embed_dim=512, + imu_num_blocks=6, + imu_num_heads=8, + imu_drop_path=0.7, + ): + def instantiate_trunk( + embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path + ): + return SimpleTransformer( + embed_dim=embed_dim, + num_blocks=num_blocks, + ffn_dropout_rate=0.0, + drop_path_rate=drop_path, + attn_target=partial( + MultiheadAttention, + embed_dim=embed_dim, + num_heads=num_heads, + bias=True, + add_bias_kv=add_bias_kv, + ), + pre_transformer_layer=nn.Sequential( + nn.LayerNorm(embed_dim, eps=1e-6) + if pre_transformer_ln + else nn.Identity(), + EinOpsRearrange("b l d -> l b d"), + ), + post_transformer_layer=EinOpsRearrange("l b d -> b l d"), + ) + + modality_trunks = {} + modality_trunks[ModalityType.VISION] = instantiate_trunk( + vision_embed_dim, + vision_num_blocks, + vision_num_heads, + pre_transformer_ln=True, + add_bias_kv=False, + drop_path=0.0, + ) + modality_trunks[ModalityType.TEXT] = instantiate_trunk( + text_embed_dim, + text_num_blocks, + text_num_heads, + pre_transformer_ln=False, + add_bias_kv=False, + drop_path=0.0, + ) + modality_trunks[ModalityType.AUDIO] = instantiate_trunk( + audio_embed_dim, + audio_num_blocks, + audio_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=audio_drop_path, + ) + modality_trunks[ModalityType.DEPTH] = instantiate_trunk( + depth_embed_dim, + depth_num_blocks, + depth_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=depth_drop_path, + ) + modality_trunks[ModalityType.THERMAL] = instantiate_trunk( + thermal_embed_dim, + thermal_num_blocks, + thermal_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=thermal_drop_path, + ) + modality_trunks[ModalityType.IMU] = instantiate_trunk( + imu_embed_dim, + imu_num_blocks, + imu_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=imu_drop_path, + ) + + return nn.ModuleDict(modality_trunks) + + def _create_modality_heads( + self, + out_embed_dim, + vision_embed_dim, + text_embed_dim, + audio_embed_dim, + depth_embed_dim, + thermal_embed_dim, + imu_embed_dim, + ): + modality_heads = {} + + modality_heads[ModalityType.VISION] = nn.Sequential( + nn.LayerNorm(normalized_shape=vision_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(vision_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.TEXT] = SelectEOSAndProject( + proj=nn.Sequential( + nn.LayerNorm(normalized_shape=text_embed_dim, eps=1e-6), + nn.Linear(text_embed_dim, out_embed_dim, bias=False), + ) + ) + + modality_heads[ModalityType.AUDIO] = nn.Sequential( + nn.LayerNorm(normalized_shape=audio_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(audio_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.DEPTH] = nn.Sequential( + nn.LayerNorm(normalized_shape=depth_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(depth_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.THERMAL] = nn.Sequential( + nn.LayerNorm(normalized_shape=thermal_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(thermal_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.IMU] = nn.Sequential( + nn.LayerNorm(normalized_shape=imu_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Dropout(p=0.5), + nn.Linear(imu_embed_dim, out_embed_dim, bias=False), + ) + + return nn.ModuleDict(modality_heads) + + def _create_modality_postprocessors(self, out_embed_dim): + modality_postprocessors = {} + + 
modality_postprocessors[ModalityType.VISION] = Normalize(dim=-1) + modality_postprocessors[ModalityType.TEXT] = nn.Sequential( + Normalize(dim=-1), LearnableLogitScaling(learnable=True) + ) + modality_postprocessors[ModalityType.AUDIO] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=20.0, learnable=False), + ) + modality_postprocessors[ModalityType.DEPTH] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=5.0, learnable=False), + ) + modality_postprocessors[ModalityType.THERMAL] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=10.0, learnable=False), + ) + modality_postprocessors[ModalityType.IMU] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=5.0, learnable=False), + ) + + return nn.ModuleDict(modality_postprocessors) + + def forward(self, inputs): + outputs = {} + for modality_key, modality_value in inputs.items(): + reduce_list = ( + modality_value.ndim >= 5 + ) # Audio and Video inputs consist of multiple clips + if reduce_list: + B, S = modality_value.shape[:2] + modality_value = modality_value.reshape( + B * S, *modality_value.shape[2:] + ) + + if modality_value is not None: + modality_value = self.modality_preprocessors[modality_key]( + **{modality_key: modality_value} + ) + trunk_inputs = modality_value["trunk"] + head_inputs = modality_value["head"] + modality_value = self.modality_trunks[modality_key](**trunk_inputs) + modality_value = self.modality_heads[modality_key]( + modality_value, **head_inputs + ) + modality_value = self.modality_postprocessors[modality_key]( + modality_value + ) + + if reduce_list: + modality_value = modality_value.reshape(B, S, -1) + modality_value = modality_value.mean(dim=1) + + outputs[modality_key] = modality_value + + return outputs + + +def imagebind_huge(pretrained=False): + model = ImageBindModel( + vision_embed_dim=1280, + vision_num_blocks=32, + vision_num_heads=16, + text_embed_dim=1024, + text_num_blocks=24, + text_num_heads=16, + out_embed_dim=1024, + audio_drop_path=0.1, + imu_drop_path=0.7, + ) + + if pretrained: + if not os.path.exists(".checkpoints/imagebind_huge.pth"): + print( + "Downloading imagebind weights to .checkpoints/imagebind_huge.pth ..." + ) + os.makedirs(".checkpoints", exist_ok=True) + torch.hub.download_url_to_file( + "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth", + ".checkpoints/imagebind_huge.pth", + progress=True, + ) + + model.load_state_dict(torch.load(".checkpoints/imagebind_huge.pth")) + + return model diff --git a/ImageBind/build/lib/imagebind/models/multimodal_preprocessors.py b/ImageBind/build/lib/imagebind/models/multimodal_preprocessors.py new file mode 100644 index 0000000..3384b87 --- /dev/null +++ b/ImageBind/build/lib/imagebind/models/multimodal_preprocessors.py @@ -0,0 +1,685 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
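+
+# Per-modality input preprocessors (text, RGB/depth/thermal images, audio
+# spectrograms, IMU) together with the positional-embedding interpolation
+# helpers and the BPE tokenizer they depend on.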
+ +import gzip +import html +import io +import math +from functools import lru_cache +from typing import Callable, List, Optional, Tuple + +import ftfy +import numpy as np +import regex as re +import torch +import torch.nn as nn +from iopath.common.file_io import g_pathmgr +from timm.models.layers import trunc_normal_ + +from imagebind.models.helpers import VerboseNNModule, cast_if_src_dtype + + +def get_sinusoid_encoding_table(n_position, d_hid): + """Sinusoid position encoding table""" + + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)] + ) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +def interpolate_pos_encoding_2d(target_spatial_size, pos_embed): + N = pos_embed.shape[1] + if N == target_spatial_size: + return pos_embed + dim = pos_embed.shape[-1] + # nn.functional.interpolate doesn't work with bfloat16 so we cast to float32 + pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32) + pos_embed = nn.functional.interpolate( + pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute( + 0, 3, 1, 2 + ), + scale_factor=math.sqrt(target_spatial_size / N), + mode="bicubic", + ) + if updated: + pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16) + pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return pos_embed + + +def interpolate_pos_encoding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape=None, + first_patch_idx=1, +): + assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none" + N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists + if npatch_per_img == N: + return pos_embed + + assert ( + patches_layout[-1] == patches_layout[-2] + ), "Interpolation of pos embed not supported for non-square layouts" + + class_emb = pos_embed[:, :first_patch_idx] + pos_embed = pos_embed[:, first_patch_idx:] + + if input_shape is None or patches_layout[0] == 1: + # simple 2D pos embedding, no temporal component + pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed) + elif patches_layout[0] > 1: + # pos embed has a temporal component + assert len(input_shape) == 4, "temporal interpolation not supported" + # we only support 2D interpolation in this case + num_frames = patches_layout[0] + num_spatial_tokens = patches_layout[1] * patches_layout[2] + pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1) + # interpolate embedding for zeroth frame + pos_embed = interpolate_pos_encoding_2d( + npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0) + ) + else: + raise ValueError("This type of interpolation isn't implemented") + + return torch.cat((class_emb, pos_embed), dim=1) + + +def _get_pos_embedding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape, + first_patch_idx=1, +): + pos_embed = interpolate_pos_encoding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape=input_shape, + first_patch_idx=first_patch_idx, + ) + return pos_embed + + +class PatchEmbedGeneric(nn.Module): + """ + PatchEmbed from Hydra + """ + + def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None): + super().__init__() + + if len(proj_stem) > 1: + self.proj = 
nn.Sequential(*proj_stem) + else: + # Special case to be able to load pre-trained models that were + # trained with a standard stem + self.proj = proj_stem[0] + self.norm_layer = norm_layer + + def get_patch_layout(self, img_size): + with torch.no_grad(): + dummy_img = torch.zeros( + [ + 1, + ] + + img_size + ) + dummy_out = self.proj(dummy_img) + embed_dim = dummy_out.shape[1] + patches_layout = tuple(dummy_out.shape[2:]) + num_patches = np.prod(patches_layout) + return patches_layout, num_patches, embed_dim + + def forward(self, x): + x = self.proj(x) + # B C (T) H W -> B (T)HW C + x = x.flatten(2).transpose(1, 2) + if self.norm_layer is not None: + x = self.norm_layer(x) + return x + + +class SpatioTemporalPosEmbeddingHelper(VerboseNNModule): + def __init__( + self, + patches_layout: List, + num_patches: int, + num_cls_tokens: int, + embed_dim: int, + learnable: bool, + ) -> None: + super().__init__() + self.num_cls_tokens = num_cls_tokens + self.patches_layout = patches_layout + self.num_patches = num_patches + self.num_tokens = num_cls_tokens + num_patches + self.learnable = learnable + if self.learnable: + self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim)) + trunc_normal_(self.pos_embed, std=0.02) + else: + self.register_buffer( + "pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim) + ) + + def get_pos_embedding(self, vision_input, all_vision_tokens): + input_shape = vision_input.shape + pos_embed = _get_pos_embedding( + all_vision_tokens.size(1) - self.num_cls_tokens, + pos_embed=self.pos_embed, + patches_layout=self.patches_layout, + input_shape=input_shape, + first_patch_idx=self.num_cls_tokens, + ) + return pos_embed + + +class RGBDTPreprocessor(VerboseNNModule): + def __init__( + self, + rgbt_stem: PatchEmbedGeneric, + depth_stem: Optional[PatchEmbedGeneric], + img_size: Tuple = (3, 224, 224), + num_cls_tokens: int = 1, + pos_embed_fn: Optional[Callable] = None, + use_type_embed: bool = False, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + stem = rgbt_stem if rgbt_stem is not None else depth_stem + ( + self.patches_layout, + self.num_patches, + self.embed_dim, + ) = stem.get_patch_layout(img_size) + self.rgbt_stem = rgbt_stem + self.depth_stem = depth_stem + self.use_pos_embed = pos_embed_fn is not None + self.use_type_embed = use_type_embed + self.num_cls_tokens = num_cls_tokens + + if self.use_pos_embed: + self.pos_embedding_helper = pos_embed_fn( + patches_layout=self.patches_layout, + num_cls_tokens=num_cls_tokens, + num_patches=self.num_patches, + embed_dim=self.embed_dim, + ) + if self.num_cls_tokens > 0: + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, self.embed_dim) + ) + if self.use_type_embed: + self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style): + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + if self.use_pos_embed: + nn.init.normal_(self.pos_embedding_helper.pos_embed) + self.pos_embedding_helper.pos_embed *= scale + + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + if self.use_type_embed: + nn.init.normal_(self.type_embed) + + def tokenize_input_and_cls_pos(self, input, stem, mask): + # tokens is of shape B x L x D + tokens = 
stem(input) + assert tokens.ndim == 3 + assert tokens.shape[2] == self.embed_dim + B = tokens.shape[0] + if self.num_cls_tokens > 0: + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + tokens = torch.cat((class_tokens, tokens), dim=1) + if self.use_pos_embed: + pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens) + tokens = tokens + pos_embed + if self.use_type_embed: + tokens = tokens + self.type_embed.expand(B, -1, -1) + return tokens + + def forward(self, vision=None, depth=None, patch_mask=None): + if patch_mask is not None: + raise NotImplementedError() + + if vision is not None: + vision_tokens = self.tokenize_input_and_cls_pos( + vision, self.rgbt_stem, patch_mask + ) + + if depth is not None: + depth_tokens = self.tokenize_input_and_cls_pos( + depth, self.depth_stem, patch_mask + ) + + # aggregate tokens + if vision is not None and depth is not None: + final_tokens = vision_tokens + depth_tokens + else: + final_tokens = vision_tokens if vision is not None else depth_tokens + return_dict = { + "trunk": { + "tokens": final_tokens, + }, + "head": {}, + } + return return_dict + + +class AudioPreprocessor(RGBDTPreprocessor): + def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None: + super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs) + + def forward(self, audio=None): + return super().forward(vision=audio) + + +class ThermalPreprocessor(RGBDTPreprocessor): + def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None: + super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs) + + def forward(self, thermal=None): + return super().forward(vision=thermal) + + +def build_causal_attention_mask(context_length): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(context_length, context_length, requires_grad=False) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + +class TextPreprocessor(VerboseNNModule): + def __init__( + self, + vocab_size: int, + context_length: int, + embed_dim: int, + causal_masking: bool, + supply_seq_len_to_head: bool = True, + num_cls_tokens: int = 0, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + self.vocab_size = vocab_size + self.context_length = context_length + self.token_embedding = nn.Embedding(vocab_size, embed_dim) + self.pos_embed = nn.Parameter( + torch.empty(1, self.context_length + num_cls_tokens, embed_dim) + ) + self.causal_masking = causal_masking + if self.causal_masking: + mask = build_causal_attention_mask(self.context_length) + # register the mask as a buffer so it can be moved to the right device + self.register_buffer("mask", mask) + + self.supply_seq_len_to_head = supply_seq_len_to_head + self.num_cls_tokens = num_cls_tokens + self.embed_dim = embed_dim + if num_cls_tokens > 0: + assert self.causal_masking is False, "Masking + CLS token isn't implemented" + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, embed_dim) + ) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style="openclip"): + # OpenCLIP style initialization + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.pos_embed, std=0.01) + + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + if self.num_cls_tokens > 0: + 
nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + def forward(self, text): + # text tokens are of shape B x L x D + text_tokens = self.token_embedding(text) + # concat CLS tokens if any + if self.num_cls_tokens > 0: + B = text_tokens.shape[0] + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + text_tokens = torch.cat((class_tokens, text_tokens), dim=1) + text_tokens = text_tokens + self.pos_embed + return_dict = { + "trunk": { + "tokens": text_tokens, + }, + "head": {}, + } + # Compute sequence length after adding CLS tokens + if self.supply_seq_len_to_head: + text_lengths = text.argmax(dim=-1) + return_dict["head"] = { + "seq_len": text_lengths, + } + if self.causal_masking: + return_dict["trunk"].update({"attn_mask": self.mask}) + return return_dict + + +class Im2Video(nn.Module): + """Convert an image into a trivial video.""" + + def __init__(self, time_dim=2): + super().__init__() + self.time_dim = time_dim + + def forward(self, x): + if x.ndim == 4: + # B, C, H, W -> B, C, T, H, W + return x.unsqueeze(self.time_dim) + elif x.ndim == 5: + return x + else: + raise ValueError(f"Dimension incorrect {x.shape}") + + +class PadIm2Video(Im2Video): + def __init__(self, ntimes, pad_type, time_dim=2): + super().__init__(time_dim=time_dim) + assert ntimes > 0 + assert pad_type in ["zero", "repeat"] + self.ntimes = ntimes + self.pad_type = pad_type + + def forward(self, x): + x = super().forward(x) + if x.shape[self.time_dim] == 1: + if self.pad_type == "repeat": + new_shape = [1] * len(x.shape) + new_shape[self.time_dim] = self.ntimes + x = x.repeat(new_shape) + elif self.pad_type == "zero": + padarg = [0, 0] * len(x.shape) + padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim] + x = nn.functional.pad(x, padarg) + return x + + +# Modified from github.com/openai/CLIP +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + + list(range(ord("¡"), ord("¬") + 1)) + + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def basic_clean(text):
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r"\s+", " ", text)
    text = text.strip()
    return text


class SimpleTokenizer(object):
    def __init__(self, bpe_path: str, context_length=77):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}

        with g_pathmgr.open(bpe_path, "rb") as fh:
            bpe_bytes = io.BytesIO(fh.read())
            merges: List[str] = gzip.open(bpe_bytes).read().decode("utf-8").split("\n")
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges: List[Tuple[str, ...]] = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        # "</w>" marks the end of a word in CLIP's BPE vocabulary.
        vocab = vocab + [v + "</w>" for v in vocab]
        for merge in merges:
            vocab.append("".join(merge))
        vocab.extend(["<|startoftext|>", "<|endoftext|>"])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {
            "<|startoftext|>": "<|startoftext|>",
            "<|endoftext|>": "<|endoftext|>",
        }
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE,
        )
        self.context_length = context_length

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # `first` does not occur again; copy the rest verbatim.
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(
                self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
            )
        return bpe_tokens

    def decode(self, tokens):
        text = "".join([self.decoder[token] for token in tokens])
        text = (
            bytearray([self.byte_decoder[c] for c in text])
            .decode("utf-8", errors="replace")
            .replace("</w>", " ")
        )
        return text

    def __call__(self, texts, context_length=None):
        if not context_length:
            context_length = self.context_length

        if isinstance(texts, str):
            texts = [texts]

        sot_token = self.encoder["<|startoftext|>"]
        eot_token = self.encoder["<|endoftext|>"]
        all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)

        for i, tokens in enumerate(all_tokens):
            tokens = tokens[:context_length]
            result[i, : len(tokens)] = torch.tensor(tokens)

        if len(result) == 1:
            return result[0]
        return result


class IMUPreprocessor(VerboseNNModule):
    def __init__(
self, + kernel_size: int, + imu_stem: PatchEmbedGeneric, + embed_dim: int, + img_size: Tuple = (6, 2000), + num_cls_tokens: int = 1, + pos_embed_fn: Optional[Callable] = None, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + self.imu_stem = imu_stem + self.embed_dim = embed_dim + self.use_pos_embed = pos_embed_fn is not None + self.num_cls_tokens = num_cls_tokens + self.kernel_size = kernel_size + self.pos_embed = nn.Parameter( + torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim) + ) + + if self.num_cls_tokens > 0: + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, self.embed_dim) + ) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style): + nn.init.normal_(self.pos_embed, std=0.01) + + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + def tokenize_input_and_cls_pos(self, input, stem): + # tokens is of shape B x L x D + tokens = stem.norm_layer(stem.proj(input)) + assert tokens.ndim == 3 + assert tokens.shape[2] == self.embed_dim + B = tokens.shape[0] + if self.num_cls_tokens > 0: + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + tokens = torch.cat((class_tokens, tokens), dim=1) + if self.use_pos_embed: + tokens = tokens + self.pos_embed + return tokens + + def forward(self, imu): + # Patchify + imu = imu.unfold( + -1, + self.kernel_size, + self.kernel_size, + ).permute(0, 2, 1, 3) + imu = imu.reshape(imu.size(0), imu.size(1), -1) + + imu_tokens = self.tokenize_input_and_cls_pos( + imu, + self.imu_stem, + ) + + return_dict = { + "trunk": { + "tokens": imu_tokens, + }, + "head": {}, + } + return return_dict diff --git a/ImageBind/build/lib/imagebind/models/transformer.py b/ImageBind/build/lib/imagebind/models/transformer.py new file mode 100644 index 0000000..6224faf --- /dev/null +++ b/ImageBind/build/lib/imagebind/models/transformer.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
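
# Overview (editorial comment): a generic ViT-style transformer used as the
# trunk for every modality -- Attention/Mlp blocks with optional masking,
# DropPath and LayerScale, wrapped by SimpleTransformer with optional
# activation checkpointing.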
+ +# Code modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ; +# https://github.com/facebookresearch/deit/blob/main/models.py +# and https://github.com/facebookresearch/vissl/blob/main/vissl/models/trunks/vision_transformer.py + + +from functools import partial +from typing import Callable, List, Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, trunc_normal_ + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, + # can set manually to be compat with prev weights + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class MultiheadAttention(nn.MultiheadAttention): + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0] + + +class ViTAttention(Attention): + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + assert attn_mask is None + return super().forward(x) + + +class BlockWithMasking(nn.Module): + def __init__( + self, + dim: int, + attn_target: Callable, + mlp_ratio: int = 4, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + ffn_dropout_rate: float = 0.0, + drop_path: float = 0.0, + layer_scale_type: Optional[str] = None, + layer_scale_init_value: float = 1e-4, + ): + super().__init__() + + assert not isinstance( + attn_target, nn.Module + ), "attn_target should be a Callable. Otherwise attn_target is shared across blocks!" 
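        # attn_target is a zero-argument factory (typically a functools.partial
        # over an attention class), so each block constructs its own attention
        # module instead of sharing one instance across the stack.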
+ self.attn = attn_target() + if drop_path > 0.0: + self.drop_path = DropPath(drop_path) + else: + self.drop_path = nn.Identity() + self.norm_1 = norm_layer(dim) + mlp_hidden_dim = int(mlp_ratio * dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=ffn_dropout_rate, + ) + self.norm_2 = norm_layer(dim) + self.layer_scale_type = layer_scale_type + if self.layer_scale_type is not None: + assert self.layer_scale_type in [ + "per_channel", + "scalar", + ], f"Found Layer scale type {self.layer_scale_type}" + if self.layer_scale_type == "per_channel": + # one gamma value per channel + gamma_shape = [1, 1, dim] + elif self.layer_scale_type == "scalar": + # single gamma value for all channels + gamma_shape = [1, 1, 1] + # two gammas: for each part of the fwd in the encoder + self.layer_scale_gamma1 = nn.Parameter( + torch.ones(size=gamma_shape) * layer_scale_init_value, + requires_grad=True, + ) + self.layer_scale_gamma2 = nn.Parameter( + torch.ones(size=gamma_shape) * layer_scale_init_value, + requires_grad=True, + ) + + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + if self.layer_scale_type is None: + x = x + self.drop_path(self.attn(self.norm_1(x), attn_mask)) + x = x + self.drop_path(self.mlp(self.norm_2(x))) + else: + x = ( + x + + self.drop_path(self.attn(self.norm_1(x), attn_mask)) + * self.layer_scale_gamma1 + ) + x = x + self.drop_path(self.mlp(self.norm_2(x))) * self.layer_scale_gamma2 + return x + + +_LAYER_NORM = partial(nn.LayerNorm, eps=1e-6) + + +class SimpleTransformer(nn.Module): + def __init__( + self, + attn_target: Callable, + embed_dim: int, + num_blocks: int, + block: Callable = BlockWithMasking, + pre_transformer_layer: Optional[Callable] = None, + post_transformer_layer: Optional[Callable] = None, + drop_path_rate: float = 0.0, + drop_path_type: str = "progressive", + norm_layer: Callable = _LAYER_NORM, + mlp_ratio: int = 4, + ffn_dropout_rate: float = 0.0, + layer_scale_type: Optional[str] = None, # from cait; possible values are None, "per_channel", "scalar" + layer_scale_init_value: float = 1e-4, # from cait; float + weight_init_style: str = "jax", # possible values jax or pytorch + ): + """ + Simple Transformer with the following features + 1. Supports masked attention + 2. Supports DropPath + 3. Supports LayerScale + 4. Supports Dropout in Attention and FFN + 5. 
Makes few assumptions about the input except that it is a Tensor + """ + super().__init__() + self.pre_transformer_layer = pre_transformer_layer + if drop_path_type == "progressive": + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)] + elif drop_path_type == "uniform": + dpr = [drop_path_rate for i in range(num_blocks)] + else: + raise ValueError(f"Unknown drop_path_type: {drop_path_type}") + + self.blocks = nn.Sequential( + *[ + block( + dim=embed_dim, + attn_target=attn_target, + mlp_ratio=mlp_ratio, + ffn_dropout_rate=ffn_dropout_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + layer_scale_type=layer_scale_type, + layer_scale_init_value=layer_scale_init_value, + ) + for i in range(num_blocks) + ] + ) + self.post_transformer_layer = post_transformer_layer + self.weight_init_style = weight_init_style + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + if self.weight_init_style == "jax": + # Based on MAE and official Jax ViT implementation + torch.nn.init.xavier_uniform_(m.weight) + elif self.weight_init_style == "pytorch": + # PyTorch ViT uses trunc_normal_ + trunc_normal_(m.weight, std=0.02) + + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, (nn.LayerNorm)): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward( + self, + tokens: torch.Tensor, + attn_mask: torch.Tensor = None, + use_checkpoint: bool = False, + checkpoint_every_n: int = 1, + checkpoint_blk_ids: Optional[List[int]] = None, + ): + """ + Inputs + - tokens: data of shape N x L x D (or L x N x D depending on the attention implementation) + - attn: mask of shape L x L + + Output + - x: data of shape N x L x D (or L x N x D depending on the attention implementation) + """ + if self.pre_transformer_layer: + tokens = self.pre_transformer_layer(tokens) + if use_checkpoint and checkpoint_blk_ids is None: + checkpoint_blk_ids = [ + blk_id + for blk_id in range(len(self.blocks)) + if blk_id % checkpoint_every_n == 0 + ] + if checkpoint_blk_ids: + checkpoint_blk_ids = set(checkpoint_blk_ids) + for blk_id, blk in enumerate(self.blocks): + if use_checkpoint and blk_id in checkpoint_blk_ids: + tokens = checkpoint.checkpoint( + blk, tokens, attn_mask, use_reentrant=False + ) + else: + tokens = blk(tokens, attn_mask=attn_mask) + if self.post_transformer_layer: + tokens = self.post_transformer_layer(tokens) + return tokens diff --git a/ImageBind/imagebind.egg-info/PKG-INFO b/ImageBind/imagebind.egg-info/PKG-INFO new file mode 100644 index 0000000..ec03efc --- /dev/null +++ b/ImageBind/imagebind.egg-info/PKG-INFO @@ -0,0 +1,170 @@ +Metadata-Version: 2.2 +Name: imagebind +Version: 0.1.0 +Summary: A brief description of the package +Home-page: https://github.com/facebookresearch/ImageBind +Classifier: Programming Language :: Python :: 3 +Classifier: License :: Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International +Description-Content-Type: text/markdown +License-File: LICENSE +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: summary + +# ImageBind: One Embedding Space To Bind Them All + +**[FAIR, Meta AI](https://ai.facebook.com/research/)** + +Rohit Girdhar*, +Alaaeldin El-Nouby*, +Zhuang Liu, +Mannat Singh, +Kalyan Vasudev Alwala, +Armand Joulin, +Ishan Misra* + +To appear at CVPR 2023 (*Highlighted paper*) + +[[`Paper`](https://facebookresearch.github.io/ImageBind/paper)] 
[[`Blog`](https://ai.facebook.com/blog/imagebind-six-modalities-binding-ai/)] [[`Demo`](https://imagebind.metademolab.com/)] [[`Supplementary Video`](https://dl.fbaipublicfiles.com/imagebind/imagebind_video.mp4)] [[`BibTex`](#citing-imagebind)]

PyTorch implementation and pretrained models for ImageBind. For details, see the paper: **[ImageBind: One Embedding Space To Bind Them All](https://facebookresearch.github.io/ImageBind/paper)**.

ImageBind learns a joint embedding across six different modalities - images, text, audio, depth, thermal, and IMU data. It enables novel emergent applications ‘out-of-the-box’ including cross-modal retrieval, composing modalities with arithmetic, cross-modal detection and generation.

![ImageBind](https://user-images.githubusercontent.com/8495451/236859695-ffa13364-3e39-4d99-a8da-fbfab17f9a6b.gif)

## ImageBind model

Emergent zero-shot classification performance.

| Model          | IN1k | K400 | NYU-D | ESC  | LLVIP | Ego4D | download |
|----------------|------|------|-------|------|-------|-------|----------|
| imagebind_huge | 77.7 | 50.0 | 54.0  | 66.9 | 63.4  | 25.0  | [checkpoint](https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth) |

## Usage

Install PyTorch 1.13+ and other third-party dependencies.

```shell
conda create --name imagebind python=3.10 -y
conda activate imagebind

pip install .
```

For Windows users, you might need to install `soundfile` for reading/writing audio files. (Thanks @congyue1977)

```
pip install soundfile
```

Extract and compare features across modalities (e.g. image, text and audio).

```python
from imagebind import data
import torch
from imagebind.models import imagebind_model
from imagebind.models.imagebind_model import ModalityType

text_list=["A dog.", "A car", "A bird"]
image_paths=[".assets/dog_image.jpg", ".assets/car_image.jpg", ".assets/bird_image.jpg"]
audio_paths=[".assets/dog_audio.wav", ".assets/car_audio.wav", ".assets/bird_audio.wav"]

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Instantiate model
model = imagebind_model.imagebind_huge(pretrained=True)
model.eval()
model.to(device)

# Load data
inputs = {
    ModalityType.TEXT: data.load_and_transform_text(text_list, device),
    ModalityType.VISION: data.load_and_transform_vision_data(image_paths, device),
    ModalityType.AUDIO: data.load_and_transform_audio_data(audio_paths, device),
}

with torch.no_grad():
    embeddings = model(inputs)

print(
    "Vision x Text: ",
    torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1),
)
print(
    "Audio x Text: ",
    torch.softmax(embeddings[ModalityType.AUDIO] @ embeddings[ModalityType.TEXT].T, dim=-1),
)
print(
    "Vision x Audio: ",
    torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.AUDIO].T, dim=-1),
)

# Expected output:
#
# Vision x Text:
# tensor([[9.9761e-01, 2.3694e-03, 1.8612e-05],
#         [3.3836e-05, 9.9994e-01, 2.4118e-05],
#         [4.7997e-05, 1.3496e-02, 9.8646e-01]])
#
# Audio x Text:
# tensor([[1., 0., 0.],
#         [0., 1., 0.],
#         [0., 0., 1.]])
#
# Vision x Audio:
# tensor([[0.8070, 0.1088, 0.0842],
#         [0.1036, 0.7884, 0.1079],
#         [0.0018, 0.0022, 0.9960]])
```

## Model card

Please see the [model card](model_card.md) for details.

## License

ImageBind code and model weights are released under the CC BY-NC-SA 4.0 license. See [LICENSE](LICENSE) for additional details.

## Contributing

See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md).
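
## Cross-modal retrieval (unofficial example)

As an additional, unofficial sketch (not part of the upstream README), the `embeddings` dictionary produced by the usage snippet above can be reused for simple cross-modal retrieval: all modalities share one embedding space, so ranking reduces to a matrix product.

```python
# Unofficial sketch: rank audio clips against each text query, reusing the
# `embeddings` dict computed in the usage example above.
import torch

from imagebind.models.imagebind_model import ModalityType

# (n_text, n_audio) similarity matrix.
sims = embeddings[ModalityType.TEXT] @ embeddings[ModalityType.AUDIO].T

# Index of the best-matching audio clip per text query.
best_clip = sims.argmax(dim=-1)
print(best_clip)  # tensor([0, 1, 2]) given the sample outputs above
```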
+ +## Citing ImageBind + +If you find this repository useful, please consider giving a star :star: and citation + +``` +@inproceedings{girdhar2023imagebind, + title={ImageBind: One Embedding Space To Bind Them All}, + author={Girdhar, Rohit and El-Nouby, Alaaeldin and Liu, Zhuang +and Singh, Mannat and Alwala, Kalyan Vasudev and Joulin, Armand and Misra, Ishan}, + booktitle={CVPR}, + year={2023} +} +``` diff --git a/ImageBind/imagebind.egg-info/SOURCES.txt b/ImageBind/imagebind.egg-info/SOURCES.txt new file mode 100755 index 0000000..d1ba5aa --- /dev/null +++ b/ImageBind/imagebind.egg-info/SOURCES.txt @@ -0,0 +1,15 @@ +LICENSE +README.md +setup.py +imagebind/__init__.py +imagebind/data.py +imagebind.egg-info/PKG-INFO +imagebind.egg-info/SOURCES.txt +imagebind.egg-info/dependency_links.txt +imagebind.egg-info/top_level.txt +imagebind/bpe/bpe_simple_vocab_16e6.txt.gz +imagebind/models/__init__.py +imagebind/models/helpers.py +imagebind/models/imagebind_model.py +imagebind/models/multimodal_preprocessors.py +imagebind/models/transformer.py \ No newline at end of file diff --git a/ImageBind/imagebind.egg-info/dependency_links.txt b/ImageBind/imagebind.egg-info/dependency_links.txt new file mode 100755 index 0000000..7613eb6 --- /dev/null +++ b/ImageBind/imagebind.egg-info/dependency_links.txt @@ -0,0 +1 @@ +https://download.pytorch.org/whl/cu113 diff --git a/ImageBind/imagebind.egg-info/top_level.txt b/ImageBind/imagebind.egg-info/top_level.txt new file mode 100755 index 0000000..e769602 --- /dev/null +++ b/ImageBind/imagebind.egg-info/top_level.txt @@ -0,0 +1 @@ +imagebind diff --git a/ImageBind/imagebind/__init__.py b/ImageBind/imagebind/__init__.py new file mode 100755 index 0000000..559fc8d --- /dev/null +++ b/ImageBind/imagebind/__init__.py @@ -0,0 +1,3 @@ +from imagebind import data +from imagebind.models import imagebind_model +from imagebind.models.imagebind_model import ModalityType \ No newline at end of file diff --git a/ImageBind/imagebind/bpe/bpe_simple_vocab_16e6.txt.gz b/ImageBind/imagebind/bpe/bpe_simple_vocab_16e6.txt.gz new file mode 100755 index 0000000..7b5088a Binary files /dev/null and b/ImageBind/imagebind/bpe/bpe_simple_vocab_16e6.txt.gz differ diff --git a/ImageBind/imagebind/data.py b/ImageBind/imagebind/data.py new file mode 100755 index 0000000..6b774d6 --- /dev/null +++ b/ImageBind/imagebind/data.py @@ -0,0 +1,343 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
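
# Overview (editorial comment): loading/transform utilities for each
# modality -- images are resized/center-cropped to 224px and CLIP-normalized,
# text is BPE-tokenized, audio is converted to normalized mel-spectrogram
# clips, and video is sampled into multiple temporal clips with three spatial
# crops each.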
+ +import logging +import math +import pkg_resources + +import torch +import torch.nn as nn +import torchaudio +from PIL import Image +from pytorchvideo import transforms as pv_transforms +from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler +from pytorchvideo.data.encoded_video import EncodedVideo +from torchvision import transforms +from torchvision.transforms._transforms_video import NormalizeVideo + +from imagebind.models.multimodal_preprocessors import SimpleTokenizer + +DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds + + +def return_bpe_path(): + return pkg_resources.resource_filename( + "imagebind", "bpe/bpe_simple_vocab_16e6.txt.gz" + ) + + +def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length): + # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102 + waveform -= waveform.mean() + fbank = torchaudio.compliance.kaldi.fbank( + waveform, + htk_compat=True, + sample_frequency=sample_rate, + use_energy=False, + window_type="hanning", + num_mel_bins=num_mel_bins, + dither=0.0, + frame_length=25, + frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS, + ) + # Convert to [mel_bins, num_frames] shape + fbank = fbank.transpose(0, 1) + # Pad to target_length + n_frames = fbank.size(1) + p = target_length - n_frames + # if p is too large (say >20%), flash a warning + if abs(p) / n_frames > 0.2: + logging.warning( + "Large gap between audio n_frames(%d) and " + "target_length (%d). Is the audio_target_length " + "setting correct?", + n_frames, + target_length, + ) + # cut and pad + if p > 0: + fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0) + elif p < 0: + fbank = fbank[:, 0:target_length] + # Convert to [1, mel_bins, num_frames] shape, essentially like a 1 + # channel image + fbank = fbank.unsqueeze(0) + return fbank + + +def get_clip_timepoints(clip_sampler, duration): + # Read out all clips in this video + all_clips_timepoints = [] + is_last_clip = False + end = 0.0 + while not is_last_clip: + start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) + all_clips_timepoints.append((start, end)) + return all_clips_timepoints + + +def load_and_transform_vision_data(image_paths, device): + if image_paths is None: + return None + + image_outputs = [] + + data_transform = transforms.Compose( + [ + transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + + for image_path in image_paths: + with open(image_path, "rb") as fopen: + image = Image.open(fopen).convert("RGB") + + image = data_transform(image).to(device) + image_outputs.append(image) + return torch.stack(image_outputs, dim=0) + + +def load_and_transform_text(text, device): + if text is None: + return None + tokenizer = SimpleTokenizer(bpe_path=return_bpe_path()) + tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text] + tokens = torch.cat(tokens, dim=0) + return tokens + + +def load_and_transform_audio_data( + audio_paths, + device, + num_mel_bins=128, + target_length=204, + sample_rate=16000, + clip_duration=2, + clips_per_video=3, + mean=-4.268, + std=9.138, +): + if audio_paths is None: + return None + + audio_outputs = [] + clip_sampler = ConstantClipsPerVideoSampler( + clip_duration=clip_duration, clips_per_video=clips_per_video + ) + + for audio_path in audio_paths: + waveform, sr = 
torchaudio.load(audio_path) + if sample_rate != sr: + waveform = torchaudio.functional.resample( + waveform, orig_freq=sr, new_freq=sample_rate + ) + all_clips_timepoints = get_clip_timepoints( + clip_sampler, waveform.size(1) / sample_rate + ) + all_clips = [] + for clip_timepoints in all_clips_timepoints: + waveform_clip = waveform[ + :, + int(clip_timepoints[0] * sample_rate) : int( + clip_timepoints[1] * sample_rate + ), + ] + waveform_melspec = waveform2melspec( + waveform_clip, sample_rate, num_mel_bins, target_length + ) + all_clips.append(waveform_melspec) + + normalize = transforms.Normalize(mean=mean, std=std) + all_clips = [normalize(ac).to(device) for ac in all_clips] + + all_clips = torch.stack(all_clips, dim=0) + audio_outputs.append(all_clips) + + return torch.stack(audio_outputs, dim=0) + + +def crop_boxes(boxes, x_offset, y_offset): + """ + Perform crop on the bounding boxes given the offsets. + Args: + boxes (ndarray or None): bounding boxes to perform crop. The dimension + is `num boxes` x 4. + x_offset (int): cropping offset in the x axis. + y_offset (int): cropping offset in the y axis. + Returns: + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + cropped_boxes = boxes.copy() + cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset + cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset + + return cropped_boxes + + +def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): + """ + Perform uniform spatial sampling on the images and corresponding boxes. + Args: + images (tensor): images to perform uniform crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): size of height and weight to crop the images. + spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width + is larger than height. Or 0, 1, or 2 for top, center, and bottom + crop if height is larger than width. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + scale_size (int): optinal. If not None, resize the images to scale_size before + performing any crop. + Returns: + cropped (tensor): images with dimension of + `num frames` x `channel` x `size` x `size`. + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + assert spatial_idx in [0, 1, 2] + ndim = len(images.shape) + if ndim == 3: + images = images.unsqueeze(0) + height = images.shape[2] + width = images.shape[3] + + if scale_size is not None: + if width <= height: + width, height = scale_size, int(height / width * scale_size) + else: + width, height = int(width / height * scale_size), scale_size + images = torch.nn.functional.interpolate( + images, + size=(height, width), + mode="bilinear", + align_corners=False, + ) + + y_offset = int(math.ceil((height - size) / 2)) + x_offset = int(math.ceil((width - size) / 2)) + + if height > width: + if spatial_idx == 0: + y_offset = 0 + elif spatial_idx == 2: + y_offset = height - size + else: + if spatial_idx == 0: + x_offset = 0 + elif spatial_idx == 2: + x_offset = width - size + cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size] + cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + if ndim == 3: + cropped = cropped.squeeze(0) + return cropped, cropped_boxes + + +class SpatialCrop(nn.Module): + """ + Convert the video into 3 smaller clips spatially. 
Must be used after the + temporal crops to get spatial crops, and should be used with + -2 in the spatial crop at the slowfast augmentation stage (so full + frames are passed in here). Will return a larger list with the + 3x spatial crops as well. + """ + + def __init__(self, crop_size: int = 224, num_crops: int = 3): + super().__init__() + self.crop_size = crop_size + if num_crops == 3: + self.crops_to_ext = [0, 1, 2] + self.flipped_crops_to_ext = [] + elif num_crops == 1: + self.crops_to_ext = [1] + self.flipped_crops_to_ext = [] + else: + raise NotImplementedError("Nothing else supported yet") + + def forward(self, videos): + """ + Args: + videos: A list of C, T, H, W videos. + Returns: + videos: A list with 3x the number of elements. Each video converted + to C, T, H', W' by spatial cropping. + """ + assert isinstance(videos, list), "Must be a list of videos after temporal crops" + assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)" + res = [] + for video in videos: + for spatial_idx in self.crops_to_ext: + res.append(uniform_crop(video, self.crop_size, spatial_idx)[0]) + if not self.flipped_crops_to_ext: + continue + flipped_video = transforms.functional.hflip(video) + for spatial_idx in self.flipped_crops_to_ext: + res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0]) + return res + + +def load_and_transform_video_data( + video_paths, + device, + clip_duration=2, + clips_per_video=5, + sample_rate=16000, +): + if video_paths is None: + return None + + video_outputs = [] + video_transform = transforms.Compose( + [ + pv_transforms.ShortSideScale(224), + NormalizeVideo( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + + clip_sampler = ConstantClipsPerVideoSampler( + clip_duration=clip_duration, clips_per_video=clips_per_video + ) + frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration) + + for video_path in video_paths: + video = EncodedVideo.from_path( + video_path, + decoder="decord", + decode_audio=False, + **{"sample_rate": sample_rate}, + ) + + all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration) + + all_video = [] + for clip_timepoints in all_clips_timepoints: + # Read the clip, get frames + clip = video.get_clip(clip_timepoints[0], clip_timepoints[1]) + if clip is None: + raise ValueError("No clip found") + video_clip = frame_sampler(clip["video"]) + video_clip = video_clip / 255.0 # since this is float, need 0-1 + + all_video.append(video_clip) + + all_video = [video_transform(clip) for clip in all_video] + all_video = SpatialCrop(224, num_crops=3)(all_video) + + all_video = torch.stack(all_video, dim=0) + video_outputs.append(all_video) + + return torch.stack(video_outputs, dim=0).to(device) diff --git a/ImageBind/imagebind/models/__init__.py b/ImageBind/imagebind/models/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/ImageBind/imagebind/models/helpers.py b/ImageBind/imagebind/models/helpers.py new file mode 100755 index 0000000..71abe9b --- /dev/null +++ b/ImageBind/imagebind/models/helpers.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
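
# Overview (editorial comment): small reusable building blocks -- L2
# Normalize, LearnableLogitScaling (temperature), EinOpsRearrange,
# VerboseNNModule, dtype-casting helpers, QuickGELU, and the token-selection
# heads (SelectElement, SelectEOSAndProject).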
+ + +import einops +import numpy as np +import torch +import torch.nn as nn + + +class Normalize(nn.Module): + def __init__(self, dim: int) -> None: + super().__init__() + self.dim = dim + + def forward(self, x): + return torch.nn.functional.normalize(x, dim=self.dim, p=2) + + +class LearnableLogitScaling(nn.Module): + def __init__( + self, + logit_scale_init: float = 1 / 0.07, + learnable: bool = True, + max_logit_scale: float = 100, + ) -> None: + super().__init__() + self.max_logit_scale = max_logit_scale + self.logit_scale_init = logit_scale_init + self.learnable = learnable + log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init) + if learnable: + self.log_logit_scale = nn.Parameter(log_logit_scale) + else: + self.register_buffer("log_logit_scale", log_logit_scale) + + def forward(self, x): + return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x + + def extra_repr(self): + st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}," \ + f" max_logit_scale={self.max_logit_scale}" + return st + + +class EinOpsRearrange(nn.Module): + def __init__(self, rearrange_expr: str, **kwargs) -> None: + super().__init__() + self.rearrange_expr = rearrange_expr + self.kwargs = kwargs + + def forward(self, x): + assert isinstance(x, torch.Tensor) + return einops.rearrange(x, self.rearrange_expr, **self.kwargs) + + +class VerboseNNModule(nn.Module): + """ + Wrapper around nn.Module that prints registered buffers and parameter names. + """ + + @staticmethod + def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str: + st = ( + "(" + + name + + "): " + + "tensor(" + + str(tuple(tensor[1].shape)) + + ", requires_grad=" + + str(tensor[1].requires_grad) + + ")\n" + ) + return st + + def extra_repr(self) -> str: + named_modules = set() + for p in self.named_modules(): + named_modules.update([p[0]]) + named_modules = list(named_modules) + + string_repr = "" + for p in self.named_parameters(): + name = p[0].split(".")[0] + if name not in named_modules: + string_repr += self.get_readable_tensor_repr(name, p) + + for p in self.named_buffers(): + name = p[0].split(".")[0] + string_repr += self.get_readable_tensor_repr(name, p) + + return string_repr + + +def cast_if_src_dtype( + tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype +): + updated = False + if tensor.dtype == src_dtype: + tensor = tensor.to(dtype=tgt_dtype) + updated = True + return tensor, updated + + +class QuickGELU(nn.Module): + # From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166 + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class SelectElement(nn.Module): + def __init__(self, index) -> None: + super().__init__() + self.index = index + + def forward(self, x): + assert x.ndim >= 3 + return x[:, self.index, ...] 
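

# Illustrative sketch (editorial addition, not in the original file):
# SelectElement(index=0) pools the leading CLS token, which is how the
# modality heads in imagebind_model.py reduce a token sequence to one vector:
#
#   head = nn.Sequential(
#       nn.LayerNorm(1024, eps=1e-6),
#       SelectElement(index=0),
#       nn.Linear(1024, 768, bias=False),
#   )
#   out = head(torch.randn(2, 197, 1024))  # -> shape (2, 768)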
+ + +class SelectEOSAndProject(nn.Module): + """ + Text Pooling used in OpenCLIP + """ + + def __init__(self, proj: nn.Module) -> None: + super().__init__() + self.proj = proj + + def forward(self, x, seq_len): + assert x.ndim == 3 + # x is of shape B x L x D + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), seq_len] + x = self.proj(x) + return x diff --git a/ImageBind/imagebind/models/imagebind_model.py b/ImageBind/imagebind/models/imagebind_model.py new file mode 100755 index 0000000..c560945 --- /dev/null +++ b/ImageBind/imagebind/models/imagebind_model.py @@ -0,0 +1,506 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import os +from functools import partial +from types import SimpleNamespace + +import torch +import torch.nn as nn + +from imagebind.models.helpers import (EinOpsRearrange, LearnableLogitScaling, Normalize, + SelectElement, SelectEOSAndProject) +from imagebind.models.multimodal_preprocessors import (AudioPreprocessor, + IMUPreprocessor, PadIm2Video, + PatchEmbedGeneric, + RGBDTPreprocessor, + SpatioTemporalPosEmbeddingHelper, + TextPreprocessor, + ThermalPreprocessor) +from imagebind.models.transformer import MultiheadAttention, SimpleTransformer + +ModalityType = SimpleNamespace( + VISION="vision", + TEXT="text", + AUDIO="audio", + THERMAL="thermal", + DEPTH="depth", + IMU="imu", +) + + +class ImageBindModel(nn.Module): + def __init__( + self, + video_frames=2, + kernel_size=(2, 14, 14), + audio_kernel_size=16, + audio_stride=10, + out_embed_dim=768, + vision_embed_dim=1024, + vision_num_blocks=24, + vision_num_heads=16, + audio_embed_dim=768, + audio_num_blocks=12, + audio_num_heads=12, + audio_num_mel_bins=128, + audio_target_len=204, + audio_drop_path=0.1, + text_embed_dim=768, + text_num_blocks=12, + text_num_heads=12, + depth_embed_dim=384, + depth_kernel_size=16, + depth_num_blocks=12, + depth_num_heads=8, + depth_drop_path=0.0, + thermal_embed_dim=768, + thermal_kernel_size=16, + thermal_num_blocks=12, + thermal_num_heads=12, + thermal_drop_path=0.0, + imu_embed_dim=512, + imu_kernel_size=8, + imu_num_blocks=6, + imu_num_heads=8, + imu_drop_path=0.7, + ): + super().__init__() + + self.modality_preprocessors = self._create_modality_preprocessors( + video_frames, + vision_embed_dim, + kernel_size, + text_embed_dim, + audio_embed_dim, + audio_kernel_size, + audio_stride, + audio_num_mel_bins, + audio_target_len, + depth_embed_dim, + depth_kernel_size, + thermal_embed_dim, + thermal_kernel_size, + imu_embed_dim, + ) + + self.modality_trunks = self._create_modality_trunks( + vision_embed_dim, + vision_num_blocks, + vision_num_heads, + text_embed_dim, + text_num_blocks, + text_num_heads, + audio_embed_dim, + audio_num_blocks, + audio_num_heads, + audio_drop_path, + depth_embed_dim, + depth_num_blocks, + depth_num_heads, + depth_drop_path, + thermal_embed_dim, + thermal_num_blocks, + thermal_num_heads, + thermal_drop_path, + imu_embed_dim, + imu_num_blocks, + imu_num_heads, + imu_drop_path, + ) + + self.modality_heads = self._create_modality_heads( + out_embed_dim, + vision_embed_dim, + text_embed_dim, + audio_embed_dim, + depth_embed_dim, + thermal_embed_dim, + imu_embed_dim, + ) + + self.modality_postprocessors = self._create_modality_postprocessors( + out_embed_dim + ) + + def 
_create_modality_preprocessors( + self, + video_frames=2, + vision_embed_dim=1024, + kernel_size=(2, 14, 14), + text_embed_dim=768, + audio_embed_dim=768, + audio_kernel_size=16, + audio_stride=10, + audio_num_mel_bins=128, + audio_target_len=204, + depth_embed_dim=768, + depth_kernel_size=16, + thermal_embed_dim=768, + thermal_kernel_size=16, + imu_embed_dim=512, + ): + rgbt_stem = PatchEmbedGeneric( + proj_stem=[ + PadIm2Video(pad_type="repeat", ntimes=2), + nn.Conv3d( + in_channels=3, + kernel_size=kernel_size, + out_channels=vision_embed_dim, + stride=kernel_size, + bias=False, + ), + ] + ) + rgbt_preprocessor = RGBDTPreprocessor( + img_size=[3, video_frames, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + rgbt_stem=rgbt_stem, + depth_stem=None, + ) + + text_preprocessor = TextPreprocessor( + context_length=77, + vocab_size=49408, + embed_dim=text_embed_dim, + causal_masking=True, + ) + + audio_stem = PatchEmbedGeneric( + proj_stem=[ + nn.Conv2d( + in_channels=1, + kernel_size=audio_kernel_size, + stride=audio_stride, + out_channels=audio_embed_dim, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=audio_embed_dim), + ) + audio_preprocessor = AudioPreprocessor( + img_size=[1, audio_num_mel_bins, audio_target_len], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + audio_stem=audio_stem, + ) + + depth_stem = PatchEmbedGeneric( + [ + nn.Conv2d( + kernel_size=depth_kernel_size, + in_channels=1, + out_channels=depth_embed_dim, + stride=depth_kernel_size, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=depth_embed_dim), + ) + + depth_preprocessor = RGBDTPreprocessor( + img_size=[1, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + rgbt_stem=None, + depth_stem=depth_stem, + ) + + thermal_stem = PatchEmbedGeneric( + [ + nn.Conv2d( + kernel_size=thermal_kernel_size, + in_channels=1, + out_channels=thermal_embed_dim, + stride=thermal_kernel_size, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=thermal_embed_dim), + ) + thermal_preprocessor = ThermalPreprocessor( + img_size=[1, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + thermal_stem=thermal_stem, + ) + + imu_stem = PatchEmbedGeneric( + [ + nn.Linear( + in_features=48, + out_features=imu_embed_dim, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=imu_embed_dim), + ) + + imu_preprocessor = IMUPreprocessor( + img_size=[6, 2000], + num_cls_tokens=1, + kernel_size=8, + embed_dim=imu_embed_dim, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + imu_stem=imu_stem, + ) + + modality_preprocessors = { + ModalityType.VISION: rgbt_preprocessor, + ModalityType.TEXT: text_preprocessor, + ModalityType.AUDIO: audio_preprocessor, + ModalityType.DEPTH: depth_preprocessor, + ModalityType.THERMAL: thermal_preprocessor, + ModalityType.IMU: imu_preprocessor, + } + + return nn.ModuleDict(modality_preprocessors) + + def _create_modality_trunks( + self, + vision_embed_dim=1024, + vision_num_blocks=24, + vision_num_heads=16, + text_embed_dim=768, + text_num_blocks=12, + text_num_heads=12, + audio_embed_dim=768, + audio_num_blocks=12, + audio_num_heads=12, + audio_drop_path=0.0, + depth_embed_dim=768, + depth_num_blocks=12, + depth_num_heads=12, + depth_drop_path=0.0, + thermal_embed_dim=768, + thermal_num_blocks=12, + thermal_num_heads=12, + 
thermal_drop_path=0.0, + imu_embed_dim=512, + imu_num_blocks=6, + imu_num_heads=8, + imu_drop_path=0.7, + ): + def instantiate_trunk( + embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path + ): + return SimpleTransformer( + embed_dim=embed_dim, + num_blocks=num_blocks, + ffn_dropout_rate=0.0, + drop_path_rate=drop_path, + attn_target=partial( + MultiheadAttention, + embed_dim=embed_dim, + num_heads=num_heads, + bias=True, + add_bias_kv=add_bias_kv, + ), + pre_transformer_layer=nn.Sequential( + nn.LayerNorm(embed_dim, eps=1e-6) + if pre_transformer_ln + else nn.Identity(), + EinOpsRearrange("b l d -> l b d"), + ), + post_transformer_layer=EinOpsRearrange("l b d -> b l d"), + ) + + modality_trunks = {} + modality_trunks[ModalityType.VISION] = instantiate_trunk( + vision_embed_dim, + vision_num_blocks, + vision_num_heads, + pre_transformer_ln=True, + add_bias_kv=False, + drop_path=0.0, + ) + modality_trunks[ModalityType.TEXT] = instantiate_trunk( + text_embed_dim, + text_num_blocks, + text_num_heads, + pre_transformer_ln=False, + add_bias_kv=False, + drop_path=0.0, + ) + modality_trunks[ModalityType.AUDIO] = instantiate_trunk( + audio_embed_dim, + audio_num_blocks, + audio_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=audio_drop_path, + ) + modality_trunks[ModalityType.DEPTH] = instantiate_trunk( + depth_embed_dim, + depth_num_blocks, + depth_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=depth_drop_path, + ) + modality_trunks[ModalityType.THERMAL] = instantiate_trunk( + thermal_embed_dim, + thermal_num_blocks, + thermal_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=thermal_drop_path, + ) + modality_trunks[ModalityType.IMU] = instantiate_trunk( + imu_embed_dim, + imu_num_blocks, + imu_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=imu_drop_path, + ) + + return nn.ModuleDict(modality_trunks) + + def _create_modality_heads( + self, + out_embed_dim, + vision_embed_dim, + text_embed_dim, + audio_embed_dim, + depth_embed_dim, + thermal_embed_dim, + imu_embed_dim, + ): + modality_heads = {} + + modality_heads[ModalityType.VISION] = nn.Sequential( + nn.LayerNorm(normalized_shape=vision_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(vision_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.TEXT] = SelectEOSAndProject( + proj=nn.Sequential( + nn.LayerNorm(normalized_shape=text_embed_dim, eps=1e-6), + nn.Linear(text_embed_dim, out_embed_dim, bias=False), + ) + ) + + modality_heads[ModalityType.AUDIO] = nn.Sequential( + nn.LayerNorm(normalized_shape=audio_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(audio_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.DEPTH] = nn.Sequential( + nn.LayerNorm(normalized_shape=depth_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(depth_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.THERMAL] = nn.Sequential( + nn.LayerNorm(normalized_shape=thermal_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(thermal_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.IMU] = nn.Sequential( + nn.LayerNorm(normalized_shape=imu_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Dropout(p=0.5), + nn.Linear(imu_embed_dim, out_embed_dim, bias=False), + ) + + return nn.ModuleDict(modality_heads) + + def _create_modality_postprocessors(self, out_embed_dim): + modality_postprocessors = {} + + 
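        # Each modality is L2-normalized below; only TEXT's logit scale is
        # learnable, the audio/depth/thermal/IMU scales are fixed constants.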
modality_postprocessors[ModalityType.VISION] = Normalize(dim=-1) + modality_postprocessors[ModalityType.TEXT] = nn.Sequential( + Normalize(dim=-1), LearnableLogitScaling(learnable=True) + ) + modality_postprocessors[ModalityType.AUDIO] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=20.0, learnable=False), + ) + modality_postprocessors[ModalityType.DEPTH] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=5.0, learnable=False), + ) + modality_postprocessors[ModalityType.THERMAL] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=10.0, learnable=False), + ) + modality_postprocessors[ModalityType.IMU] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=5.0, learnable=False), + ) + + return nn.ModuleDict(modality_postprocessors) + + def forward(self, inputs): + outputs = {} + for modality_key, modality_value in inputs.items(): + reduce_list = ( + modality_value.ndim >= 5 + ) # Audio and Video inputs consist of multiple clips + if reduce_list: + B, S = modality_value.shape[:2] + modality_value = modality_value.reshape( + B * S, *modality_value.shape[2:] + ) + + if modality_value is not None: + modality_value = self.modality_preprocessors[modality_key]( + **{modality_key: modality_value} + ) + trunk_inputs = modality_value["trunk"] + head_inputs = modality_value["head"] + modality_value = self.modality_trunks[modality_key](**trunk_inputs) + modality_value = self.modality_heads[modality_key]( + modality_value, **head_inputs + ) + modality_value = self.modality_postprocessors[modality_key]( + modality_value + ) + + if reduce_list: + modality_value = modality_value.reshape(B, S, -1) + modality_value = modality_value.mean(dim=1) + + outputs[modality_key] = modality_value + + return outputs + + +def imagebind_huge(pretrained=False): + model = ImageBindModel( + vision_embed_dim=1280, + vision_num_blocks=32, + vision_num_heads=16, + text_embed_dim=1024, + text_num_blocks=24, + text_num_heads=16, + out_embed_dim=1024, + audio_drop_path=0.1, + imu_drop_path=0.7, + ) + + if pretrained: + if not os.path.exists(".checkpoints/imagebind_huge.pth"): + print( + "Downloading imagebind weights to .checkpoints/imagebind_huge.pth ..." + ) + os.makedirs(".checkpoints", exist_ok=True) + torch.hub.download_url_to_file( + "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth", + ".checkpoints/imagebind_huge.pth", + progress=True, + ) + + model.load_state_dict(torch.load(".checkpoints/imagebind_huge.pth")) + + return model diff --git a/ImageBind/imagebind/models/multimodal_preprocessors.py b/ImageBind/imagebind/models/multimodal_preprocessors.py new file mode 100755 index 0000000..3384b87 --- /dev/null +++ b/ImageBind/imagebind/models/multimodal_preprocessors.py @@ -0,0 +1,685 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
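+
+# Module overview: modality-specific preprocessing for ImageBind. This file provides
+# sinusoidal and learnable spatio-temporal position embeddings, a generic
+# patch-embedding stem (PatchEmbedGeneric), preprocessors for RGB/depth video,
+# audio spectrograms, thermal images, IMU sequences, and text, plus the CLIP
+# byte-pair-encoding tokenizer (SimpleTokenizer).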
+ +import gzip +import html +import io +import math +from functools import lru_cache +from typing import Callable, List, Optional, Tuple + +import ftfy +import numpy as np +import regex as re +import torch +import torch.nn as nn +from iopath.common.file_io import g_pathmgr +from timm.models.layers import trunc_normal_ + +from imagebind.models.helpers import VerboseNNModule, cast_if_src_dtype + + +def get_sinusoid_encoding_table(n_position, d_hid): + """Sinusoid position encoding table""" + + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)] + ) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +def interpolate_pos_encoding_2d(target_spatial_size, pos_embed): + N = pos_embed.shape[1] + if N == target_spatial_size: + return pos_embed + dim = pos_embed.shape[-1] + # nn.functional.interpolate doesn't work with bfloat16 so we cast to float32 + pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32) + pos_embed = nn.functional.interpolate( + pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute( + 0, 3, 1, 2 + ), + scale_factor=math.sqrt(target_spatial_size / N), + mode="bicubic", + ) + if updated: + pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16) + pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return pos_embed + + +def interpolate_pos_encoding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape=None, + first_patch_idx=1, +): + assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none" + N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists + if npatch_per_img == N: + return pos_embed + + assert ( + patches_layout[-1] == patches_layout[-2] + ), "Interpolation of pos embed not supported for non-square layouts" + + class_emb = pos_embed[:, :first_patch_idx] + pos_embed = pos_embed[:, first_patch_idx:] + + if input_shape is None or patches_layout[0] == 1: + # simple 2D pos embedding, no temporal component + pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed) + elif patches_layout[0] > 1: + # pos embed has a temporal component + assert len(input_shape) == 4, "temporal interpolation not supported" + # we only support 2D interpolation in this case + num_frames = patches_layout[0] + num_spatial_tokens = patches_layout[1] * patches_layout[2] + pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1) + # interpolate embedding for zeroth frame + pos_embed = interpolate_pos_encoding_2d( + npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0) + ) + else: + raise ValueError("This type of interpolation isn't implemented") + + return torch.cat((class_emb, pos_embed), dim=1) + + +def _get_pos_embedding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape, + first_patch_idx=1, +): + pos_embed = interpolate_pos_encoding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape=input_shape, + first_patch_idx=first_patch_idx, + ) + return pos_embed + + +class PatchEmbedGeneric(nn.Module): + """ + PatchEmbed from Hydra + """ + + def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None): + super().__init__() + + if len(proj_stem) > 1: + self.proj = 
nn.Sequential(*proj_stem) + else: + # Special case to be able to load pre-trained models that were + # trained with a standard stem + self.proj = proj_stem[0] + self.norm_layer = norm_layer + + def get_patch_layout(self, img_size): + with torch.no_grad(): + dummy_img = torch.zeros( + [ + 1, + ] + + img_size + ) + dummy_out = self.proj(dummy_img) + embed_dim = dummy_out.shape[1] + patches_layout = tuple(dummy_out.shape[2:]) + num_patches = np.prod(patches_layout) + return patches_layout, num_patches, embed_dim + + def forward(self, x): + x = self.proj(x) + # B C (T) H W -> B (T)HW C + x = x.flatten(2).transpose(1, 2) + if self.norm_layer is not None: + x = self.norm_layer(x) + return x + + +class SpatioTemporalPosEmbeddingHelper(VerboseNNModule): + def __init__( + self, + patches_layout: List, + num_patches: int, + num_cls_tokens: int, + embed_dim: int, + learnable: bool, + ) -> None: + super().__init__() + self.num_cls_tokens = num_cls_tokens + self.patches_layout = patches_layout + self.num_patches = num_patches + self.num_tokens = num_cls_tokens + num_patches + self.learnable = learnable + if self.learnable: + self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim)) + trunc_normal_(self.pos_embed, std=0.02) + else: + self.register_buffer( + "pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim) + ) + + def get_pos_embedding(self, vision_input, all_vision_tokens): + input_shape = vision_input.shape + pos_embed = _get_pos_embedding( + all_vision_tokens.size(1) - self.num_cls_tokens, + pos_embed=self.pos_embed, + patches_layout=self.patches_layout, + input_shape=input_shape, + first_patch_idx=self.num_cls_tokens, + ) + return pos_embed + + +class RGBDTPreprocessor(VerboseNNModule): + def __init__( + self, + rgbt_stem: PatchEmbedGeneric, + depth_stem: Optional[PatchEmbedGeneric], + img_size: Tuple = (3, 224, 224), + num_cls_tokens: int = 1, + pos_embed_fn: Optional[Callable] = None, + use_type_embed: bool = False, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + stem = rgbt_stem if rgbt_stem is not None else depth_stem + ( + self.patches_layout, + self.num_patches, + self.embed_dim, + ) = stem.get_patch_layout(img_size) + self.rgbt_stem = rgbt_stem + self.depth_stem = depth_stem + self.use_pos_embed = pos_embed_fn is not None + self.use_type_embed = use_type_embed + self.num_cls_tokens = num_cls_tokens + + if self.use_pos_embed: + self.pos_embedding_helper = pos_embed_fn( + patches_layout=self.patches_layout, + num_cls_tokens=num_cls_tokens, + num_patches=self.num_patches, + embed_dim=self.embed_dim, + ) + if self.num_cls_tokens > 0: + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, self.embed_dim) + ) + if self.use_type_embed: + self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style): + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + if self.use_pos_embed: + nn.init.normal_(self.pos_embedding_helper.pos_embed) + self.pos_embedding_helper.pos_embed *= scale + + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + if self.use_type_embed: + nn.init.normal_(self.type_embed) + + def tokenize_input_and_cls_pos(self, input, stem, mask): + # tokens is of shape B x L x D + tokens = 
stem(input) + assert tokens.ndim == 3 + assert tokens.shape[2] == self.embed_dim + B = tokens.shape[0] + if self.num_cls_tokens > 0: + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + tokens = torch.cat((class_tokens, tokens), dim=1) + if self.use_pos_embed: + pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens) + tokens = tokens + pos_embed + if self.use_type_embed: + tokens = tokens + self.type_embed.expand(B, -1, -1) + return tokens + + def forward(self, vision=None, depth=None, patch_mask=None): + if patch_mask is not None: + raise NotImplementedError() + + if vision is not None: + vision_tokens = self.tokenize_input_and_cls_pos( + vision, self.rgbt_stem, patch_mask + ) + + if depth is not None: + depth_tokens = self.tokenize_input_and_cls_pos( + depth, self.depth_stem, patch_mask + ) + + # aggregate tokens + if vision is not None and depth is not None: + final_tokens = vision_tokens + depth_tokens + else: + final_tokens = vision_tokens if vision is not None else depth_tokens + return_dict = { + "trunk": { + "tokens": final_tokens, + }, + "head": {}, + } + return return_dict + + +class AudioPreprocessor(RGBDTPreprocessor): + def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None: + super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs) + + def forward(self, audio=None): + return super().forward(vision=audio) + + +class ThermalPreprocessor(RGBDTPreprocessor): + def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None: + super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs) + + def forward(self, thermal=None): + return super().forward(vision=thermal) + + +def build_causal_attention_mask(context_length): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(context_length, context_length, requires_grad=False) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + +class TextPreprocessor(VerboseNNModule): + def __init__( + self, + vocab_size: int, + context_length: int, + embed_dim: int, + causal_masking: bool, + supply_seq_len_to_head: bool = True, + num_cls_tokens: int = 0, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + self.vocab_size = vocab_size + self.context_length = context_length + self.token_embedding = nn.Embedding(vocab_size, embed_dim) + self.pos_embed = nn.Parameter( + torch.empty(1, self.context_length + num_cls_tokens, embed_dim) + ) + self.causal_masking = causal_masking + if self.causal_masking: + mask = build_causal_attention_mask(self.context_length) + # register the mask as a buffer so it can be moved to the right device + self.register_buffer("mask", mask) + + self.supply_seq_len_to_head = supply_seq_len_to_head + self.num_cls_tokens = num_cls_tokens + self.embed_dim = embed_dim + if num_cls_tokens > 0: + assert self.causal_masking is False, "Masking + CLS token isn't implemented" + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, embed_dim) + ) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style="openclip"): + # OpenCLIP style initialization + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.pos_embed, std=0.01) + + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + if self.num_cls_tokens > 0: + 
nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + def forward(self, text): + # text tokens are of shape B x L x D + text_tokens = self.token_embedding(text) + # concat CLS tokens if any + if self.num_cls_tokens > 0: + B = text_tokens.shape[0] + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + text_tokens = torch.cat((class_tokens, text_tokens), dim=1) + text_tokens = text_tokens + self.pos_embed + return_dict = { + "trunk": { + "tokens": text_tokens, + }, + "head": {}, + } + # Compute sequence length after adding CLS tokens + if self.supply_seq_len_to_head: + text_lengths = text.argmax(dim=-1) + return_dict["head"] = { + "seq_len": text_lengths, + } + if self.causal_masking: + return_dict["trunk"].update({"attn_mask": self.mask}) + return return_dict + + +class Im2Video(nn.Module): + """Convert an image into a trivial video.""" + + def __init__(self, time_dim=2): + super().__init__() + self.time_dim = time_dim + + def forward(self, x): + if x.ndim == 4: + # B, C, H, W -> B, C, T, H, W + return x.unsqueeze(self.time_dim) + elif x.ndim == 5: + return x + else: + raise ValueError(f"Dimension incorrect {x.shape}") + + +class PadIm2Video(Im2Video): + def __init__(self, ntimes, pad_type, time_dim=2): + super().__init__(time_dim=time_dim) + assert ntimes > 0 + assert pad_type in ["zero", "repeat"] + self.ntimes = ntimes + self.pad_type = pad_type + + def forward(self, x): + x = super().forward(x) + if x.shape[self.time_dim] == 1: + if self.pad_type == "repeat": + new_shape = [1] * len(x.shape) + new_shape[self.time_dim] = self.ntimes + x = x.repeat(new_shape) + elif self.pad_type == "zero": + padarg = [0, 0] * len(x.shape) + padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim] + x = nn.functional.pad(x, padarg) + return x + + +# Modified from github.com/openai/CLIP +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + + list(range(ord("¡"), ord("¬") + 1)) + + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
+ """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r"\s+", " ", text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str, context_length=77): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + + with g_pathmgr.open(bpe_path, "rb") as fh: + bpe_bytes = io.BytesIO(fh.read()) + merges: List[str] = gzip.open(bpe_bytes).read().decode("utf-8").split("\n") + merges = merges[1 : 49152 - 256 - 2 + 1] + merges: List[Tuple[str, ...]] = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v + "" for v in vocab] + for merge in merges: + vocab.append("".join(merge)) + vocab.extend(["<|startoftext|>", "<|endoftext|>"]) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = { + "<|startoftext|>": "<|startoftext|>", + "<|endoftext|>": "<|endoftext|>", + } + self.pat = re.compile( + r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", + re.IGNORECASE, + ) + self.context_length = context_length + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + (token[-1] + "",) + pairs = get_pairs(word) + + if not pairs: + return token + "" + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) + bpe_tokens.extend( + self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") + ) + return bpe_tokens + + def decode(self, tokens): + text = "".join([self.decoder[token] for token in tokens]) + text = ( + bytearray([self.byte_decoder[c] for c in text]) + .decode("utf-8", errors="replace") + .replace("", " ") + ) + return text + + def __call__(self, texts, context_length=None): + if not context_length: + context_length = self.context_length + + if isinstance(texts, str): + texts = [texts] + + sot_token = self.encoder["<|startoftext|>"] + eot_token = self.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts] + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + tokens = tokens[:context_length] + result[i, : len(tokens)] = torch.tensor(tokens) + + if len(result) == 1: + return result[0] + return result + + +class IMUPreprocessor(VerboseNNModule): + def __init__( + 
self, + kernel_size: int, + imu_stem: PatchEmbedGeneric, + embed_dim: int, + img_size: Tuple = (6, 2000), + num_cls_tokens: int = 1, + pos_embed_fn: Optional[Callable] = None, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + self.imu_stem = imu_stem + self.embed_dim = embed_dim + self.use_pos_embed = pos_embed_fn is not None + self.num_cls_tokens = num_cls_tokens + self.kernel_size = kernel_size + self.pos_embed = nn.Parameter( + torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim) + ) + + if self.num_cls_tokens > 0: + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, self.embed_dim) + ) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style): + nn.init.normal_(self.pos_embed, std=0.01) + + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + def tokenize_input_and_cls_pos(self, input, stem): + # tokens is of shape B x L x D + tokens = stem.norm_layer(stem.proj(input)) + assert tokens.ndim == 3 + assert tokens.shape[2] == self.embed_dim + B = tokens.shape[0] + if self.num_cls_tokens > 0: + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + tokens = torch.cat((class_tokens, tokens), dim=1) + if self.use_pos_embed: + tokens = tokens + self.pos_embed + return tokens + + def forward(self, imu): + # Patchify + imu = imu.unfold( + -1, + self.kernel_size, + self.kernel_size, + ).permute(0, 2, 1, 3) + imu = imu.reshape(imu.size(0), imu.size(1), -1) + + imu_tokens = self.tokenize_input_and_cls_pos( + imu, + self.imu_stem, + ) + + return_dict = { + "trunk": { + "tokens": imu_tokens, + }, + "head": {}, + } + return return_dict diff --git a/ImageBind/imagebind/models/transformer.py b/ImageBind/imagebind/models/transformer.py new file mode 100755 index 0000000..6224faf --- /dev/null +++ b/ImageBind/imagebind/models/transformer.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
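+
+# Module overview: the shared transformer trunk used by every ImageBind modality.
+# Attention and Mlp follow the timm ViT implementation; BlockWithMasking adds
+# additive attention masks, DropPath, and optional LayerScale; SimpleTransformer
+# stacks these blocks with a progressive or uniform drop-path schedule and
+# optional activation checkpointing.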
+ +# Code modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ; +# https://github.com/facebookresearch/deit/blob/main/models.py +# and https://github.com/facebookresearch/vissl/blob/main/vissl/models/trunks/vision_transformer.py + + +from functools import partial +from typing import Callable, List, Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, trunc_normal_ + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, + # can set manually to be compat with prev weights + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class MultiheadAttention(nn.MultiheadAttention): + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0] + + +class ViTAttention(Attention): + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + assert attn_mask is None + return super().forward(x) + + +class BlockWithMasking(nn.Module): + def __init__( + self, + dim: int, + attn_target: Callable, + mlp_ratio: int = 4, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + ffn_dropout_rate: float = 0.0, + drop_path: float = 0.0, + layer_scale_type: Optional[str] = None, + layer_scale_init_value: float = 1e-4, + ): + super().__init__() + + assert not isinstance( + attn_target, nn.Module + ), "attn_target should be a Callable. Otherwise attn_target is shared across blocks!" 
+ self.attn = attn_target() + if drop_path > 0.0: + self.drop_path = DropPath(drop_path) + else: + self.drop_path = nn.Identity() + self.norm_1 = norm_layer(dim) + mlp_hidden_dim = int(mlp_ratio * dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=ffn_dropout_rate, + ) + self.norm_2 = norm_layer(dim) + self.layer_scale_type = layer_scale_type + if self.layer_scale_type is not None: + assert self.layer_scale_type in [ + "per_channel", + "scalar", + ], f"Found Layer scale type {self.layer_scale_type}" + if self.layer_scale_type == "per_channel": + # one gamma value per channel + gamma_shape = [1, 1, dim] + elif self.layer_scale_type == "scalar": + # single gamma value for all channels + gamma_shape = [1, 1, 1] + # two gammas: for each part of the fwd in the encoder + self.layer_scale_gamma1 = nn.Parameter( + torch.ones(size=gamma_shape) * layer_scale_init_value, + requires_grad=True, + ) + self.layer_scale_gamma2 = nn.Parameter( + torch.ones(size=gamma_shape) * layer_scale_init_value, + requires_grad=True, + ) + + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + if self.layer_scale_type is None: + x = x + self.drop_path(self.attn(self.norm_1(x), attn_mask)) + x = x + self.drop_path(self.mlp(self.norm_2(x))) + else: + x = ( + x + + self.drop_path(self.attn(self.norm_1(x), attn_mask)) + * self.layer_scale_gamma1 + ) + x = x + self.drop_path(self.mlp(self.norm_2(x))) * self.layer_scale_gamma2 + return x + + +_LAYER_NORM = partial(nn.LayerNorm, eps=1e-6) + + +class SimpleTransformer(nn.Module): + def __init__( + self, + attn_target: Callable, + embed_dim: int, + num_blocks: int, + block: Callable = BlockWithMasking, + pre_transformer_layer: Optional[Callable] = None, + post_transformer_layer: Optional[Callable] = None, + drop_path_rate: float = 0.0, + drop_path_type: str = "progressive", + norm_layer: Callable = _LAYER_NORM, + mlp_ratio: int = 4, + ffn_dropout_rate: float = 0.0, + layer_scale_type: Optional[str] = None, # from cait; possible values are None, "per_channel", "scalar" + layer_scale_init_value: float = 1e-4, # from cait; float + weight_init_style: str = "jax", # possible values jax or pytorch + ): + """ + Simple Transformer with the following features + 1. Supports masked attention + 2. Supports DropPath + 3. Supports LayerScale + 4. Supports Dropout in Attention and FFN + 5. 
Makes few assumptions about the input except that it is a Tensor + """ + super().__init__() + self.pre_transformer_layer = pre_transformer_layer + if drop_path_type == "progressive": + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)] + elif drop_path_type == "uniform": + dpr = [drop_path_rate for i in range(num_blocks)] + else: + raise ValueError(f"Unknown drop_path_type: {drop_path_type}") + + self.blocks = nn.Sequential( + *[ + block( + dim=embed_dim, + attn_target=attn_target, + mlp_ratio=mlp_ratio, + ffn_dropout_rate=ffn_dropout_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + layer_scale_type=layer_scale_type, + layer_scale_init_value=layer_scale_init_value, + ) + for i in range(num_blocks) + ] + ) + self.post_transformer_layer = post_transformer_layer + self.weight_init_style = weight_init_style + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + if self.weight_init_style == "jax": + # Based on MAE and official Jax ViT implementation + torch.nn.init.xavier_uniform_(m.weight) + elif self.weight_init_style == "pytorch": + # PyTorch ViT uses trunc_normal_ + trunc_normal_(m.weight, std=0.02) + + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, (nn.LayerNorm)): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward( + self, + tokens: torch.Tensor, + attn_mask: torch.Tensor = None, + use_checkpoint: bool = False, + checkpoint_every_n: int = 1, + checkpoint_blk_ids: Optional[List[int]] = None, + ): + """ + Inputs + - tokens: data of shape N x L x D (or L x N x D depending on the attention implementation) + - attn: mask of shape L x L + + Output + - x: data of shape N x L x D (or L x N x D depending on the attention implementation) + """ + if self.pre_transformer_layer: + tokens = self.pre_transformer_layer(tokens) + if use_checkpoint and checkpoint_blk_ids is None: + checkpoint_blk_ids = [ + blk_id + for blk_id in range(len(self.blocks)) + if blk_id % checkpoint_every_n == 0 + ] + if checkpoint_blk_ids: + checkpoint_blk_ids = set(checkpoint_blk_ids) + for blk_id, blk in enumerate(self.blocks): + if use_checkpoint and blk_id in checkpoint_blk_ids: + tokens = checkpoint.checkpoint( + blk, tokens, attn_mask, use_reentrant=False + ) + else: + tokens = blk(tokens, attn_mask=attn_mask) + if self.post_transformer_layer: + tokens = self.post_transformer_layer(tokens) + return tokens diff --git a/ImageBind/model_card.md b/ImageBind/model_card.md new file mode 100755 index 0000000..c7bb265 --- /dev/null +++ b/ImageBind/model_card.md @@ -0,0 +1,94 @@ +# Model Card for ImageBind + +Multimodal joint embedding model for image/video, text, audio, depth, IMU, and thermal images. +Input any of the six modalities and get the same sized embedding that can be used for cross-modal and multimodal tasks. + +# Model Details + +## Model Description + + +Multimodal joint embedding model for image/video, text, audio, depth, IMU, and thermal images + +- **Developed by:** Meta AI +- **Model type:** Multimodal model +- **Language(s) (NLP):** en +- **License:** CC BY-NC-SA 4.0 +- **Resources for more information:** + - [GitHub Repo](https://github.com/facebookresearch/ImageBind) + + +# Uses + + +This model is intended only for research purposes. It provides a joint embedding space for different modalities -- image/video, text, audio, depth, IMU and thermal images. 
+We hope that these joint embeddings can be used for a variety of different cross-modal research, e.g., cross-modal retrieval and combining embeddings from different modalities.
+
+## Out-of-Scope Use
+
+
+This model is *NOT* intended to be used in any real world application -- commercial or otherwise.
+It may produce harmful associations with different inputs.
+The model needs to be investigated and likely re-trained on specific data for any such application.
+The model is expected to work better on web-based visual data since it was trained on such data.
+The text encoder is likely to work only on English language text because of the underlying training datasets.
+
+# Bias, Risks, and Limitations
+
+
+Open-domain joint embedding models are prone to producing specific biases, e.g., see the bias study from [CLIP](https://github.com/openai/CLIP/blob/main/model-card.md#bias-and-fairness).
+Since our model uses such models as initialization, it will exhibit such biases too.
+Moreover, for learning joint embeddings for other modalities such as audio, thermal, depth, and IMU, we leverage datasets that are relatively small. These joint embeddings are thus limited to the concepts present in the datasets. For example, the thermal datasets we used are limited to outdoor street scenes, while the depth datasets are limited to indoor scenes.
+
+
+# Training Details
+
+## Training Data
+
+
+ImageBind uses image-paired data for training -- (image, X) where X is one of text, audio, depth, IMU or thermal data.
+In particular, we initialize and freeze the image and text encoders using an OpenCLIP ViT-H encoder.
+We train audio embeddings using Audioset, depth embeddings using the SUN RGB-D dataset, IMU using the Ego4D dataset and thermal embeddings using the LLVIP dataset.
+We provide the exact training data details in the paper.
+
+
+## Training Procedure
+
+
+Please refer to the research paper and GitHub repo for the exact details.
+
+# Evaluation
+
+## Testing Data, Factors & Metrics
+
+We evaluate the model on a variety of different classification benchmarks for each modality.
+The evaluation details are presented in the paper.
+The model's performance is measured using standard classification metrics such as accuracy and mAP.
+
+# Citation
+
+
+**BibTeX:**
+```
+@inproceedings{girdhar2023imagebind,
+  title={ImageBind: One Embedding Space To Bind Them All},
+  author={Girdhar, Rohit and El-Nouby, Alaaeldin and Liu, Zhuang
+and Singh, Mannat and Alwala, Kalyan Vasudev and Joulin, Armand and Misra, Ishan},
+  booktitle={CVPR},
+  year={2023}
+}
+```
+
+
+# Model Card Contact
+
+Please reach out to the authors at: rgirdhar@meta.com, imisra@meta.com, alaaelnouby@gmail.com
+
+# How to Get Started with the Model
+
+Our GitHub repo provides a simple example of extracting embeddings from images, audio, etc.
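+
+A minimal sketch of that usage, following the upstream ImageBind demo; it assumes the `imagebind.data` loading helpers are available in this package and uses the example assets shipped under `.assets/` (adjust the paths for your own files):
+
+```python
+import torch
+
+from imagebind import data
+from imagebind.models import imagebind_model
+from imagebind.models.imagebind_model import ModalityType
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# Instantiate the model; pretrained=True downloads the weights to .checkpoints/
+# (sketch assumes the upstream imagebind.data helpers; adjust asset paths as needed)
+model = imagebind_model.imagebind_huge(pretrained=True)
+model.eval()
+model.to(device)
+
+# Load and transform a few paired inputs from the bundled example assets
+inputs = {
+    ModalityType.TEXT: data.load_and_transform_text(["a dog.", "a car."], device),
+    ModalityType.VISION: data.load_and_transform_vision_data(
+        [".assets/dog_image.jpg", ".assets/car_image.jpg"], device
+    ),
+    ModalityType.AUDIO: data.load_and_transform_audio_data(
+        [".assets/dog_audio.wav", ".assets/car_audio.wav"], device
+    ),
+}
+
+with torch.no_grad():
+    embeddings = model(inputs)
+
+# The embeddings are L2-normalized, so inner products act as similarity scores
+print(torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1))
+```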
diff --git a/ImageBind/requirements.txt b/ImageBind/requirements.txt new file mode 100755 index 0000000..e69de29 diff --git a/ImageBind/setup.py b/ImageBind/setup.py new file mode 100755 index 0000000..ab11c2b --- /dev/null +++ b/ImageBind/setup.py @@ -0,0 +1,23 @@ +from setuptools import setup, find_packages + +with open('requirements.txt') as f: + required = f.read().splitlines() + +setup( + name='imagebind', + version='0.1.0', + packages=find_packages(), + package_data={ + 'imagebind': ['bpe/bpe_simple_vocab_16e6.txt.gz'], + }, + description='A brief description of the package', + long_description=open('README.md', encoding='utf-8').read(), + long_description_content_type="text/markdown", + url='https://github.com/facebookresearch/ImageBind', + classifiers=[ + 'Programming Language :: Python :: 3', + 'License :: Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International', + ], + install_requires=required, + dependency_links=['https://download.pytorch.org/whl/cu113'], +) diff --git a/LICENSE b/LICENSE new file mode 100755 index 0000000..bfef380 --- /dev/null +++ b/LICENSE @@ -0,0 +1,437 @@ +Attribution-NonCommercial-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. 
Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International +Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial-ShareAlike 4.0 International Public License +("Public License"). To the extent this Public License may be +interpreted as a contract, You are granted the Licensed Rights in +consideration of Your acceptance of these terms and conditions, and the +Licensor grants You such rights in consideration of benefits the +Licensor receives from making the Licensed Material available under +these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-NC-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution, NonCommercial, and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. 
Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + l. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + m. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + n. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. 
+ Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. 
The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-NC-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + including for purposes of Section 3(b); and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. 
Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. 
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6e08677
--- /dev/null
+++ b/README.md
@@ -0,0 +1,215 @@
+# VideoRAG: Retrieval-Augmented Generation with Extreme Long-Context Videos
+
+
+This is the PyTorch implementation for VideoRAG proposed in this paper:
+
+>**VideoRAG: Retrieval-Augmented Generation with Extreme Long-Context Videos**
+>Xubin Ren*, Lingrui Xu*, Long Xia, Shuaiqiang Wang, Dawei Yin, Chao Huang†
+
+\* denotes equal contribution.
+† denotes the corresponding author.
+
+In this paper, we propose a retrieval-augmented generation framework specifically designed for processing and understanding **extremely long-context videos**.
+
+## VideoRAG Framework
+
+
+    <img src="VideoRAG.png" alt="VideoRAG" />
+
+
+VideoRAG introduces a novel dual-channel architecture that synergistically combines graph-driven textual knowledge grounding, which models cross-video semantic relationships, with hierarchical multimodal context encoding, which preserves spatiotemporal visual patterns. This design enables unbounded-length video understanding: dynamically constructed knowledge graphs maintain semantic coherence across multi-video contexts, while adaptive multimodal fusion mechanisms keep retrieval efficient.
+
+## Installation
+
+To utilize VideoRAG, please first create a conda environment with the following commands:
+```bash
+conda create --name videorag python=3.11
+conda activate videorag
+
+pip install numpy==1.26.4
+pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2
+pip install accelerate==0.30.1
+pip install bitsandbytes==0.43.1
+pip install moviepy==1.0.3
+pip install git+https://github.com/facebookresearch/pytorchvideo.git@28fe037d212663c6a24f373b94cc5d478c8c1a1d
+pip install timm==0.6.7 ftfy regex einops fvcore eva-decord==0.6.1 iopath matplotlib types-regex cartopy
+pip install ctranslate2==4.4.0 faster_whisper neo4j hnswlib xxhash nano-vectordb
+pip install transformers==4.37.1
+pip install tiktoken openai tenacity
+
+# Install ImageBind using the provided code in this repository, where we have removed the requirements.txt to avoid environment conflicts.
+cd ImageBind
+pip install .
+```
+
+Then, please download the necessary checkpoints in **the repository's root folder** for MiniCPM-V, Whisper, and ImageBind as follows:
+```bash
+# minicpm-v
+git clone https://huggingface.co/openbmb/MiniCPM-V-2_6-int4
+
+# whisper
+git clone https://huggingface.co/Systran/faster-distil-whisper-large-v3
+
+# imagebind
+mkdir .checkpoints
+cd .checkpoints
+wget https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth
+cd ../
+```
+
+## Quick Start
+
+VideoRAG is capable of extracting knowledge from multiple videos and answering queries based on those videos. Now, try VideoRAG with your own videos 🤗.
+
+> [!NOTE]
+> Currently, VideoRAG has only been tested in an English environment. To process videos in multiple languages, it is recommended to modify the `WhisperModel` in [asr.py](VideoRAG/videorag/_videoutil/asr.py). For more details, please refer to [faster-whisper](https://github.com/systran/faster-whisper).
+
+**First**, let VideoRAG extract and index the knowledge from the given videos (a single GPU with 24GB of memory is sufficient, such as an RTX 3090):
+```python
+import os
+import logging
+import warnings
+import multiprocessing
+
+warnings.filterwarnings("ignore")
+logging.getLogger("httpx").setLevel(logging.WARNING)
+
+# Please enter your OpenAI API key
+os.environ["OPENAI_API_KEY"] = ""
+
+from videorag._llm import *
+from videorag import VideoRAG, QueryParam
+
+
+if __name__ == '__main__':
+    multiprocessing.set_start_method('spawn')
+
+    # Please enter your video file paths in this list; there is no limit on the length.
+    # Here is an example; you can use your own videos instead.
+    video_paths = [
+        'movies/Iron-Man.mp4',
+        'movies/Spider-Man.mkv',
+    ]
+    videorag = VideoRAG(cheap_model_func=gpt_4o_mini_complete, best_model_func=gpt_4o_mini_complete, working_dir="./videorag-workdir")
+    videorag.insert_video(video_path_list=video_paths)
+```
+
+**Then**, ask any questions about the videos! Here is an example:
+
+**First**, let VideoRAG extract and index the knowledge from the given videos (a single GPU with 24GB of memory, such as an RTX 3090, is sufficient):
+```python
+import os
+import logging
+import warnings
+import multiprocessing
+
+warnings.filterwarnings("ignore")
+logging.getLogger("httpx").setLevel(logging.WARNING)
+
+# Please enter your OpenAI API key
+os.environ["OPENAI_API_KEY"] = ""
+
+from videorag._llm import *
+from videorag import VideoRAG, QueryParam
+
+
+if __name__ == '__main__':
+    multiprocessing.set_start_method('spawn')
+
+    # Please enter your video file paths in this list; there is no limit on the number of videos.
+    # Here is an example; you can use your own videos instead.
+    video_paths = [
+        'movies/Iron-Man.mp4',
+        'movies/Spider-Man.mkv',
+    ]
+    videorag = VideoRAG(cheap_model_func=gpt_4o_mini_complete, best_model_func=gpt_4o_mini_complete, working_dir="./videorag-workdir")
+    videorag.insert_video(video_path_list=video_paths)
+```
+
+**Then**, ask any questions about the videos! Here is an example:
+```python
+import os
+import logging
+import warnings
+import multiprocessing
+
+warnings.filterwarnings("ignore")
+logging.getLogger("httpx").setLevel(logging.WARNING)
+
+# Please enter your OpenAI API key
+os.environ["OPENAI_API_KEY"] = ""
+
+from videorag._llm import *
+from videorag import VideoRAG, QueryParam
+
+
+if __name__ == '__main__':
+    multiprocessing.set_start_method('spawn')
+
+    query = 'What is the relationship between Iron Man and Spider-Man? How do they meet, and how does Iron Man help Spider-Man?'
+    param = QueryParam(mode="videorag")
+    # if param.wo_reference is set to False, VideoRAG will add references to video clips in the response
+    param.wo_reference = True
+
+    videorag = VideoRAG(cheap_model_func=gpt_4o_mini_complete, best_model_func=gpt_4o_mini_complete, working_dir="./videorag-workdir")
+    videorag.load_caption_model(debug=False)
+    response = videorag.query(query=query, param=param)
+    print(response)
+```
+
+## Evaluation
+
+### LongerVideos
+We constructed the LongerVideos benchmark to evaluate a model's ability to comprehend multiple long-context videos and answer open-ended queries. All the videos are open-access videos on YouTube; we record the URLs of the video collections as well as the corresponding queries in the [JSON](longervideos/dataset.json) file.
+
+| Video Type | #video list | #video | #query | #avg. queries per list | #overall duration |
+|------------------|------------:|-------:|-------:|-----------------------:|-------------------------|
+| **Lecture** | 12 | 135 | 376 | 31.3 | ~ 64.3 hours |
+| **Documentary** | 5 | 12 | 114 | 22.8 | ~ 28.5 hours |
+| **Entertainment**| 5 | 17 | 112 | 22.4 | ~ 41.9 hours |
+| **All** | 22 | 164 | 602 | 27.4 | ~ 134.6 hours |
+
+### Process LongerVideos with VideoRAG
+
+First, download all the videos in 720p resolution using `yt-dlp`. You can use the following commands to download all the videos in LongerVideos:
+
+```shell
+cd longervideos
+python prepare_videos.py # create collection folders
+sh download.sh # download videos
+```
+
+Then, you can run the following example command to process and answer queries for LongerVideos with VideoRAG:
+
+```shell
+# Please enter your openai_key in line 18 at first
+python videorag_experiment.py --collection 4-rag-lecture --cuda 0
+```
+
+### Evaluation
+
+We conduct win-rate comparisons as well as quantitative comparisons, evaluating against RAG-based baselines and long-context video understanding methods separately. **NaiveRAG, GraphRAG, and LightRAG** are implemented with the `nano-graphrag` library, which also underlies our VideoRAG, ensuring a fair comparison.
+
+In this part, we directly provide the **answers from all the methods** (including VideoRAG) as well as the evaluation code for reproducing the experiments. Please use the following commands to download the answers:
+
+```shell
+cd reproduce
+wget https://archive.org/download/videorag/all_answers.zip
+unzip all_answers.zip
+```
+
+#### Win-Rate Comparison
+
+We conduct the win-rate comparison with RAG-based baselines. To reproduce the results, please follow these steps:
+
+```shell
+cd reproduce/winrate_comparison
+
+# First Step: Upload the batch request to OpenAI (remember to enter your key in the file; the same applies to the following steps).
+python batch_winrate_eval_upload.py
+
+# Second Step: Download the results. Please enter the batch ID and then the output file ID in the file. Generally, you need to run this twice: first to obtain the output file ID, and then to download it.
+python batch_winrate_eval_download.py
+
+# Third Step: Parse the results. Please enter the output file ID in the file.
+python batch_winrate_eval_download.py
+
+# Fourth Step: Calculate the results. Please enter the parsed result file name in the file.
+python batch_winrate_eval_calculate.py
+```
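+The four steps above (and the quantitative comparison below) map onto OpenAI's Batch API. Here is a minimal sketch of that flow, assuming the `openai` Python client (v1.x); the file names `requests.jsonl`/`results.jsonl` and the status check are illustrative, not taken from the evaluation scripts:
+```python
+from openai import OpenAI
+
+client = OpenAI()  # reads OPENAI_API_KEY from the environment
+
+# Upload the prepared batch request file, then create the batch job.
+batch_file = client.files.create(file=open("requests.jsonl", "rb"), purpose="batch")
+batch = client.batches.create(
+    input_file_id=batch_file.id,
+    endpoint="/v1/chat/completions",
+    completion_window="24h",
+)
+print("batch ID:", batch.id)
+
+# Later, retrieve the batch by ID; once its status is "completed",
+# it exposes an output file ID whose content holds the evaluations.
+status = client.batches.retrieve(batch.id)
+if status.status == "completed":
+    result = client.files.content(status.output_file_id)
+    with open("results.jsonl", "wb") as f:
+        f.write(result.read())
+```
+This is why the download step is typically run twice: the first run (given the batch ID) reveals the output file ID, and the second run (given the output file ID) downloads the results.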
+
+#### Quantitative Comparison
+
+We conduct a quantitative comparison, which extends the win-rate comparison by assigning a 5-point score to long-context video understanding methods. We use the answers from NaiveRAG as the baseline response for scoring each query. To reproduce the results, please follow these steps:
+
+```shell
+cd reproduce/quantitative_comparison
+
+# First Step: Upload the batch request to OpenAI (remember to enter your key in the file; the same applies to the following steps).
+python batch_winrate_quant_upload.py
+
+# Second Step: Download the results. Please enter the batch ID and then the output file ID in the file. Generally, you need to run this twice: first to obtain the output file ID, and then to download it.
+python batch_winrate_quant_download.py
+
+# Third Step: Parse the results. Please enter the output file ID in the file.
+python batch_winrate_quant_download.py
+
+# Fourth Step: Calculate the results. Please enter the parsed result file name in the file.
+python batch_winrate_quant_calculate.py
+```
+
+### Acknowledgement
+Our framework and code repository build on related work, in particular [nano-graphrag](https://github.com/gusye1234/nano-graphrag) and [LightRAG](https://github.com/HKUDS/LightRAG). Thanks for their wonderful work.
+
+**Thank you for your interest in our work!**
\ No newline at end of file
diff --git a/VideoRAG.png b/VideoRAG.png
new file mode 100644
index 0000000..b2c6f70
Binary files /dev/null and b/VideoRAG.png differ
diff --git a/VideoRAG_cover.png b/VideoRAG_cover.png
new file mode 100644
index 0000000..17b5a51
Binary files /dev/null and b/VideoRAG_cover.png differ
diff --git a/longervideos/dataset.json b/longervideos/dataset.json
new file mode 100755
index 0000000..9c51493
--- /dev/null
+++ b/longervideos/dataset.json
@@ -0,0 +1,2794 @@
+{ + "0": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=lyd5Q77qHKA" + ], + "description": "fights-in-animal-kingdom", + "questions": [ + { + "id": 0, + "question": "What prompts the otters to engage with the caiman?" + }, + { + "id": 1, + "question": "How do chimpanzees communicate and coordinate their roles as drivers, blockers, and ambushers?" + }, + { + "id": 2, + "question": "Do chimpanzees have a designated leader who dictates the strategy, or is it a more fluid process based on individual initiative and cues from the environment?" + }, + { + "id": 3, + "question": "What factors ultimately determined the outcome of the dramatic encounter between a group of wolves and muskox?" + }, + { + "id": 4, + "question": "How do hippos, known for their aggressive nature, manage to integrate a new calf into the group without causing harm?" + }, + { + "id": 5, + "question": "What are the ecological and evolutionary factors that have driven the development of the Japanese honeybees' heat-balling technique?" + }, + { + "id": 6, + "question": "What are the ecological and evolutionary factors that have driven the development of the killer whale's beaching behavior?"
+ }, + { + "id": 7, + "question": "Considering the vulnerability of freshly molted crabs, how do they determine the optimal timing for their mass molting event?" + }, + { + "id": 8, + "question": "The video showcases the incredible journey of caribou as they migrate across vast distances. What navigational cues do they use to maintain their course and reach their destination?" + }, + { + "id": 9, + "question": "Describe the dramatic fight between two male ibex competing for access to females at a waterhole." + }, + { + "id": 10, + "question": "Describe the challenges faced by mother bears securing salmon." + } + ], + "type": "documentary" + } + ], + "1": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=7ZhdXgRfxHI" + ], + "description": "nature-scenes", + "questions": [ + { + "id": 0, + "question": "How do flying fish evade predators like the dorado and frigate birds?" + }, + { + "id": 1, + "question": "How do Darwin's bark spiders construct their webs, and what makes their silk so special?" + }, + { + "id": 2, + "question": "What unusual behavior do vampire finches exhibit on the Galapagos Islands, and what are some possible explanations for this behavior?" + }, + { + "id": 3, + "question": "How do poison dart frogs care for their young?" + }, + { + "id": 4, + "question": "What challenges do Humboldt penguins face when trying to reach the sea for food in Peru, and how do they overcome these challenges?" + }, + { + "id": 5, + "question": "How do army ants coordinate their hunting efforts and overcome obstacles in their path?" + }, + { + "id": 6, + "question": "How do the giant cuttlefish of Australia reproduce?" + }, + { + "id": 7, + "question": "What challenges do red crabs face during their annual migration on Christmas Island?" + }, + { + "id": 8, + "question": "How do the red-legged seriema birds manage to avoid predators like the jaguar and the harpy eagle?" + }, + { + "id": 9, + "question": "How do the hunting strategies of arctic wolves and cheetahs differ when pursuing caribou?" + }, + { + "id": 10, + "question": "How do the reproductive strategies of male and female giant cuttlefish differ?" + }, + { + "id": 11, + "question": "How does the symbiotic relationship between saddleback clownfish and carpet anemones benefit both species?" + }, + { + "id": 12, + "question": "How have Eden's whales adapted their hunting behavior in response to declining fish populations due to agricultural pollution?" + }, + { + "id": 13, + "question": "What makes the silk of Darwin's bark spider unique, and how does this property relate to its web-building strategy?" + }, + { + "id": 14, + "question": "How does the Portuguese man-of-war capture and consume its prey?" + }, + { + "id": 15, + "question": "What is the nature of its relationship with the man-of-war fish?" + }, + { + "id": 16, + "question": "What challenges do puffin parents face when raising their chicks, and how have declining fish numbers impacted their ability to provide for their offspring?" + } + ], + "type": "documentary" + } + ], + "2": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=5nwVzy2QJKM", + "https://www.youtube.com/watch?v=OlL9xwg5h5k", + "https://www.youtube.com/watch?v=O2AFzHERkDc", + "https://www.youtube.com/watch?v=A2vwQOaVg-E" + ], + "description": "climate-week-at-columbia-engineering", + "questions": [ + { + "id": 0, + "question": "What role does the spherical tokamak design play in Tokamak Energy LLC's approach to fusion?" 
+ }, + { + "id": 1, + "question": "Why did the triple product in fusion research stop accelerating in the late 1990s/early 2000s?" + }, + { + "id": 2, + "question": "What is the potential impact of fusion for climate and space exploration?" + }, + { + "id": 3, + "question": "What is the Department of Energy's (DOE) Milestone program?" + }, + { + "id": 4, + "question": "How can symbolic regression be used to improve climate modeling?" + }, + { + "id": 5, + "question": "How can we improve the trustworthiness and interpretability of climate models that use machine learning?" + }, + { + "id": 6, + "question": "How does the Software Carbon Intensity (SCI) standard apply to AI?" + }, + { + "id": 7, + "question": "How do researchers at Columbia University work to close data gaps in air pollution monitoring?" + }, + { + "id": 8, + "question": "What is the idea behind the air sensors lab at Columbia's Lamont Doherty Earth Observatory?" + }, + { + "id": 9, + "question": "How do researchers calibrate low-cost air quality sensors to provide more accurate data?" + }, + { + "id": 10, + "question": "What are the challenges of transitioning from charcoal to LPG as a cooking fuel in Ghana, and what are the potential benefits?" + }, + { + "id": 11, + "question": "How does AI impact sustainable computing's energy demands?" + }, + { + "id": 12, + "question": "What are the main challenges hindering fusion energy commercialization?" + }, + { + "id": 13, + "question": "Outline the timeline for a fusion pilot plant, ideal vs. realistic." + }, + { + "id": 14, + "question": "What are the economic benefits and opportunities that could arise from successful commercialization of fusion energy?" + }, + { + "id": 15, + "question": "How does the spherical tokamak design differ from traditional tokamaks, and what potential advantages does it offer?" + }, + { + "id": 16, + "question": "What specific engineering challenges are currently hindering the progress of fusion energy development?" + }, + { + "id": 17, + "question": "What are some specific examples of how AI has already been successfully applied to improve climate modeling?" + }, + { + "id": 18, + "question": "How can AI be used to improve the accuracy and resolution of climate predictions, particularly at regional and local scales?" + }, + { + "id": 19, + "question": "What are some of the benchmark datasets and metrics being used to evaluate the performance of AI-based climate models?" + }, + { + "id": 20, + "question": "What are some of the techniques being explored to improve the energy efficiency of data centers, particularly in the context of growing AI workloads?" + }, + { + "id": 21, + "question": "What are some of the challenges in accurately measuring the carbon footprint of AI systems, and what initiatives are underway to address these challenges?" + }, + { + "id": 22, + "question": "What are some of the policy and regulatory options for mitigating the environmental impact of AI, and what are the trade-offs associated with different approaches?" + }, + { + "id": 23, + "question": "What are the challenges and opportunities in using low-cost air quality sensors to inform policy decisions and community action?" + }, + { + "id": 24, + "question": "What are the social and economic factors that influence the adoption of clean energy technologies in developing countries, and how can interventions be designed to address these barriers?" 
+ }, + { + "id": 25, + "question": "What are the potential benefits of integrating social and economic considerations into building energy models and decarbonization plans?" + } + ], + "type": "lecture" + } + ], + "3": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=YASXk7Pu8HA", + "https://www.youtube.com/watch?v=6SAn0qQJTrE", + "https://www.youtube.com/watch?v=z6-56Hotizo", + "https://www.youtube.com/watch?v=c3feO_Lkp5A", + "https://www.youtube.com/watch?v=cHOGc6vW4Pc", + "https://www.youtube.com/watch?v=FsawILngs9E", + "https://www.youtube.com/watch?v=zRjDebvjxaY", + "https://www.youtube.com/watch?v=pKCkCJhTHA8", + "https://www.youtube.com/watch?v=TmnRuIFn1iU", + "https://www.youtube.com/watch?v=_FLt-6AMbx8" + ], + "description": "black-myth-wukong", + "questions": [ + { + "id": 0, + "question": "What overarching themes of freedom, destiny, and morality emerge?" + }, + { + "id": 1, + "question": "What are the central conflicts and their resolutions in this story?" + }, + { + "id": 2, + "question": "How does the narrative explore the nature of power and its consequences?" + }, + { + "id": 3, + "question": "How does the game use visual storytelling to convey its themes?" + }, + { + "id": 4, + "question": "What specific in-game mechanics are described in Part 1?" + }, + { + "id": 5, + "question": "What gameplay mechanics are described in the excerpts?" + }, + { + "id": 6, + "question": "What are the strengths and weaknesses of the 'Tiger-Snake' enemies encountered in Chapter 5?" + }, + { + "id": 7, + "question": "What significant events occur in Black Wind Mountain?" + }, + { + "id": 8, + "question": "What are some of the challenges the player faces in mastering the combat system?" + }, + { + "id": 9, + "question": "What strategies can players employ to effectively counter the attacks of enemies that inflict frost status ailments?" + }, + { + "id": 10, + "question": "How does the 'Self Advance' system enhance the player's combat abilities?" + }, + { + "id": 11, + "question": "What are the distinct combat advantages of using the spear compared to the staff?" + }, + { + "id": 12, + "question": "What are the different stances or forms associated with the staff, and how do they affect the player's combat approach?" + }, + { + "id": 13, + "question": "What story does the mural in the Water Curtain Cave tell, and how does it relate to the events of the game?" + }, + { + "id": 14, + "question": "What is the significance of the statues depicting the 'Jade Emperor' in the game's world?" + }, + { + "id": 15, + "question": "What challenges does the player encounter when fighting enemies in the cave environments of Chapter 5?" + }, + { + "id": 16, + "question": "How does the camera behave differently during combat with certain enemies?" + }, + { + "id": 17, + "question": "What is the purpose of the 'Keeper Shrines' found throughout the game world?" + }, + { + "id": 18, + "question": "What notable environmental features are present in the 'Valley of Ecstasy'?" + }, + { + "id": 19, + "question": "What visually distinctive element guides the player through the environments?" + }, + { + "id": 20, + "question": "How does the player acquire new spells?" + }, + { + "id": 21, + "question": "What effect does the 'Knot of Voidness' item have on the player's abilities?" + }, + { + "id": 22, + "question": "What is unique about the staff the player receives as a reward from the character in the 'Valley of Ecstasy'?" 
+ } + ], + "type": "entertainment" + } + ], + "4": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=45LJT-bt500", + "https://www.youtube.com/watch?v=g21royNJ4fw", + "https://www.youtube.com/watch?v=kzP1sFynhxE", + "https://www.youtube.com/watch?v=oageL-1I0GE", + "https://www.youtube.com/watch?v=Hj7PuK1bMZU", + "https://www.youtube.com/watch?v=YPs4eGDpIY4", + "https://www.youtube.com/watch?v=6efwN_US-zk", + "https://www.youtube.com/watch?v=tmiBae2goJM", + "https://www.youtube.com/watch?v=w5WGbUGAE3s", + "https://www.youtube.com/watch?v=DI9Q60T_054", + "https://www.youtube.com/watch?v=UZg_xyIS9_E", + "https://www.youtube.com/watch?v=Fv_j52DDJUE", + "https://www.youtube.com/watch?v=SrXjuNRTOcI", + "https://www.youtube.com/watch?v=W-iUd_pjOQA", + "https://www.youtube.com/watch?v=rhJJynv47Pw", + "https://www.youtube.com/watch?v=QQAkXHRJcZg", + "https://www.youtube.com/watch?v=EwtoG-f1mLk", + "https://www.youtube.com/watch?v=Rg35oYuus-w", + "https://www.youtube.com/watch?v=vX3A96_F3FU" + ], + "description": "rag-lecture", + "questions": [ + { + "id": 0, + "question": "Describe the main differences between the two RAG systems mentioned in the video (text-based and vision-based)." + }, + { + "id": 1, + "question": "What are the advantages of vision-based RAG over text-based RAG?" + }, + { + "id": 2, + "question": "Comparative analysis of the advantages and disadvantages of ColPali and traditional RAG in PDF processing." + }, + { + "id": 3, + "question": "Explain Anthropic's prompt caching mechanism and its difference with Gemini context caching." + }, + { + "id": 4, + "question": "Describe the core difference between traditional RAG and Agentic RAG, highlighting the role of agents." + }, + { + "id": 5, + "question": "Explain the process of query refinement and iterative retrieval within the Agentic RAG framework." + }, + { + "id": 6, + "question": "Compare and contrast the functionality and performance of localGPT-Vision with traditional RAG pipelines." + }, + { + "id": 7, + "question": "Explain ColPali's vision-based approach to RAG." + }, + { + "id": 8, + "question": "How does ColPail improve document retrieval?" + }, + { + "id": 9, + "question": "How does ColPali improve PDF processing?" + }, + { + "id": 10, + "question": "How does LightRAG compare to GraphRAG in terms of cost and performance?" + }, + { + "id": 11, + "question": "Discuss the role of keyword-based search mechanisms (e.g., BM25) in improving RAG performance." + }, + { + "id": 12, + "question": "Discuss the limitations of RAG systems." + }, + { + "id": 13, + "question": "Discuss the Framework of GraphRAG." + }, + { + "id": 14, + "question": "How to build Multi-modal RAG System?" + }, + { + "id": 15, + "question": "What are the key components of the Local GPT Vision system?" + }, + { + "id": 16, + "question": "How does Gemini's ability to understand the visual layout of a PDF, such as the placement of figures and tables, impact its accuracy in extracting information like captions and reference lists compared to traditional RAG systems?" + }, + { + "id": 17, + "question": "When to use prompt caching?" + }, + { + "id": 18, + "question": "Discuss the relationships between ColPali and LocalGPT-Vision." + }, + { + "id": 19, + "question": "Using ColBERT as an example to explain the benefits of NotebookLM compared to the standard RAG architecture." + }, + { + "id": 20, + "question": "What are the key differences between Anthropic and Gemini's caching approaches?" 
+ }, + { + "id": 21, + "question": "Compare and contrast Gemini Flash's PDF processing capabilities with those of GPT-4, highlighting specific examples from the transcript." + }, + { + "id": 22, + "question": "Discuss the economic implications of using Gemini Flash versus traditional RAG systems for PDF processing, considering file size." + }, + { + "id": 23, + "question": "Analyze Gemini Flash's multi-modal capabilities in handling PDFs containing images, tables, and text, citing specific examples." + }, + { + "id": 24, + "question": "What are the limitations of using vision-language models like Quin-2 for local multimodal RAG implementations, especially concerning resource requirements and model performance on complex documents." + }, + { + "id": 25, + "question": "How does the concept of 'late chunking' presented in the sources challenge traditional notions of chunking in RAG?" + }, + { + "id": 26, + "question": "How might the evolution of long-context models like Gemini Flash impact the future of RAG development, particularly in terms of balancing cost, efficiency, and retrieval accuracy?" + }, + { + "id": 27, + "question": "What are the potential ethical concerns of using a proprietary API like Gemini for PDF processing, compared to open-source RAG solutions? Consider factors like data privacy, vendor lock-in, and the transparency of model behavior." + }, + { + "id": 28, + "question": "Compare and contrast Anthropic's prompt caching with Google's context caching for cost and latency reduction." + }, + { + "id": 29, + "question": "What is prompt caching and how does it work?" + }, + { + "id": 30, + "question": "How does prompt caching compare to traditional RAG in terms of cost and efficiency?" + }, + { + "id": 31, + "question": "What are the limitations of prompt caching and when is traditional RAG still a better option?" + }, + { + "id": 32, + "question": "What are some advanced RAG techniques and how do they address the limitations of basic RAG?" + }, + { + "id": 33, + "question": "Discuss the role of keyword-based search mechanisms (e.g., BM25) in improving RAG performance." + }, + { + "id": 34, + "question": "How does Anthropic's contextual retrieval approach compare to other techniques like late chunking in long context embedding models?" + }, + { + "id": 35, + "question": "Discuss the chunking strategies in RAG." + }, + { + "id": 36, + "question": "What are the potential benefits and drawbacks of 'late chunking' in terms of retrieval effectiveness and computational cost?" + }, + { + "id": 37, + "question": "How does 'late chunking' enhance retrieval system accuracy?" 
+ } + ], + "type": "lecture" + } + ], + "5": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=KxBWU96zfBY", + "https://www.youtube.com/watch?v=KliTr9N0Ayw", + "https://www.youtube.com/watch?v=B8s-FyN4UeE", + "https://www.youtube.com/watch?v=KIvl-VY8H0Y", + "https://www.youtube.com/watch?v=ob45YmYD2KI", + "https://www.youtube.com/watch?v=ut8qStGS7YM", + "https://www.youtube.com/watch?v=Rcm1kh6HVdg", + "https://www.youtube.com/watch?v=I2F9H7mNJCI", + "https://www.youtube.com/watch?v=eLiMpEIRBzY", + "https://www.youtube.com/watch?v=nrW__jof8pg", + "https://www.youtube.com/watch?v=aRHl-XS6Za0", + "https://www.youtube.com/watch?v=fDErWDOT4XE", + "https://www.youtube.com/watch?v=ybau-0ZIsMc", + "https://www.youtube.com/watch?v=VwbBBcvsWZM", + "https://www.youtube.com/watch?v=a5OW5UAyC3E", + "https://www.youtube.com/watch?v=nTOMrRSJDJw", + "https://www.youtube.com/watch?v=dOgfxt6Usok", + "https://www.youtube.com/watch?v=od6AaKhKYmg", + "https://www.youtube.com/watch?v=CdWGS0VT9gQ", + "https://www.youtube.com/watch?v=T48MPzkzMAM", + "https://www.youtube.com/watch?v=_XOCAVsr3KU", + "https://www.youtube.com/watch?v=b-wk1ZJKl0s", + "https://www.youtube.com/watch?v=rpAtVIZB72U", + "https://www.youtube.com/watch?v=C54nXpmm-6c", + "https://www.youtube.com/watch?v=28w5uFiX-po", + "https://www.youtube.com/watch?v=Ra8n_9wnHFs", + "https://www.youtube.com/watch?v=gKxUqewzNSc", + "https://www.youtube.com/watch?v=ZQ7gpMVMaKQ", + "https://www.youtube.com/watch?v=-4JHQg1jubw", + "https://www.youtube.com/watch?v=Z6UI9r1y0_M", + "https://www.youtube.com/watch?v=8qFRdk_kZIo", + "https://www.youtube.com/watch?v=Oasl9rSJNds", + "https://www.youtube.com/watch?v=_1dS6ddf4uU", + "https://www.youtube.com/watch?v=LBih635lzps", + "https://www.youtube.com/watch?v=lnWrF-xcwq0", + "https://www.youtube.com/watch?v=KYvVl0UT1Sk", + "https://www.youtube.com/watch?v=t9fynSaqE5c", + "https://www.youtube.com/watch?v=p82aMGJJLU8", + "https://www.youtube.com/watch?v=M4JMfVZ7LPQ" + ], + "description": "ai-agent-lecture", + "questions": [ + { + "id": 0, + "question": "How does MemGPT integrate local open-source LLMs for processing?" + }, + { + "id": 1, + "question": "Discuss various methods for integrating local LLMs into agent frameworks." + }, + { + "id": 2, + "question": "Compare Mistral AI's and OpenAI's agent frameworks." + }, + { + "id": 3, + "question": "How does Mistral AI's agent builder function?" + }, + { + "id": 4, + "question": "What are the key features of AutoGen Studio?" + }, + { + "id": 5, + "question": "What are the performance trade-offs of local versus cloud LLMs in MemGPT?" + }, + { + "id": 6, + "question": "How does Anthropic's Claude 3 address LLM limitations?" + }, + { + "id": 7, + "question": "How does Anthropic's system prompt design differ from typical approaches?" + }, + { + "id": 8, + "question": "What methods are used to improve LLM retrieval accuracy?" + }, + { + "id": 9, + "question": "What are the advantages and disadvantages of local vs. cloud LLMs in AutoGen Studio?" + }, + { + "id": 10, + "question": "What technical challenges arise when using local LLMs with MemGPT?" + }, + { + "id": 11, + "question": "How do proprietary data and APIs impact GPT replicability?" + }, + { + "id": 12, + "question": "How are AI agents used in financial analysis?" + }, + { + "id": 13, + "question": "How do different AI agent frameworks compare?" + }, + { + "id": 14, + "question": "What are some examples of custom GPTs discussed?" 
+ }, + { + "id": 15, + "question": "How do Claude 3's different models compare in their system prompts?" + }, + { + "id": 16, + "question": "What are the key features of Claude 3.5 Sonnet's system prompt?" + }, + { + "id": 17, + "question": "What methods are used by OpenAI's SWARM for multi-agent system orchestration?" + }, + { + "id": 18, + "question": "How does Mistral AI's agent builder simplify agent creation?" + }, + { + "id": 19, + "question": "What limitations exist in current agentic frameworks?" + }, + { + "id": 20, + "question": "How can users modify the output video generated by InVideo AI?" + }, + { + "id": 21, + "question": "What are the key differences between Consensus GPT and Scholar GPT?" + }, + { + "id": 22, + "question": "Describe the process of using sub-agents and a super agent to analyze financial reports in a PDF format." + }, + { + "id": 23, + "question": "How do XML tags enhance prompt engineering within Claude?" + }, + { + "id": 24, + "question": "How does Claude 3.5 Sonet handle citations, considering its inability to access external databases?" + }, + { + "id": 25, + "question": "What specific instructions are given to Claude regarding the identification of humans in images?" + }, + { + "id": 26, + "question": "How does the system prompt inform Claude 3.5 Sonet about the different models within the Claude family?" + }, + { + "id": 27, + "question": "How does Cursor enable users to incorporate external documentation into their projects?" + }, + { + "id": 28, + "question": "Explain the concept of 'Fast prompts' in Cursor and their limitations." + }, + { + "id": 29, + "question": "Describe the three stages of fine-tuning LLMs, including an optional step." + }, + { + "id": 30, + "question": "What are the three main supervised fine-tuning techniques and their performance implications?" + }, + { + "id": 31, + "question": "How does the WBY voice assistant leverage function calling to perform actions on behalf of the user?" + }, + { + "id": 32, + "question": "In the context of analyzing financial data, what specific task does the 'Opus' model perform in the agent workflow?" + }, + { + "id": 33, + "question": "What specific phrase does Anthropic's system prompt instruct Claude to avoid when starting responses?" + }, + { + "id": 34, + "question": "What is the primary goal of the open-source project DEVIKA?" + }, + { + "id": 35, + "question": "What is the purpose of 'preference alignment' in the process of fine-tuning LLMs?" + }, + { + "id": 36, + "question": "How does OpenAI's approach to prompt caching differ from the implementations of Google and Anthropic?" + }, + { + "id": 37, + "question": "What is the core concept behind 'model distillation' in the context of LLMs?" + }, + { + "id": 38, + "question": "What is the potential benefit of using the 'Graph RAG' approach over traditional 'RAG' systems, particularly when working with large or complex knowledge graphs?" + }, + { + "id": 39, + "question": "What is the main limitation of open-source LLMs in relation to running advanced agents like SWE-Agent?" + }, + { + "id": 40, + "question": "What are the key differences between traditional RAG and Graph RAG?" + }, + { + "id": 41, + "question": "Which model claims to surpass GPT-4 on benchmarks?" + }, + { + "id": 42, + "question": "What are the limitations of LLMs in reasoning tasks?" + }, + { + "id": 43, + "question": "How does Ollama facilitate local LLM usage in Graph RAG?" + }, + { + "id": 44, + "question": "What challenges arise when using smaller LLMs?" 
+ } + ], + "type": "lecture" + } + ], + "6": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=RkGK0MloK0E", + "https://www.youtube.com/watch?v=1s9zZ6ERAko", + "https://www.youtube.com/watch?v=RNqXpdwd9AA", + "https://www.youtube.com/watch?v=2zaJZ_F7Xrk" + ], + "description": "daubechies-wavelet-lecture", + "questions": [ + { + "id": 0, + "question": "What are the inherent limitations of time-frequency localization?" + }, + { + "id": 1, + "question": "How do different window functions impact time-frequency analysis?" + }, + { + "id": 2, + "question": "Explain Daubechies' 'Zak transform' and its purpose." + }, + { + "id": 3, + "question": "Discuss limitations of orthonormal bases in time-frequency analysis." + }, + { + "id": 4, + "question": "What mathematical structures underpin time-frequency representations?" + }, + { + "id": 5, + "question": "Describe the role of the Heisenberg group in time-frequency analysis." + }, + { + "id": 6, + "question": "Compare and contrast windowed Fourier transforms and wavelet transforms." + }, + { + "id": 7, + "question": "What is the significance of the parameter Ωτ in windowed Fourier transforms?" + }, + { + "id": 8, + "question": "Discuss the significance of using orthogonal windows in signal analysis." + }, + { + "id": 9, + "question": "Explain the 'no-go theorem' regarding orthonormal time-frequency bases." + }, + { + "id": 10, + "question": "Explain the significance of the product ωτ = 2π in time-frequency analysis." + }, + { + "id": 11, + "question": "How does the synchrosqueezing transform enhance signal analysis?" + }, + { + "id": 12, + "question": "How does Daubechies illustrate the effectiveness of synchrosqueezing in extracting fetal heartbeats from maternal ECGs?" + }, + { + "id": 13, + "question": "What insights can be gained from analyzing bird songs using synchrosqueezing?" + }, + { + "id": 14, + "question": "What is the specific medical application that Daubechies highlights for time-frequency analysis in the context of fetal health?" + }, + { + "id": 15, + "question": "What specific features of bird songs make them interesting for analysis using time-frequency methods? What biological questions are researchers hoping to address?" + }, + { + "id": 16, + "question": "How does Daubechies describe the appearance of a spectrogram for a signal with time-varying frequencies? What features indicate changes in frequency content over time?" + }, + { + "id": 17, + "question": "What specific time-frequency patterns does Daubechies describe as indicative of singularities in a signal?" + }, + { + "id": 18, + "question": "How does Daubechies motivate the use of 'interval statistics' in signal analysis by drawing an analogy to the human auditory system?" + }, + { + "id": 19, + "question": "In the context of the synchrosqueezing transform, Daubechies discusses the use of multi-tapering with Hermite functions. What specific example does she provide to illustrate the improvement achieved by this technique?" + }, + { + "id": 20, + "question": "Daubechies describes an analysis of a simulated signal with two cosine components and varying instantaneous frequencies. What specific observations does she make about the resulting time-frequency representation?" + }, + { + "id": 21, + "question": "What limitations does Daubechies acknowledge in the use of the cepstrum for extracting signal components?" 
+ }, + { + "id": 22, + "question": "What future directions for research does Daubechies suggest in the context of analyzing animal vocalizations using time-frequency methods?" + }, + { + "id": 23, + "question": "Throughout the lectures, Daubechies emphasizes the importance of understanding the underlying signal model. How does this perspective guide the choice and interpretation of time-frequency analysis techniques?" + }, + { + "id": 24, + "question": "What general insights does Daubechies offer about the interplay between mathematical theory, computational methods, and real-world applications in the field of time-frequency analysis?" + } + ], + "type": "lecture" + } + ], + "7": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=wfmUNIpMyvo", + "https://www.youtube.com/watch?v=2rfzNw2I26Y", + "https://www.youtube.com/watch?v=Z19uz6Bol3I", + "https://www.youtube.com/watch?v=4n1vNDFqwY8" + ], + "description": "daubechies-art-and-mathematics-lecture", + "questions": [ + { + "id": 0, + "question": "How do mathematical tools enhance art historical analysis?" + }, + { + "id": 1, + "question": "What novel applications of mathematics exist within art conservation?" + }, + { + "id": 2, + "question": "How did circular harmonics aid fresco reconstruction?" + }, + { + "id": 3, + "question": "How did x-ray fluorescence aid the analysis of Van Gogh's painting?" + }, + { + "id": 4, + "question": "How did mathematical techniques aid in reconstructing the Padua frescoes?" + }, + { + "id": 5, + "question": "Describe Ingrid Daubechies's collaboration with the North Carolina Museum of Art." + }, + { + "id": 6, + "question": "How did mathematics aid in reconstructing the Padua frescoes?" + }, + { + "id": 7, + "question": "What methods were used to virtually age and rejuvenate the Giotto altarpiece?" + }, + { + "id": 8, + "question": "How did Massimo Fonacier reconstruct damaged frescoes?" + }, + { + "id": 9, + "question": "What motivations did Ingrid Daubechies express for engaging in collaborations with art historians and conservators?" + }, + { + "id": 10, + "question": "Explain the role of human expertise in projects involving the application of mathematics to art conservation. Provide specific examples." + }, + { + "id": 11, + "question": "What mathematical concept, used to analyze signal frequency, did Daubechies employ to analyze variations in scale and detail within Van Gogh's brushstrokes?" + }, + { + "id": 12, + "question": "What specific mathematical tool, based on the unique property of predictable transformation under rotation, was central to the reconstruction of the Padua frescoes?" + }, + { + "id": 13, + "question": "How did the digitization process capture the visual information of the fresco fragments?" + }, + { + "id": 14, + "question": "Explain the mathematical challenge of identifying the correct rotation of fresco fragments. How did circular harmonics help overcome this obstacle?" + }, + { + "id": 15, + "question": "What was the 'Munt Lab' and what role did it play in the reconstruction project?" + }, + { + "id": 16, + "question": "How did Daubechies's team address the challenge of 'cradle' interference in X-ray images of panel paintings? What was the impact of this work?" + }, + { + "id": 17, + "question": "In addition to her work with the North Carolina Museum of Art, what other applications of mathematics to art conservation and scientific research did Daubechies discuss?" 
+ }, + { + "id": 18, + "question": "How has Ingrid Daubechies's work influenced the practices of art conservators and historians?" + }, + { + "id": 19, + "question": "What concerns did the North Carolina Museum of Art (NCMA) initially have about the reconstruction of the Gisi Altarpiece?" + }, + { + "id": 20, + "question": "What was the broader significance of the Gisi Altarpiece project, beyond the physical reunification of the artwork?" + } + ], + "type": "lecture" + } + ], + "8": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=-GHyXBcpOZI", + "https://www.youtube.com/watch?v=YG6Tv8aoAuM", + "https://www.youtube.com/watch?v=1xTTRCIWMlg", + "https://www.youtube.com/watch?v=K1DlToP60rc" + ], + "description": "tech-ceo-lecture", + "questions": [ + { + "id": 0, + "question": "What are the main research and development priorities for Saint-Gobain?" + }, + { + "id": 1, + "question": "What is Saint-Gobain's strategy for reducing their carbon footprint in the production of flat glass?" + }, + { + "id": 2, + "question": "How does the acoustic quality of a classroom impact teachers?" + }, + { + "id": 3, + "question": "According to Rajesh Jain, what is the most common question entrepreneurs face?" + }, + { + "id": 4, + "question": "What are the two things that Rajesh Jain believes are required to build a strong company culture?" + }, + { + "id": 5, + "question": "How does Saint-Gobain address CO2 emissions in building materials?" + }, + { + "id": 6, + "question": "What environmental challenges does Saint-Gobain address in the built environment?" + }, + { + "id": 7, + "question": "How does Saint-Gobain's organizational structure aid its sustainability goals?" + }, + { + "id": 8, + "question": "What specific low-carbon initiatives has Saint-Gobain implemented?" + }, + { + "id": 9, + "question": "How does Saint-Gobain measure the impact of its sustainability initiatives?" + }, + { + "id": 10, + "question": "What are Saint-Gobain's main sustainability-focused strategies?" + }, + { + "id": 11, + "question": "How did Saint-Gobain reorganize to address sustainability challenges?" + }, + { + "id": 12, + "question": "What is Saint-Gobain's commitment regarding carbon neutrality?" + }, + { + "id": 13, + "question": "Explain the connection between Saint-Gobain's reorganization and its commitment to sustainability." + }, + { + "id": 14, + "question": "How does Saint-Gobain address the financial barriers homeowners face when considering energy-efficient renovations?" + }, + { + "id": 15, + "question": "Explain the concept of 'Tech' as a leadership principle at Saint-Gobain and provide an example of its implementation." + }, + { + "id": 16, + "question": "Describe Saint-Gobain's approach to engaging with and aligning its diverse workforce around sustainability goals." + }, + { + "id": 17, + "question": "Explain the 'Marshall Plan' proposed by Benoit Bazin and its intended impact on the building retrofit sector." + }, + { + "id": 18, + "question": "How does Saint-Gobain leverage the economic benefits of energy-efficient renovations to promote their adoption?" + }, + { + "id": 19, + "question": "Describe the key characteristics of a 'profcon' and how it differs from a traditional unicorn startup." + }, + { + "id": 20, + "question": "Explain Rajesh Jain's philosophy on the role of failure in the entrepreneurial journey." + }, + { + "id": 21, + "question": "How does Rajesh Jain recommend entrepreneurs identify and validate potential market opportunities?"
+ }, + { + "id": 22, + "question": "Explain the concept of 'curious listening' and its importance in an entrepreneurial context." + }, + { + "id": 23, + "question": "Describe the two essential elements Rajesh Jain believes are necessary for building a strong company culture." + }, + { + "id": 24, + "question": "Explain the '131' method for tackling problems and its benefits in delegating and developing leadership." + }, + { + "id": 25, + "question": "Explain the '$100 Delta decision rule' and its application in simplifying decision-making." + }, + { + "id": 26, + "question": "Describe the multidisciplinary approach employed by Exponent in conducting injury biomechanics investigations." + }, + { + "id": 27, + "question": "Explain how Exponent utilizes different types of evidence to reconstruct the mechanism of an injury." + }, + { + "id": 28, + "question": "Describe the role of Anthropomorphic Test Devices (ATDs) in injury biomechanics research and how they are used to assess injury risk." + }, + { + "id": 29, + "question": "Explain the concept of 'injury tolerance' and describe the various methods used to understand human tolerance limits." + }, + { + "id": 30, + "question": "How does Exponent address the challenge of studying injury tolerance in spines with pre-existing degenerative conditions?" + } + ], + "type": "lecture" + } + ], + "9": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=NePAPGxZnmE", + "https://www.youtube.com/watch?v=rqR3LeR09gc", + "https://www.youtube.com/watch?v=52wo6uvhvaw", + "https://www.youtube.com/watch?v=ycfnKPxBMck", + "https://www.youtube.com/watch?v=dTzL8OF_3i0", + "https://www.youtube.com/watch?v=_pyuxT8DXXU", + "https://www.youtube.com/watch?v=1ww8RlFC8uI", + "https://www.youtube.com/watch?v=mWrivekFZMM", + "https://www.youtube.com/watch?v=Yn0KC8Pa0Xs" + ], + "description": "dspy-lecture", + "questions": [ + { + "id": 0, + "question": "How does DSPy address limitations of LangChain prompt templates?" + }, + { + "id": 1, + "question": "How can graph neural networks improve the accuracy and efficiency of retrieval augmented generation?" + }, + { + "id": 2, + "question": "What innovative approaches address the shortcomings of existing RAG systems' information retrieval?" + }, + { + "id": 3, + "question": "What limitations hinder current RAG systems' effectiveness in retrieving relevant information?" + }, + { + "id": 4, + "question": "What are the advantages of LangGraph over legacy LangChain for multi-agent systems?" + }, + { + "id": 5, + "question": "Describe Lang Graph's user interface and functionality." + }, + { + "id": 6, + "question": "How does DSPy improve incontext learning in LLMs?" + }, + { + "id": 7, + "question": "How does the concept of 'teleprompters' in DSPy relate to prompt engineering?" + }, + { + "id": 8, + "question": "Explain how signatures in DSPy differ from traditional hard-coded prompts" + }, + { + "id": 9, + "question": "How is the concept of bootstrapping used in DSPy to improve model performance?" + }, + { + "id": 10, + "question": "How can DSPy be used to automatically fine-tune a language model?" + }, + { + "id": 11, + "question": "In what ways does the Microsoft research on RAG and fine-tuning suggest a preference for their own products and services? " + }, + { + "id": 12, + "question": "Compare and contrast the 'demonstrate', 'search' and 'predict' phases of the DSP framework." + }, + { + "id": 13, + "question": "How does DSP leverage in-context learning without the need for retraining the LLM?" 
+ }, + { + "id": 14, + "question": "Explain the concept of multi-hop searches within the DSP framework and its significance." + }, + { + "id": 15, + "question": "Describe how DSP was used to connect Newton's laws of motion to modern space exploration." + }, + { + "id": 16, + "question": "Illustrate how DSPy can be applied to solve an extreme multi-label classification problem." + }, + { + "id": 17, + "question": "Outline the 'infer-retrieve-rank' system proposed for extreme multi-label classification using DSPy." + }, + { + "id": 18, + "question": "Explain the limitations of using traditional vector embedding techniques in RAG systems for retrieving new research data." + }, + { + "id": 19, + "question": "How do state machines, state agents, and state spaces relate to AI and LLMs?" + }, + { + "id": 20, + "question": "Discuss the strengths and weaknesses of LangChain and LangGraph in the context of LLM interactions." + }, + { + "id": 21, + "question": "Explain why directed acyclic graphs are important in various fields, including AI." + }, + { + "id": 22, + "question": "Why did certain LLMs fail to answer the prompt about sending a seventh child to Stanford?" + }, + { + "id": 23, + "question": "Discuss the challenges LLMs face when encountering a task embedded in the middle of a long text." + }, + { + "id": 24, + "question": "Explain the concept of 'Lost in the Middle' in the context of LLMs processing long texts." + }, + { + "id": 25, + "question": "Explain the concepts of 'tree traversal' and 'collapsed tree' retrieval methods as described in the Raptor framework." + }, + { + "id": 26, + "question": "Discuss the potential benefits of incorporating graph-based analysis into RAG systems to enhance the identification of relevant information and improve reasoning capabilities." + }, + { + "id": 27, + "question": "How can the incorporation of graph neural networks (GNNs) into RAG systems improve information retrieval and overcome limitations of vector-based methods?" + }, + { + "id": 28, + "question": "Why is the retrieval of a large number of text passages crucial for accurate RAG performance, especially in specialized domains or when dealing with new knowledge?" + }, + { + "id": 29, + "question": "How does the Demonstrate-Search-Predict (DSP) methodology enhance the ability of Large Language Models (LLMs) to handle complex tasks?" + }, + { + "id": 30, + "question": "In what ways can graph theory be applied to optimize the performance of AI pipelines that involve both LLMs and retrieval models?" + }, + { + "id": 31, + "question": "Explain the concept of a 'stateful graph' as it relates to LangGraph and how it impacts the interaction of nodes within the graph." + }, + { + "id": 32, + "question": "What strategies can be used to address the limitations of LLMs in handling context lengths exceeding 2K tokens?" + }, + { + "id": 33, + "question": "How does DSPy, developed at Stanford, eliminate the reliance on hard-coded prompt templates in RAG systems?" + }, + { + "id": 34, + "question": "Describe how DSPy can be utilized to bootstrap synthetic, domain-specific data for enhancing LLM performance." + }, + { + "id": 35, + "question": "What strategies can be employed to mitigate the failure of AI systems in causal reasoning tasks?" + }, + { + "id": 36, + "question": "What are the primary reasons for the failure of certain LLMs in accurately processing text containing embedded tasks within longer documents?" 
+ }, + { + "id": 37, + "question": "Explain the concept of 'extreme multi-label classification' (XMC) and provide a real-world example of its application." + } + ], + "type": "lecture" + } + ], + "10": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=_YVQN6_nkfs", + "https://www.youtube.com/watch?v=rpDdQ0N2l50" + ], + "description": "trading-for-beginners", + "questions": [ + { + "id": 0, + "question": "What strategies and techniques enable consistent trading success?" + }, + { + "id": 1, + "question": "How can traders effectively manage risk and optimize returns?" + }, + { + "id": 2, + "question": "Describe the instructor's suggested timeframe for consistent profitability in trading." + }, + { + "id": 3, + "question": "How does the instructor differentiate between a 'buy limit' order and a 'buy stop' order?" + }, + { + "id": 4, + "question": "Why do some brokers have a negative reputation in the trading community?" + }, + { + "id": 5, + "question": "Which specific feature of Trade Nation provides a significant advantage?" + }, + { + "id": 6, + "question": "How to define the 'ranging market' in terms of price movement?" + }, + { + "id": 7, + "question": "What minimum IQ does the instructor suggest is beneficial for trading success?" + }, + { + "id": 8, + "question": "How to verify the order of price movements in a historical trade setup?" + }, + { + "id": 9, + "question": "How does the act of sharing one's trading journey with others benefit the learning process?" + }, + { + "id": 10, + "question": "How can observing the behavior of professional poker players provide insights into successful trading practices?" + }, + { + "id": 11, + "question": "In the context of trading, what is a 'doji' candle, and how is it significant?" + }, + { + "id": 12, + "question": "What does 'dumb donation' mean in the context of trading fees?" + }, + { + "id": 13, + "question": "Why is it considered unwise to solely rely on trading strategies obtained from external sources?" + }, + { + "id": 14, + "question": "Besides identifying bullish and bearish trends, what other market condition should traders consider before engaging in a trade setup?" + }, + { + "id": 15, + "question": "How does the length of a trade impact the potential accumulation of fees, such as rollover costs?" + }, + { + "id": 16, + "question": "How can traders avoid the potential for emotional distress and anxiety when interacting with other traders?" + }, + { + "id": 17, + "question": "How can historical trade data be utilized to assess the potential impact of increasing position size on an equity curve?" + }, + { + "id": 18, + "question": "In the context of trading, what is the significance of understanding the distinction between a 'B book' and an 'A book' in a brokerage's operations?" + }, + { + "id": 19, + "question": "What is the purpose of conducting a quarterly review of trading journal entries?" + }, + { + "id": 20, + "question": "How can a trader identify potential areas for improvement by analyzing the results of their backtesting?" + }, + { + "id": 21, + "question": "Beyond technical performance, what personal aspects should traders track in their journals for self-evaluation?" + }, + { + "id": 22, + "question": "How can a trading journal be used to assess the effectiveness of a chosen target placement strategy?" 
+ } + ], + "type": "lecture" + } + ], + "11": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=dYX809pLH00", + "https://www.youtube.com/watch?v=5gWAZV8KoEw", + "https://www.youtube.com/watch?v=0udZzgn1UcM" + ], + "description": "primetime-emmy-awards", + "questions": [ + { + "id": 0, + "question": "How do local news stations engage viewers and earn their trust?" + }, + { + "id": 1, + "question": "Why is the truth and factual reporting important, especially in local news?" + }, + { + "id": 2, + "question": "What challenges do local news journalists face in adapting to an ever-changing news environment?" + }, + { + "id": 3, + "question": "How do television programs reflect the diversity of Los Angeles?" + }, + { + "id": 4, + "question": "Why is it important for television programs to reflect the diversity of their audience?" + }, + { + "id": 5, + "question": "What are some of the ways that journalists can use their platform to bring attention to social causes?" + }, + { + "id": 6, + "question": "Analyze Giselle Fernandez's career highlights." + }, + { + "id": 7, + "question": "What recurring themes emerged across all Emmy ceremonies?" + }, + { + "id": 8, + "question": "How did the awards reflect L.A.'s diverse community?" + }, + { + "id": 9, + "question": "What challenges do journalists face while striving to report truthfully?" + }, + { + "id": 10, + "question": "How does the work of Amy J. Johnson ('A Doll Like Me') exemplify the kind of positivity Karapetyan discusses?" + }, + { + "id": 11, + "question": "What is the significance of being able to tell in-depth stories in a 'bite-sized' media environment?" + }, + { + "id": 12, + "question": "How do the anecdotes about people changing their schedules for KTLA stories illustrate the community's trust in the station?" + }, + { + "id": 13, + "question": "What challenges did John Vande Wege and his team encounter while covering food distributions during the pandemic?" + }, + { + "id": 14, + "question": "How do the comments from the cast of 'Martin' highlight the evolution of societal norms and humor from the 1990s to today?" + }, + { + "id": 15, + "question": "How do the comments from the cast of 'Cheers' illustrate the lasting impact of a successful television show on its actors?" + }, + { + "id": 16, + "question": "How does the acceptance speech on behalf of the writing team for 'Last Week Tonight' highlight the crucial role of various departments in television production?" + } + ], + "type": "entertainment" + } + ], + "12": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=sArECbQ9kQQ" + ], + "description": "journey-through-china", + "questions": [ + { + "id": 0, + "question": "What cultural influences shaped Xinjiang's unique identity?" + }, + { + "id": 1, + "question": "How does the video portray the relationship between tourism and local culture in China?" + }, + { + "id": 2, + "question": "How did the vloggers' experience in Turpan differ from their expectations?" + }, + { + "id": 3, + "question": "How did the vloggers' experience in Chengdu differ from Xinjiang?" + }, + { + "id": 4, + "question": "What contrasts exist between China's urban and rural landscapes?" + }, + { + "id": 5, + "question": "How did the vloggers describe the atmosphere of Urumqi's Grand Bazaar?" + }, + { + "id": 6, + "question": "Describe the culinary scene in Ürümqi, focusing on the types of food, preparation methods, and local specialties observed in the video." 
+ }, + { + "id": 7, + "question": "Compare the vloggers' experiences at the Grand Bazaar in Ürümqi and the local market in Yining" + }, + { + "id": 8, + "question": "How does the video portray the cultural diversity of Xinjiang province? Consider the languages spoken, clothing styles, religious practices, and architectural designs." + }, + { + "id": 9, + "question": "Explain the significance of the Flaming Mountains in Turpan, drawing on the legends and historical context provided in the video." + }, + { + "id": 10, + "question": "Discuss the challenges the vloggers faced while traveling independently in Xinjiang." + }, + { + "id": 11, + "question": "How do vloggers' culinary experiences shape their understanding of different regions and cultures?" + }, + { + "id": 12, + "question": "How does the video portray the relationship between tourism and local culture in China?" + }, + { + "id": 13, + "question": "Examine the impact of government policies on the landscapes and communities depicted in the video. Consider the development of infrastructure, preservation of historical sites, and the promotion of tourism." + }, + { + "id": 14, + "question": "How do vloggers' experiences in bustling cities like Chongqing and Guangzhou compare to those in smaller towns and villages?" + }, + { + "id": 15, + "question": "Compare the vloggers' experiences at the all-inclusive spa in Chengdu with their other accommodations in China" + }, + { + "id": 16, + "question": "Explain the significance of the People's Liberation Monument in Chongqing, based on the historical context provided in the video" + }, + { + "id": 17, + "question": "Discuss the architectural styles observed in Guangzhou, highlighting the city's colonial heritage and modern urban development." + }, + { + "id": 18, + "question": "How does the video portray the importance of family and tradition in Chinese culture?" + }, + { + "id": 19, + "question": "Compare the vloggers' experiences with street food in different parts of China. What regional variations did they observe?" + }, + { + "id": 20, + "question": "Explain the process of making 'nang', the grilled bread frequently observed in Xinjiang, detailing ingredients, tools, and cooking techniques." + }, + { + "id": 21, + "question": "Discuss the economic activities prevalent in the regions visited, highlighting agricultural practices, trade, and industries showcased in the video." + }, + { + "id": 22, + "question": "Compare the transportation infrastructure in Xinjiang and Sichuan." + }, + { + "id": 23, + "question": "Examine the significance of tea in Chinese culture, focusing on the types of tea consumed, social contexts, and rituals surrounding tea preparation and consumption." + }, + { + "id": 24, + "question": "How does the video portray the relationship between urban and rural life in China?" + }, + { + "id": 25, + "question": "Explain the historical and cultural significance of the Bezeklik Thousand Buddha Caves in Turpan." + }, + { + "id": 26, + "question": "Discuss the vloggers' reflections on the importance of preserving cultural heritage." + } + ], + "type": "entertainment" + } + ], + "13": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=39uFM-1HrG0" + ], + "description": "fia-awards", + "questions": [ + { + "id": 0, + "question": "How did technological advancements impact the racing season?" 
+ }, + { + "id": 1, + "question": "Compare and contrast the driving styles of rally raid champions Nasser Al-Attiyah and Sébastien Loeb, based on their respective backgrounds in motorsports." + }, + { + "id": 2, + "question": "In what ways did the weather conditions at the final race of the FIA World Rallycross Championship affect the outcome?" + }, + { + "id": 3, + "question": "How did the integration of electric cars into the FIA World Rallycross Championship contribute to the excitement and competitiveness of the 2024 season?" + }, + { + "id": 4, + "question": "What factors might have led to the dominance of Toyota Gazoo Racing in both the FIA World Rally Raid Championship and the FIA World Endurance Championship?" + }, + { + "id": 5, + "question": "How do the emotions and pressures experienced by a rally driver, as described by Ott Tänak, compare to those of a Formula 1 driver?" + }, + { + "id": 6, + "question": "How has the FIA's approach to diversity and inclusivity in motorsports manifested in the 2024 season and awards ceremony?" + }, + { + "id": 7, + "question": "What specific measures has the FIA taken to reduce the cost of entry-level motorsports, such as karting and cross-car racing?" + }, + { + "id": 8, + "question": "Discuss the impact of social media and fan engagement on the FIA's selection process for the 'Action of the Year' awards." + }, + { + "id": 9, + "question": "How did the partnership between Jaguar and TCS racing contribute to their success in the 2024 Formula E World Championship?" + }, + { + "id": 10, + "question": "Analyze the factors that have contributed to the resurgence of McLaren as a dominant force in Formula 1 after a 26-year hiatus." + }, + { + "id": 11, + "question": "What role did technological innovations play in shaping the competitive landscape of the 2024 Formula 1 season?" + }, + { + "id": 12, + "question": "Assess the impact of the rivalry between Max Verstappen and Lando Norris on the dynamics of the 2024 Formula 1 season." + }, + { + "id": 13, + "question": "Compare and contrast the challenges faced by drivers in endurance racing, such as the 24 Hours of Le Mans, with those encountered in sprint races like Formula 1." + }, + { + "id": 14, + "question": "How does the FIA's recognition of Michelle Mouton's lifetime achievements highlight the evolving role of women in motorsports?" + }, + { + "id": 15, + "question": "What can we learn about the evolution of safety standards in motorsports from Michelle Mouton's reflections on her experiences as a rally driver in the 1980s?" + }, + { + "id": 16, + "question": "How does the FIA's support for grassroots motorsports initiatives, such as the one highlighted in Rwanda, contribute to the long-term sustainability and growth of the sport?" + }, + { + "id": 17, + "question": "Explain the significance of the FIA’s presence in Africa for the first time in its 120-year history, and what this signifies for the future of motorsports on the continent." + }, + { + "id": 18, + "question": "Discuss the unique challenges and rewards of competing in the Dakar Rally, as highlighted by the experiences of the winning co-driver, Edouard Boulanger." + }, + { + "id": 19, + "question": "How has the FIA World Rally Championship adapted to the changing landscape of motorsports, with the introduction of new events and the incorporation of hybrid technology?" 
+ }, + { + "id": 20, + "question": "Analyze the competitive dynamics of the 2024 Formula 1 season, particularly the shifting balance of power between the established teams like Red Bull and Ferrari, and the resurgence of McLaren." + }, + { + "id": 21, + "question": "Discuss the impact of driver rivalries, such as the one between Max Verstappen and Lando Norris, on the excitement and drama of Formula 1 racing." + }, + { + "id": 22, + "question": "Explain the significance of the FIA's efforts to promote cost-effective grassroots motorsports programs, particularly in developing nations like Rwanda." + }, + { + "id": 23, + "question": "Describe the role of the FIA Drivers' Commission in advocating for the interests and safety of drivers across various motorsport disciplines." + }, + { + "id": 24, + "question": "Analyze the factors that contributed to the success of Toyota Gazoo Racing in both the FIA World Rally Raid Championship and the FIA World Endurance Championship, drawing on insights from team personnel and drivers." + }, + { + "id": 25, + "question": "Discuss the role of cultural performances, such as the one by Kenny Solheimano and Bukuru, in enriching the FIA Awards ceremony and showcasing the host country's artistic heritage." + }, + { + "id": 26, + "question": "How does the FIA Formula 2 Championship serve as a crucial stepping stone for aspiring Formula 1 drivers, based on the experiences of champions like Gabriele Bortoleto?" + } + ], + "type": "entertainment" + } + ], + "14": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=w8Wt3K1DgDw", + "https://www.youtube.com/watch?v=b5FzjqAIT60", + "https://www.youtube.com/watch?v=S4WoxYG_yzE", + "https://www.youtube.com/watch?v=jienkuqNrJI", + "https://www.youtube.com/watch?v=cebyAzB50K4", + "https://www.youtube.com/watch?v=x3xrGMnJleI" + ], + "description": "education-united-nations", + "questions": [ + { + "id": 0, + "question": "How can technology revolutionize global education systems?" + }, + { + "id": 1, + "question": "What specific global figures on children's education are highlighted?" + }, + { + "id": 2, + "question": "How does Education Cannot Wait's model facilitate its reach?" + }, + { + "id": 3, + "question": "Analyze funding sources for 'Education Cannot Wait'." + }, + { + "id": 4, + "question": "What are the goals of the Transforming Education Summit?" + }, + { + "id": 5, + "question": "How has the LEGO Foundation contributed to efforts to improve education for children in crisis and emergency settings?" + }, + { + "id": 6, + "question": "In what ways can the redistribution of wealth contribute to achieving education goals by 2030?" + }, + { + "id": 7, + "question": "What specific challenges do girls in Afghanistan face in accessing secondary education, and how has Education Cannot Wait addressed these challenges?" + }, + { + "id": 8, + "question": "According to the speakers, what is the role of the Organization of Islamic States (OIC) in promoting education?" + }, + { + "id": 9, + "question": "How can digital connectivity be leveraged to improve education for all children, particularly those who lack access to traditional learning environments?" + }, + { + "id": 10, + "question": "What is the 'Giga initiative', and how does it aim to address the digital divide in education?" + }, + { + "id": 11, + "question": "How does the COVID-19 pandemic impact literacy rates globally, and what strategies are proposed to address learning loss?" 
+ }, + { + "id": 12, + "question": "What actions can governments and teachers take to address the global shortage of teachers and improve the quality of teaching?" + }, + { + "id": 13, + "question": "How does the crisis in foundational learning affect education systems globally, and what steps can be taken to address it?" + }, + { + "id": 14, + "question": "How can education be used to address social issues such as gender inequality and HIV/AIDS?" + }, + { + "id": 15, + "question": "What are some examples of the challenges faced by young girls in accessing education due to the climate crisis?" + }, + { + "id": 16, + "question": "How can non-governmental organizations (NGOs) and grassroots organizations effectively reach marginalized populations with agendas like the SDGs, particularly in low- and middle-income countries (LMICs)?" + }, + { + "id": 17, + "question": "What are some of the arguments for and against paid internships within the United Nations?" + }, + { + "id": 18, + "question": "What evidence is presented in the videos to demonstrate progress in girls' education since Malala Yousafzai began advocating for this issue?" + }, + { + "id": 19, + "question": "Assess the UN's response to Afghan girls' education restrictions." + }, + { + "id": 20, + "question": "Analyze the role of the United Nations in coordinating efforts to achieve Sustainable Development Goal 4 (SDG4)." + }, + { + "id": 21, + "question": "Compare and contrast the perspectives of world leaders and young people on the key challenges facing education today." + }, + { + "id": 22, + "question": "Explain the significance of the 'Youth Declaration' developed as part of the Transforming Education Summit." + }, + { + "id": 23, + "question": "Illustrate how Education Cannot Wait bridges the gap between humanitarian aid and development assistance in crisis-affected contexts." + }, + { + "id": 24, + "question": "Discuss the role of digital technology in promoting access to quality education, particularly for marginalized learners." + }, + { + "id": 25, + "question": "Assess the potential of innovative financing mechanisms, such as the International Finance Facility for Education, in addressing the global education funding gap." + }, + { + "id": 26, + "question": "Explain how education systems can be made more resilient to shocks and crises, such as the COVID-19 pandemic and climate change." + }, + { + "id": 27, + "question": "Elaborate on the concept of 'transformational teaching' and its role in fostering student leadership and empowerment." + }, + { + "id": 28, + "question": "Describe the challenges faced by teachers in conflict-affected areas and how these challenges can be addressed." + }, + { + "id": 29, + "question": "Explain how education can contribute to promoting peace and social cohesion in societies affected by conflict and division." + }, + { + "id": 30, + "question": "Illustrate how education can empower girls and women and break down gender stereotypes." + }, + { + "id": 31, + "question": "Discuss the importance of intercultural dialogue and understanding in promoting quality education for all learners." + }, + { + "id": 32, + "question": "Explain how education can contribute to addressing the climate crisis and promoting sustainable development." + }, + { + "id": 33, + "question": "Analyze the role of parents and families in supporting children's education and fostering lifelong learning." 
+ }, + { + "id": 34, + "question": "Discuss the importance of investing in early childhood education and its long-term impact on individual and societal development." + }, + { + "id": 35, + "question": "Explain how education can contribute to creating decent work opportunities and promoting economic growth." + }, + { + "id": 36, + "question": "Discuss the role of research and innovation in driving progress towards achieving quality education for all." + }, + { + "id": 37, + "question": "Describe the challenges faced by children with disabilities in accessing quality education and how these challenges can be overcome." + }, + { + "id": 38, + "question": "Explain how education can contribute to promoting global citizenship and a sense of shared responsibility for the future of the planet." + } + ], + "type": "documentary" + } + ], + "15": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=h-WGw4BypDY", + "https://www.youtube.com/watch?v=YI1dXSw17Y8" + ], + "description": "game-awards", + "questions": [ + { + "id": 0, + "question": "How has the role of the Game Awards evolved over the past 10 years, according to Hideo Kojima?" + }, + { + "id": 1, + "question": "How does the success of 'League of Legends' and its adaptation, 'Arcane', reflect the growing influence of multimedia franchises in the gaming industry?" + }, + { + "id": 2, + "question": "Are there any recurring trends or patterns in the types of games that have been nominated for Game of the Year over the past decade?" + }, + { + "id": 3, + "question": "What insights does the story of Amir Sapot, the TGA Game Changer, provide about the challenges and resilience within the game development community?" + }, + { + "id": 4, + "question": "Does the announcement of 'The Witcher 4' confirm or refute previous leaks and rumors regarding the game's development and protagonist? " + }, + { + "id": 5, + "question": "Based on the trailers showcased, are there any noticeable advancements in game graphics or technology compared to previous years?" + }, + { + "id": 6, + "question": "Given the host's reaction to the announcement of 'Elden Ring: Nights', how does the game's development potentially impact expectations within the gaming community?" + }, + { + "id": 7, + "question": "Considering the variety of games showcased, how does the Game Awards reflect the diversity of gaming experiences and player preferences?" + }, + { + "id": 8, + "question": "Based on the games nominated for Best Ongoing Game, what factors contribute to a game maintaining player engagement and longevity?" + }, + { + "id": 9, + "question": "What does the inclusion of a performance inspired by 'Arcane' indicate about the relationship between gaming and other forms of media, such as television?" + }, + { + "id": 10, + "question": "How do the acceptance speeches of developers like Nicolas Doucet ('Astrobot') highlight the collaborative nature of game development?" + }, + { + "id": 11, + "question": "Judging from the trailers and announcements, what upcoming games are likely to generate significant discussion or anticipation among players?" + }, + { + "id": 12, + "question": "Based on the comments made by developers, how do accessibility features factor into the design philosophy of modern games?" + }, + { + "id": 13, + "question": "Does the success of games like 'Batro' shed light on the evolving role and impact of indie game developers within the gaming industry?" 
+ }, + { + "id": 14, + "question": "Given the emphasis on online connectivity and multiplayer experiences, what role does community play in shaping the gaming landscape?" + }, + { + "id": 15, + "question": "How do the announcements of mobile adaptations of popular franchises like 'Monster Hunter' reflect the changing platforms and accessibility of gaming?" + }, + { + "id": 16, + "question": "Considering the presence of figures like Snoop Dogg and Christopher Tin, how does the Game Awards highlight the intersection of gaming with music and other artistic disciplines? " + }, + { + "id": 17, + "question": "What major trends defined the 2024 Game Awards?" + } + ], + "type": "entertainment" + } + ], + "16": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=hjRxht__2wI", + "https://www.youtube.com/watch?v=HwkyhIiG8V0", + "https://www.youtube.com/watch?v=v7UDNvciQ38", + "https://www.youtube.com/watch?v=cstrfin0bYo", + "https://www.youtube.com/watch?v=1ZqR-wGKHGQ", + "https://www.youtube.com/watch?v=JLs5iQrkVng", + "https://www.youtube.com/watch?v=HuXhtV6Yi-k", + "https://www.youtube.com/watch?v=SbCmu7SeXCQ", + "https://www.youtube.com/watch?v=Msaer6b6QU8", + "https://www.youtube.com/watch?v=AUJeIG3Gjis", + "https://www.youtube.com/watch?v=W_ijgMs7wNg" + ], + "description": "ahp-superdecision", + "questions": [ + { + "id": 0, + "question": "What are the benefits of using a ratings model over pairwise comparisons in AHP, and when is it particularly useful?" + }, + { + "id": 1, + "question": "How can an AHP model be converted into an ANP model in Super Decisions, and what additional judgments are required?" + }, + { + "id": 2, + "question": "How can the results of AHP analysis be visualized in Super Decisions, including the priorities of alternatives and criteria?" + }, + { + "id": 3, + "question": "How is the unweighted supermatrix used in AHP, and how is it generated in Super Decisions?" + }, + { + "id": 4, + "question": "How can the global priorities of alternatives be calculated in AHP using Super Decisions?" + }, + { + "id": 5, + "question": "How can the inconsistency ratio be improved in Super Decisions software?" + }, + { + "id": 6, + "question": "How can sub-criteria be incorporated into the AHP hierarchy using Super Decisions software?" + }, + { + "id": 7, + "question": "Why is absolute measurement preferred over relative measurement when dealing with a large number of alternatives in AHP?" + }, + { + "id": 8, + "question": "How are ratings and intensities used in the absolute measurement process of AHP?" + }, + { + "id": 9, + "question": "In SuperDecisions software, what steps are involved in performing sensitivity analysis for an AHP model?" + }, + { + "id": 10, + "question": "Describe the two phases involved in implementing absolute measurement in SuperDecisions software." + }, + { + "id": 11, + "question": "What are the advantages of using the ratings mode in SuperDecisions software for absolute measurement?" + }, + { + "id": 12, + "question": "Explain the process of creating an AHP hierarchy in SuperDecisions software, including defining the goal, criteria, and alternatives." + }, + { + "id": 13, + "question": "How to establish connections between different elements in an AHP hierarchy within SuperDecisions?" + }, + { + "id": 14, + "question": "What are pairwise comparisons in AHP, and how are they conducted in SuperDecisions software?" + }, + { + "id": 15, + "question": "What is the significance of the consistency ratio (CR) in AHP, and how is it addressed in SuperDecisions?" 
+ }, + { + "id": 16, + "question": "Explain the process of synthesizing an AHP model in SuperDecisions to obtain global priorities for alternatives." + }, + { + "id": 17, + "question": "What are the considerations and benefits of using a ratings model in SuperDecisions compared to a pairwise comparison approach?" + }, + { + "id": 18, + "question": "How to define and utilize rating scales in SuperDecisions when implementing a ratings model? " + }, + { + "id": 19, + "question": "What are the different visualization options available in SuperDecisions for displaying sensitivity analysis results?" + }, + { + "id": 20, + "question": "Describe the steps involved in connecting criteria and sub-criteria to alternatives in SuperDecisions, ensuring proper linkage in the hierarchy." + }, + { + "id": 21, + "question": "How can the 'inconsistency report' feature in SuperDecisions be used to identify and rectify inconsistencies in pairwise comparison judgments? " + }, + { + "id": 22, + "question": "How does the process of synthesizing a model with sub-criteria differ from synthesizing a model without sub-criteria in SuperDecisions?" + }, + { + "id": 23, + "question": "When converting an AHP model to an ANP model, what new connections need to be added, and how do these connections reflect the concept of interdependence?" + } + ], + "type": "lecture" + } + ], + "17": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=EDJqyU_hl1M", + "https://www.youtube.com/watch?v=zWj1WXn4rrg", + "https://www.youtube.com/watch?v=z5g_KcCZVco", + "https://www.youtube.com/watch?v=CbeYwlo-Fvc" + ], + "description": "decision-making-science", + "questions": [ + { + "id": 0, + "question": "How does the Decision Lens software improve upon traditional AHP methods?" + }, + { + "id": 1, + "question": "How has the definition of a 'powerful' company shifted from the 20th century to the 21st century?" + }, + { + "id": 2, + "question": "How does the concept of 'trust' play a role in the decision to move from on-premise solutions to the cloud?" + }, + { + "id": 3, + "question": "How does Microsoft's approach to data privacy differ in China, Germany, and the United States?" + }, + { + "id": 4, + "question": "How does Decision Lens software address the challenge of planning in silos?" + }, + { + "id": 5, + "question": "Describe how the concept of 'agility' can be interpreted differently by various stakeholders and why clear terminology is crucial in developing AHP models." + }, + { + "id": 6, + "question": "How does the concept of 'disrupting the economics of an attack' help organizations prioritize security investments?" + }, + { + "id": 7, + "question": "Describe how John Yokim's early experiences with computers at the University of Pittsburgh shaped his career trajectory." + }, + { + "id": 8, + "question": "In the context of the shift from product-based to service-based models, how does the peloton example showcase the changing dynamics of customer engagement and value creation?" + }, + { + "id": 9, + "question": "Illustrate how Decision Lens software helped the United Network for Organ Sharing (UNOS) make more equitable and effective organ allocation decisions." + }, + { + "id": 10, + "question": "Explain how the Defense Health Agency balances the well-being of individual warfighters with the overall mission effectiveness of the Department of Defense when making resource allocation decisions." 
+ }, + { + "id": 11, + "question": "Discuss the criteria used by the National Park Service in prioritizing environmental cleanup projects." + }, + { + "id": 12, + "question": "Illustrate how the '7Rs' framework presented by Gartner can guide decision-making for established companies considering technology upgrades or replacements." + }, + { + "id": 13, + "question": "Describe how Decision Lens has been used by professional sports teams to improve player evaluation and selection, particularly in identifying and quantifying intangible attributes." + }, + { + "id": 14, + "question": "How does Decision Lens software facilitate collaboration and consensus-building among stakeholders with diverse perspectives and priorities during the planning process?" + }, + { + "id": 15, + "question": "Discuss the challenges of quantifying the value of security investments and how organizations can demonstrate the return on investment (ROI) for cybersecurity measures that prevent incidents rather than just respond to them." + }, + { + "id": 16, + "question": "Explain Dan Menicucci's perspective on cybersecurity threats." + }, + { + "id": 17, + "question": "Explain how the transition from episodic to continuous planning is changing the way organizations use tools like Decision Lens and the AHP. " + }, + { + "id": 18, + "question": "Considering the example of the U.S. Navy, describe how a geopolitical event like the conflict in Ukraine can impact an organization's planning priorities and resource allocation." + }, + { + "id": 19, + "question": "How does Decision Lens software address the challenges of planning in silos, and why is integrated planning critical for organizations like the U.S. Navy?" + }, + { + "id": 20, + "question": "Describe the role of the Analytic Hierarchy Process (AHP) in helping organizations like the state of Washington make difficult resource allocation decisions, particularly in the context of emerging industries or policy changes." + }, + { + "id": 21, + "question": "How did the United Network for Organ Sharing (UNOS) leverage the AHP and Decision Lens software to reduce the number of deaths on organ transplant waiting lists?" + }, + { + "id": 22, + "question": "Explain the importance of considering post-transplant outcomes, such as patient survival rates and the avoidance of 'futile transplants', in organ allocation decisions." + }, + { + "id": 23, + "question": "Describe how Decision Lens software enables organizations to perform resource optimization, taking into account both strategic value and cost constraints to identify the best mix of investments." + }, + { + "id": 24, + "question": "Explain how the availability of advanced analytics and decision-making tools like Decision Lens is transforming scouting and player selection in professional sports." + }, + { + "id": 25, + "question": "Discuss the impact of small differences in performance data, such as a tenth of a second in a 40-yard dash, on player evaluation and strategic decisions in professional sports." + } + ], + "type": "lecture" + } + ], + "18": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=Kbk9BiPhm7o" + ], + "description": "elon-musk", + "questions": [ + { + "id": 0, + "question": "What ethical considerations arise from Neuralink's advancements?" + }, + { + "id": 1, + "question": "How does slowing down the pace of communication affect one's perception?" + }, + { + "id": 2, + "question": "Describe the metaphorical 'tech tree' that Elon Musk uses to explain Neuralink's development stages?" 
+ }, + { + "id": 3, + "question": "What specific sensory enhancements does Elon Musk envision Neuralink providing, drawing parallels to science fiction?" + }, + { + "id": 4, + "question": "What potential application of Neuralink relates to memory restoration, and how does it connect to the human experience of remembering?" + }, + { + "id": 5, + "question": "What is the observation regarding the activity levels of most neurons, and what comparison is made to concepts in astrophysics?" + }, + { + "id": 6, + "question": "What motivates Elon Musk's endeavors, and how does this connect to his vision for Grok and SpaceX?" + }, + { + "id": 7, + "question": "What industry does Elon Musk compare the production of humanoid robots to in terms of global capacity?" + }, + { + "id": 8, + "question": "What is the mission of xAI and Grok?" + }, + { + "id": 9, + "question": "What are the first several years of Neuralink focused on solving?" + }, + { + "id": 10, + "question": "What vision enhancement capabilities does Musk envision for Neuralink?" + }, + { + "id": 11, + "question": "What, according to Musk, is the most important thing his biological neural net comes up with?" + }, + { + "id": 12, + "question": "What is DJ Seo’s area of expertise at Neuralink?" + } + ], + "type": "documentary" + } + ], + "19": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=s71nJQqzYRQ", + "https://www.youtube.com/watch?v=DcWqzZ3I2cY", + "https://www.youtube.com/watch?v=zN1PyNwjHpc" + ], + "description": "jeff-bezos", + "questions": [ + { + "id": 0, + "question": "What shaped Jeff Bezos's problem-solving approach?" + }, + { + "id": 1, + "question": "What is Bezos's 'Day One' philosophy?" + }, + { + "id": 2, + "question": "What are Bezos's key principles for effective leadership?" + }, + { + "id": 3, + "question": "How does Bezos envision humanity's future in space?" + }, + { + "id": 4, + "question": "What motivated Bezos's decision regarding the Washington Post's endorsements?" + }, + { + "id": 5, + "question": "How has Bezos's experience as a physicist informed his approach to business and innovation?" + }, + { + "id": 6, + "question": "Can Bezos's philanthropic endeavors be seen as an extension of his business philosophy?" + }, + { + "id": 7, + "question": "How does Bezos balance intuition with data-driven decision-making?" + }, + { + "id": 8, + "question": "In what ways does Bezos's leadership style encourage a culture of truth-seeking at Amazon?" + }, + { + "id": 9, + "question": "How does Bezos's perspective on regulation affect his outlook on the role of government in fostering innovation? " + }, + { + "id": 10, + "question": "How has the internet impacted the traditional business model of newspapers, and how has The Washington Post adapted? " + }, + { + "id": 11, + "question": "What are the key technical challenges and innovations involved in developing reusable rockets like New Glenn?" + }, + { + "id": 12, + "question": "What are the advantages and disadvantages of using the moon as a base for further space exploration?" + }, + { + "id": 13, + "question": "Describe Bezos's approach to customer service and how it relates to his overall business strategy." + }, + { + "id": 14, + "question": "How does Bezos's personal experience with The Washington Post's coverage inform his views on the role of media in society?" + }, + { + "id": 15, + "question": "What are the potential implications of large language models becoming more specialized and domain-specific?" 
+ }, + { + "id": 16, + "question": "Describe the process of developing and refining a 'crisp document' for meetings at Amazon." + }, + { + "id": 17, + "question": "What are the key factors that contribute to Bezos's belief that Blue Origin will be his most successful business venture?" + }, + { + "id": 18, + "question": "How does Bezos reconcile the potential benefits and risks of artificial intelligence?" + }, + { + "id": 19, + "question": "Describe the 'overview effect' and its impact on Bezos's perspective on space exploration." + }, + { + "id": 20, + "question": "How does Bezos's concept of 'messy meetings' foster innovation and effective problem-solving?" + }, + { + "id": 21, + "question": "Describe Bezos's rationale for not negotiating the purchase price of The Washington Post" + }, + { + "id": 22, + "question": "Explain the concept of 'truth-telling' in the context of Amazon's corporate culture." + }, + { + "id": 23, + "question": "How does Bezos's childhood experience in Montessori schools influence his views on education?" + }, + { + "id": 24, + "question": "Compare and contrast Bezos's leadership styles at Amazon and Blue Origin." + }, + { + "id": 25, + "question": "How does Bezos view the relationship between regulation and innovation?" + }, + { + "id": 26, + "question": "Explain the significance of the 'escape system' in the design of New Shepard." + }, + { + "id": 27, + "question": "How does Bezos's concept of 'one-way doors' and 'two-way doors' guide his decision-making process?" + }, + { + "id": 28, + "question": "How does Bezos's view of human nature influence his leadership style and business decisions?" + }, + { + "id": 29, + "question": "Explain the role of 'wandering' in Bezos's thinking and problem-solving process." + }, + { + "id": 30, + "question": "Describe Bezos's approach to managing his time and energy across multiple ventures." + }, + { + "id": 31, + "question": "How does Bezos's investment philosophy reflect his belief in the importance of supporting bold, unconventional ideas?" + }, + { + "id": 32, + "question": "How does Bezos's view of competition shape his approach to business and innovation?" + }, + { + "id": 33, + "question": "Describe Bezos's belief in the importance of developing the 'next generation of leaders'." + } + ], + "type": "documentary" + } + ], + "20": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=iBfQTnA2n2s", + "https://www.youtube.com/watch?v=yCIYS9fx56U", + "https://www.youtube.com/watch?v=2jKVx2vyZOY", + "https://www.youtube.com/watch?v=qZ0ImE41pVs", + "https://www.youtube.com/watch?v=mBhkD0iFf4w", + "https://www.youtube.com/watch?v=NIQDnWlwYyQ", + "https://www.youtube.com/watch?v=FcB97h3vrzk", + "https://www.youtube.com/watch?v=OzgNJJ2ErEE", + "https://www.youtube.com/watch?v=14leJ1fg4Pw", + "https://www.youtube.com/watch?v=LWa6OHeNK3s", + "https://www.youtube.com/watch?v=g_qxoznfa7E", + "https://www.youtube.com/watch?v=SKBG1sqdyIU" + ], + "description": "12-days-of-openai", + "questions": [ + { + "id": 0, + "question": "What advancements does OpenAI's o1 model offer compared to its predecessors?" + }, + { + "id": 1, + "question": "How does OpenAI's Canvas tool enhance ChatGPT's capabilities?" + }, + { + "id": 2, + "question": "How does the ChatGPT Pro tier enhance user experience and capabilities?" + }, + { + "id": 3, + "question": "What are the future development plans for o1 and the ChatGPT platform?" + }, + { + "id": 4, + "question": "Why is linking feedback to specific parts of writing challenging without Canvas?" 
+ }, + { + "id": 5, + "question": "Explain how code execution is made possible within Canvas." + }, + { + "id": 6, + "question": "Evaluate the new features added to ChatGPT's search function." + }, + { + "id": 7, + "question": "Describe the primary function and benefits of the 'Explore' feature in Sora." + }, + { + "id": 8, + "question": "Explain the key reasons why video is an important development area for OpenAI" + }, + { + "id": 9, + "question": "Compare and contrast the 'Remix' and 'Recut' editing tools within Sora." + }, + { + "id": 10, + "question": "How does the concept of 'presets' enhance the creative process in Sora?" + }, + { + "id": 11, + "question": "Detail the process of using the 'Storyboard' feature to direct a multi-action video." + }, + { + "id": 12, + "question": "How does Sora facilitate the creation of looping videos?" + }, + { + "id": 13, + "question": "Illustrate the process of using an image as the starting point for video generation in Sora." + }, + { + "id": 14, + "question": "How does ChatGPT's knowledge of context benefit users when using the Search feature?" + }, + { + "id": 15, + "question": "Explain the advantages of using ChatGPT Search as a default search engine in a web browser." + }, + { + "id": 16, + "question": "Explain how the integration of search enhances the Advanced Voice Mode in ChatGPT." + }, + { + "id": 17, + "question": "Describe the benefits of using Advanced Voice Mode compared to traditional text-based interactions with ChatGPT." + }, + { + "id": 18, + "question": "How does the o1 model handle ambiguity in problem descriptions, such as unspecified parameters?" + }, + { + "id": 19, + "question": "Compare the performance of o1, o1 Preview, and o1 Mini on the protein identification task described by Justin Ree." + }, + { + "id": 20, + "question": "Explain the purpose and functionality of 'graders' in the context of reinforcement fine-tuning." + }, + { + "id": 21, + "question": "Describe how Thompson Reuters utilized reinforcement fine-tuning to enhance their Co-Counsel AI tool." + }, + { + "id": 22, + "question": "Why is the limited number of examples needed for reinforcement fine-tuning significant?" + }, + { + "id": 23, + "question": "What new features are included in ChatGPT's iOS integration?" + }, + { + "id": 24, + "question": "Describe the improvements to ChatGPT's search function." + }, + { + "id": 25, + "question": "In what ways could the ability to adjust o1's reasoning effort benefit developers working with limited computational resources?" + }, + { + "id": 26, + "question": "How might preference fine-tuning change the way developers approach customizing language models for specific applications?" + }, + { + "id": 27, + "question": "What challenges might OpenAI face in expanding global access to advanced features while addressing ethical and regulatory concerns?" + }, + { + "id": 28, + "question": "Describe the potential benefits of integrating WebRTC support into the Realtime API for developers creating real-time audio experiences." + }, + { + "id": 29, + "question": "Discuss the implications of Sora's video generation capabilities for the future of content creation and storytelling." + }, + { + "id": 30, + "question": "How might the introduction of 'storyboard' in Sora change the way users approach video creation and editing?" + }, + { + "id": 31, + "question": "How might the reduced cost of audio tokens for GPT-4 and GPT-4 Mini in the Realtime API influence the adoption of voice-based AI applications?" 
+ }, + { + "id": 32, + "question": "Explain the role of 'blend' in Sora and its implications for combining different video elements to create unique visual experiences." + }, + { + "id": 33, + "question": "Explain the benefits of using projects in ChatGPT for organizing conversations and incorporating external data sources." + }, + { + "id": 34, + "question": "Discuss the role of community feedback and the 'explore' feature in Sora in shaping the future development and creative applications of the platform." + } + ], + "type": "lecture" + } + ], + "21": [ + { + "video_url": [ + "https://www.youtube.com/watch?v=PUPO2tTyPOo", + "https://www.youtube.com/watch?v=vU2S6dVf79M", + "https://www.youtube.com/watch?v=10FCv-gCKug", + "https://www.youtube.com/watch?v=FHXmiAvloUg", + "https://www.youtube.com/watch?v=VJ6bK81meu8", + "https://www.youtube.com/watch?v=oAsJVZlDOgQ", + "https://www.youtube.com/watch?v=zdcCD--IieY", + "https://www.youtube.com/watch?v=f5Qr8xUeSH4", + "https://www.youtube.com/watch?v=-m7wHrOD1o8", + "https://www.youtube.com/watch?v=aJGdt9q7sS0", + "https://www.youtube.com/watch?v=bMWXXPoDnDs", + "https://www.youtube.com/watch?v=bEld-pRTsO8", + "https://www.youtube.com/watch?v=64Oy7pWEZIA", + "https://www.youtube.com/watch?v=JjVvYDPVrAQ", + "https://www.youtube.com/watch?v=mJfHMlKL7Qc", + "https://www.youtube.com/watch?v=Jv_e6Rt4vWE", + "https://www.youtube.com/watch?v=L8UsPlT0nAA", + "https://www.youtube.com/watch?v=KCoBVe-3Rp8", + "https://www.youtube.com/watch?v=y7wMTwJN7rA", + "https://www.youtube.com/watch?v=4ZqJSfV4818", + "https://www.youtube.com/watch?v=mUEFwUU0IfE", + "https://www.youtube.com/watch?v=byPbxEH5V8E", + "https://www.youtube.com/watch?v=QNFDNVf8lCA" + ], + "description": "autogen", + "questions": [ + { + "id": 0, + "question": "How does AutoGen manage diverse LLM configurations and agent types?" + }, + { + "id": 1, + "question": "What are AutoGen's key capabilities beyond multi-agent frameworks?" + }, + { + "id": 2, + "question": "How does AutoGen facilitate efficient caching and performance tuning?" + }, + { + "id": 3, + "question": "How does AutoGen integrate with local LLMs and open-source models?" + }, + { + "id": 4, + "question": "How does AutoGen enable complex LLM-based workflows using multi-agent conversations?" + }, + { + "id": 5, + "question": "How can AutoGen agents be customized and allow human participation?" + }, + { + "id": 6, + "question": "In what ways does AutoGen navigate the imperfect generation and reasoning abilities of LLMs?" + }, + { + "id": 7, + "question": "How does AutoGen simplify and unify the implementation of complex LLM workflows?" + }, + { + "id": 8, + "question": "Describe how AutoGen leverages a hierarchical agent setup to solve complex problems such as multi-agent coding scenarios or supply chain optimization." + }, + { + "id": 9, + "question": "How can users access bots running in specific roles to obtain desired outputs?" + }, + { + "id": 10, + "question": "What is the role of Beautiful Soup in web scraping?" + }, + { + "id": 11, + "question": "Why is the ability to run AutoGen models locally or with open source models significant?" + }, + { + "id": 12, + "question": "What are the key philosophies of AutoGen in simplifying complex workflows as automated agent chats?" + }, + { + "id": 13, + "question": "How is the Amadeus flight server utilized in an AutoGen workflow?" + }, + { + "id": 14, + "question": "How is data schema created and used in Neon within the context of an AutoGen workflow?" 
+ }, + { + "id": 15, + "question": "Explain the concept of text-to-SQL conversion within an AutoGen agent workflow." + }, + { + "id": 16, + "question": "What are some challenges and considerations in orchestrating the behavior of multiple large language model-powered agents?" + }, + { + "id": 17, + "question": "Describe the roles and responsibilities of different agents (User Proxy, Analyst, Senior Analyst) within an AutoGen workflow." + }, + { + "id": 18, + "question": "How does the senior analyst agent achieve managerial oversight within an AutoGen workflow?" + }, + { + "id": 19, + "question": "Explain the purpose and structure of the messages.py file within an AutoGen project." + }, + { + "id": 20, + "question": "Explain how a user proxy agent functions within AutoGen." + }, + { + "id": 21, + "question": "Describe the process of connecting MemGPT, AutoGen, and local LLMs using RunPods." + }, + { + "id": 22, + "question": "Explain the significance of the 'context' parameter within LLM configurations in AutoGen." + }, + { + "id": 23, + "question": "Describe the purpose of using a 'seed' value for caching in AutoGen." + }, + { + "id": 24, + "question": "Explain the relationship between AutoGen, agent teams, and real-world use cases." + }, + { + "id": 25, + "question": "How can the concept of 'flipping the script' be applied to understanding the evolution of AI agents?" + }, + { + "id": 26, + "question": "Describe how one might create a simple snake game using AutoGen." + }, + { + "id": 27, + "question": "How does AutoGen facilitate the testing and improvement of a basic snake game?" + }, + { + "id": 28, + "question": "Explain the process of extracting facts and knowledge about a user from their past emails to improve AI assistant responses." + }, + { + "id": 29, + "question": "How does one create a knowledge retrieval system for an AI assistant using past email data?" + }, + { + "id": 30, + "question": "What is the role of lead qualification in an AI-powered email assistant, and how is it implemented?" + }, + { + "id": 31, + "question": "Describe the procedure for using past email data to train a GPT model for improved email responses." + }, + { + "id": 32, + "question": "What are the potential benefits of utilizing a multi-agent framework like AutoGen for cryptocurrency analysis?" + }, + { + "id": 33, + "question": "Explain the process of integrating a MemGPT agent into an AutoGen framework for enhanced memory capabilities." + }, + { + "id": 34, + "question": "How does the use of local LLMs potentially reduce the cost of running AI agents?" + }, + { + "id": 35, + "question": "Describe the steps involved in setting up a local LLM environment using RunPods and Text Generation Web UI." + }, + { + "id": 36, + "question": "Explain the significance of function calling capabilities in small open-source LLMs when used with AutoGen." + }, + { + "id": 37, + "question": "Discuss the challenges faced when using small open-source LLMs with less than 13 billion parameters for multi-agent systems." + }, + { + "id": 38, + "question": "Explain how prompt engineering techniques can be adapted for use in a multi-agent system like AutoGen." + }, + { + "id": 39, + "question": "How can AutoGen be utilized to create a SaaS AI product, such as a customer survey application?" + }, + { + "id": 40, + "question": "What are the advantages of AutoGen's design philosophy?" + }, + { + "id": 41, + "question": "How is AutoGen's caching mechanism implemented?" 
+            },
+            {
+                "id": 42,
+                "question": "Describe AutoGen's use in building a SaaS product."
+            },
+            {
+                "id": 43,
+                "question": "Explain the differences in prompting strategies when designing agents for tasks such as code generation versus creative writing in AutoGen."
+            }
+        ],
+        "type": "lecture"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/longervideos/download.sh b/longervideos/download.sh
new file mode 100755
index 0000000..1cd158e
--- /dev/null
+++ b/longervideos/download.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -ex
+
+courses="0-fights-in-animal-kingdom 1-nature-scenes 2-climate-week-at-columbia-engineering 3-black-myth-wukong 4-rag-lecture 5-ai-agent-lecture 6-daubechies-wavelet-lecture 7-daubechies-art-and-mathematics-lecture 8-tech-ceo-lecture 9-dspy-lecture 10-trading-for-beginners 11-primetime-emmy-awards 12-journey-through-china 13-fia-awards 14-education-united-nations 15-game-awards 16-ahp-superdecision 17-decision-making-science 18-elon-musk 19-jeff-bezos 20-12-days-of-openai 21-autogen"
+
+for course in $courses; do
+    mkdir -p ./$course/videos
+    yt-dlp -o "%(id)s.%(ext)s" -S "res:720" -a "./$course/videos.txt" -P "./$course/videos" >> ./download_log.txt 2>&1
+    wait
+done
\ No newline at end of file
diff --git a/longervideos/prepare_data.py b/longervideos/prepare_data.py
new file mode 100644
index 0000000..2a0a55d
--- /dev/null
+++ b/longervideos/prepare_data.py
@@ -0,0 +1,18 @@
+import os
+import json
+
+with open('./dataset.json', 'rb') as f:
+    longervideos = json.load(f)
+
+collections = []
+for _id in longervideos:
+    collection = longervideos[_id][0]
+    collection_name = f"{_id}-{collection['description']}"
+    collections.append(collection_name)
+    os.makedirs(os.path.join(collection_name, 'videos'), exist_ok=True)
+    with open(os.path.join(collection_name, 'videos.txt'), 'w') as f:
+        for i in range(len(collection['video_url'])):
+            _url = collection['video_url'][i]
+            f.write(f'{_url}')
+            if i != len(collection['video_url']) - 1:
+                f.write('\n')
\ No newline at end of file
diff --git a/longervideos/videorag_experiment.py b/longervideos/videorag_experiment.py
new file mode 100644
index 0000000..bd7e564
--- /dev/null
+++ b/longervideos/videorag_experiment.py
@@ -0,0 +1,57 @@
+import os
+import json
+import logging
+import warnings
+import multiprocessing
+
+warnings.filterwarnings("ignore")
+logging.getLogger("httpx").setLevel(logging.WARNING)
+
+import argparse
+parser = argparse.ArgumentParser(description="Set the video collection and CUDA device.")
+parser.add_argument('--collection', type=str, default='4-rag-lecture')
+parser.add_argument('--cuda', type=str, default='0')
+args = parser.parse_args()
+sub_category = args.collection  # the flag is --collection, so argparse exposes it as args.collection
+
+os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
+os.environ["OPENAI_API_KEY"] = ""
+
+from videorag._llm import *
+from videorag import VideoRAG, QueryParam
+
+if __name__ == '__main__':
+    multiprocessing.set_start_method('spawn')
+
+    ## learn
+    video_base_path = f'./{sub_category}/videos/'
+    video_files = sorted(os.listdir(video_base_path))
+    video_paths = [os.path.join(video_base_path, f) for f in video_files]
+    videorag = VideoRAG(cheap_model_func=gpt_4o_mini_complete, best_model_func=gpt_4o_mini_complete, working_dir=f"./videorag-workdir/{sub_category}")
+    videorag.insert_video(video_path_list=video_paths)
+
+    ## inference
+    with open('./dataset.json', 'r') as f:
+        longervideos = json.load(f)
+
+    videorag = VideoRAG(cheap_model_func=gpt_4o_mini_complete, best_model_func=gpt_4o_mini_complete, working_dir=f"./videorag-workdir/{sub_category}")
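+    # re-instantiated with the same working_dir as the indexing run above, so the
+    # previously built video knowledge base is picked up for querying instead of being rebuilt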
+    videorag.load_caption_model(debug=False)
+
+    answer_folder = f'./videorag-answers/{sub_category}'
+    os.makedirs(answer_folder, exist_ok=True)
+
+    collection_id = sub_category.split('-')[0]
+    querys = longervideos[collection_id][0]['questions']
+    for i in range(len(querys)):
+        query_id = querys[i]['id']
+        query = querys[i]['question']
+        param = QueryParam(mode="videorag")
+        param.wo_reference = True
+        print("Query: ", query)
+
+        response = videorag.query(query=query, param=param)
+        print(response)
+        with open(os.path.join(answer_folder, f'answer_{query_id}.md'), 'w') as f:  # no leading slash: os.path.join discards answer_folder when the second part is absolute
+            f.write(response)
\ No newline at end of file
diff --git a/reproduce/quantitative_comparison/batch_quant_eval_calculate.py b/reproduce/quantitative_comparison/batch_quant_eval_calculate.py
new file mode 100755
index 0000000..9dcec1a
--- /dev/null
+++ b/reproduce/quantitative_comparison/batch_quant_eval_calculate.py
@@ -0,0 +1,101 @@
+import os
+import json
+import random
+import numpy as np
+from tqdm import tqdm
+from copy import deepcopy
+
+baseline_model = 'naiverag'
+evaluate_model = [
+    'llamavid',
+    'videoagent',
+    'notebooklm',
+    'videorag'
+]
+
+metrics = ['Comprehensiveness', 'Empowerment', 'Trustworthiness', 'Depth', 'Density', 'Overall Score']
+
+base_dir = 'overall_comparison_video_understanding'
+# Please enter the parsed result files ending with .json below (the files written by the parse step).
+result_file = [
+    '',
+    '',
+    '',
+    '',
+    ''
+]
+
+domain_list = ['lecture', 'documentary', 'entertainment']
+
+with open('../../longervideos/dataset.json', 'r') as f:
+    all_data = json.load(f)
+
+overall_score = {}
+for _model in evaluate_model:
+    overall_score[_model] = {}
+    for _metric in metrics:
+        overall_score[_model][_metric] = []
+
+category_domain_dict = {}
+for category_id in all_data:
+    _domain = all_data[category_id][0]['type']
+    category_domain_dict[category_id] = _domain
+
+domain_score = {}
+for domain in domain_list:
+    domain_score[domain] = {}
+    for _model in evaluate_model:
+        domain_score[domain][_model] = {}
+        for _metric in metrics:
+            domain_score[domain][_model][_metric] = []
+
+query_count = 0
+for category_id in tqdm(all_data):
+    category = f"{category_id}-{all_data[category_id][0]['description']}"
+    querys = all_data[category_id][0]['questions']
+    query_count += len(querys)
+
+    score = {}
+    for _model in evaluate_model:
+        score[_model] = {}
+        for _metric in metrics:
+            score[_model][_metric] = []
+
+    for _file in result_file:
+        result_path = f'./batch_requests/{base_dir}/{_file}'
+        with open(result_path, 'r') as f:
+            results = json.loads(f.read())
+
+        for i in range(len(querys)):
+            for _model in evaluate_model:
+                query_id = querys[i]['id']
+                evaluation_result = results[f'{category}++query{query_id}++base++answers-{baseline_model}++evaluate++answers-{_model}']
+
+                for _metric in metrics:
+                    _metric_score = evaluation_result[_metric]['Score']
+                    score[_model][_metric].append(_metric_score)
+                    overall_score[_model][_metric].append(_metric_score)
+                    domain_score[category_domain_dict[category_id]][_model][_metric].append(_metric_score)
+
+with open(f'batch_requests/{base_dir}/{base_dir}.txt', 'a') as f:
+    print(query_count)
+    f.write(f'{query_count}\n')
+    for _model in evaluate_model:
+        print(_model)
+        f.write(_model + '\n')
+        for _domain in domain_list:
+            print(_domain)
+            f.write(_domain + '\n')
+            for _metric in metrics:
+                print(f'{np.array(domain_score[_domain][_model][_metric]).mean():.2f}', _metric)
+                f.write(f'{np.array(domain_score[_domain][_model][_metric]).mean():.2f} {_metric}\n')
+            print('----')
+            f.write('----\n')
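+        # after the per-domain breakdown, report the means pooled over all domains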
+        print('All')
+        f.write('All\n')
+        for _metric in metrics:
+            print(f'{np.array(overall_score[_model][_metric]).mean():.2f}', _metric)
+            f.write(f'{np.array(overall_score[_model][_metric]).mean():.2f} {_metric}\n')
+        print('====' * 8)
+        f.write('====' * 8 + '\n')
\ No newline at end of file
diff --git a/reproduce/quantitative_comparison/batch_quant_eval_download.py b/reproduce/quantitative_comparison/batch_quant_eval_download.py
new file mode 100755
index 0000000..1a066a2
--- /dev/null
+++ b/reproduce/quantitative_comparison/batch_quant_eval_download.py
@@ -0,0 +1,53 @@
+import os
+os.environ["OPENAI_API_KEY"] = ""
+import re
+import time
+import json
+import jsonlines
+import tiktoken
+
+from tqdm import tqdm
+from openai import OpenAI
+
+client = OpenAI()
+
+def obtain_output_file_id(batches):
+    for batch in batches:
+        print(client.batches.retrieve(batch))
+        print(client.batches.retrieve(batch).output_file_id)
+
+def download_result(result_files, base_dir):
+    for _file in result_files:
+        content = client.files.content(_file).content
+        with open(f"batch_requests/{base_dir}/{_file}.temp", "wb") as f:
+            f.write(content)
+        results = []
+        with open(f"batch_requests/{base_dir}/{_file}.temp", 'r') as f:
+            for line in tqdm(f):
+                json_object = json.loads(line.strip())
+                results.append(json_object)
+        with open(f"batch_requests/{base_dir}/{_file}.json", "w") as json_file:
+            json.dump(results, json_file, indent=4)
+        os.remove(f"batch_requests/{base_dir}/{_file}.temp")
+
+# ================================
+
+# First Step: Please enter the relevant batch IDs here to obtain the output file IDs.
+batches = [
+    '',
+    '',
+    '',
+    '',
+    ''
+]
+obtain_output_file_id(batches)
+
+# Second Step: Please enter the output file IDs below to download the output files.
+# result_files = [
+#     '',
+#     '',
+#     '',
+#     '',
+#     ''
+# ]
+# download_result(result_files, base_dir='overall_comparison_video_understanding')
\ No newline at end of file
diff --git a/reproduce/quantitative_comparison/batch_quant_eval_parse.py b/reproduce/quantitative_comparison/batch_quant_eval_parse.py
new file mode 100755
index 0000000..80a9732
--- /dev/null
+++ b/reproduce/quantitative_comparison/batch_quant_eval_parse.py
@@ -0,0 +1,89 @@
+import os
+os.environ["OPENAI_API_KEY"] = ""
+import time
+import json
+import threading
+from tqdm import tqdm
+from openai import OpenAI
+from setproctitle import setproctitle
+
+base_dir = 'overall_comparison_video_understanding'
+# The JSON file contains the batch of requests created when the batch request was uploaded.
+request_file = ''
+# Please enter the output file IDs below, which correspond to the downloaded output files.
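+# (These are the IDs printed by batch_quant_eval_download.py; OpenAI file IDs typically look like "file-...".)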
+result_files = [
+    '',
+    '',
+    '',
+    '',
+    ''
+]
+
+setproctitle(f"parse-result-{base_dir}")
+print(f"Start parsing result files in {base_dir}...")
+
+def check_response_valid(data):
+    valid_keys = ['Comprehensiveness', 'Empowerment', 'Trustworthiness', 'Depth', 'Density', 'Overall Score']
+    assert len(data) == 6
+    assert set(list(data.keys())) == set(valid_keys)
+    for _key in valid_keys:
+        assert data[_key]["Score"] in [1, 2, 3, 4, 5]
+        assert "Explanation" in list(data[_key].keys())
+
+def process_file(_file, request_dict):
+    client = OpenAI()
+    with open(f'batch_requests/{base_dir}/{_file}.json', 'r') as f:
+        data = json.load(f)
+    assert len(data) == len(request_dict)
+    parse_results = {}
+    for i in range(len(data)):
+        dp = data[i]
+        custom_id = dp["custom_id"]
+        try:
+            json_data = json.loads(dp["response"]["body"]["choices"][0]["message"]["content"])
+            check_response_valid(json_data)
+            parse_results[custom_id] = json_data
+        except Exception as e:
+            print(f"{_file} ({i}/{len(data)}) Find error when parsing {custom_id} ({e}), re-request OpenAI")
+            while True:
+                try:
+                    response = client.chat.completions.create(
+                        model=request_dict[custom_id]["model"],
+                        messages=request_dict[custom_id]["messages"],
+                        response_format=request_dict[custom_id]["response_format"]
+                    )
+                    json_data = json.loads(response.choices[0].message.content)
+                    check_response_valid(json_data)
+                    parse_results[custom_id] = json_data
+                    print(f"{_file} ({i}/{len(data)}) success re-request!")
+                    time.sleep(1)
+                    break
+                except Exception as e:
+                    print(f"{_file} ({i}/{len(data)}) {e}")
+                    print(f"{_file} ({i}/{len(data)}) continue re-request OpenAI")
+                    continue
+    with open(f'batch_requests/{base_dir}/{_file}-parse-result.json', 'w') as f:
+        json.dump(parse_results, f, indent=4, ensure_ascii=False)
+
+request_dict = {}
+with open(f'batch_requests/{base_dir}/{request_file}', 'r') as f:
+    for _line in f.readlines():
+        json_data = json.loads(_line)
+        request_dict[json_data["custom_id"]] = {
+            "model": json_data["body"]["model"],
+            "messages": json_data["body"]["messages"],
+            "response_format": json_data["body"]["response_format"]
+        }
+
+thread_list = []
+for _file in result_files:
+    thread = threading.Thread(target=process_file, args=(_file, request_dict))
+    thread_list.append(thread)
+
+for thread in thread_list:
+    thread.daemon = True  # Thread.setDaemon() is deprecated; set the attribute instead
+    thread.start()
+
+for thread in thread_list:
+    thread.join()
diff --git a/reproduce/quantitative_comparison/batch_quant_eval_upload.py b/reproduce/quantitative_comparison/batch_quant_eval_upload.py
new file mode 100755
index 0000000..a5de8e9
--- /dev/null
+++ b/reproduce/quantitative_comparison/batch_quant_eval_upload.py
@@ -0,0 +1,172 @@
+import os
+os.environ["OPENAI_API_KEY"] = ""
+import re
+import time
+import json
+import jsonlines
+import tiktoken
+import itertools
+from pydantic import BaseModel, Field
+from typing import Literal
+
+from tqdm import tqdm
+from openai import OpenAI
+from openai.lib._pydantic import to_strict_json_schema
+from openai.lib._parsing._completions import type_to_response_format_param
+
+encoding = tiktoken.encoding_for_model('gpt-4o-mini')
+
+sys_prompt = """
+---Role---
+You are an expert evaluating an answer against a baseline answer based on these criteria: **Comprehensiveness**, **Empowerment**, **Trustworthiness**, **Depth** and **Density**.
+"""
+
+prompt = """
+You are an expert evaluating an answer against a baseline answer based on these criteria: **Comprehensiveness**, **Empowerment**, **Trustworthiness**, **Depth** and **Density**.
+ +- **Comprehensiveness**: How much detail does the answer provide to cover all aspects and details of the question? +- **Empowerment**: How well does the answer help the reader understand and make informed judgments about the topic? +- **Trustworthiness**: Does the answer provide sufficient detail and align with common knowledge, enhancing its credibility? +- **Depth**: Does the answer provide in-depth analysis or details, rather than just superficial information? +- **Density**: Does the answer contain relevant information without less informative or redundant content? + +For the evaluated answer labeled "Evaluation Answer," assign a score from 1 to 5 for each criterion compared to the baseline answer labeled "Baseline Answer." Then, assign an overall score based on these criteria. +The evaluation scores are defined as follows: +- 1: Strongly worse than the baseline answer +- 2: Weakly worse than the baseline answer +- 3: Moderate compared to the baseline answer +- 4: Weakly better than the baseline answer +- 5: Strongly better than the baseline answer + + +Here is the question: +{query} + +Here are the answers: + +**Baseline Answer:** +{baseline_answer} + +**Evaluation Answer:** +{evaluation_answer} + + +Evaluate the answer using the criteria listed above and provide detailed explanations for the scores. + +Output your evaluation in the following JSON format: + +{{ + "Comprehensiveness": {{ + "Score": "[1 - 5]", + "Explanation": "[Provide explanation here]" + }}, + "Empowerment": {{ + "Score": "[1 - 5]", + "Explanation": "[Provide explanation here]" + }}, + "Trustworthiness": {{ + "Score": "[1 - 5]", + "Explanation": "[Provide explanation here]" + }}, + "Depth": {{ + "Score": "[1 - 5]", + "Explanation": "[Provide explanation here]" + }}, + "Density": {{ + "Score": "[1 - 5]", + "Explanation": "[Provide explanation here]" + }} + "Overall Score": {{ + "Score": "[1 - 5]", + "Explanation": "[Provide explanation here]" + }} +}} +""" + +class Criterion(BaseModel): + Score: int + Explanation: str + +class Result(BaseModel): + Comprehensiveness: Criterion + Empowerment: Criterion + Trustworthiness: Criterion + Depth: Criterion + Density: Criterion + Overall_Score: Criterion = Field(alias="Overall Score") + +result_response_format = type_to_response_format_param(Result) + +if __name__ == "__main__": + with open('../../longervideos/dataset.json', 'r') as f: + questions = json.load(f) + + baseline_answer_dir = 'answers-naiverag' + base_dir = 'overall_comparison_video_understanding' + evaluation_answer_dir = [ + 'answers-videorag', + 'answers-notebooklm', + 'answers-llamavid', + 'answers-videoagent' + ] + + requests = [] + total_token_count = 0 + for _id in questions: + video_list_name = questions[_id][0]['description'] + video_querys = questions[_id][0]['questions'] + data_path = f"../all_answers/{_id}-{video_list_name}" + for _evaluation_answer_dir in evaluation_answer_dir: + baseline_work_dir = os.path.join(data_path, baseline_answer_dir) + evaluation_work_dir = os.path.join(data_path, _evaluation_answer_dir) + for i in range(len(questions[_id][0]['questions'])): + # query + query_id = questions[_id][0]['questions'][i]["id"] + query = questions[_id][0]['questions'][i]["question"] + # baseline answer + with open(os.path.join(baseline_work_dir, f'answer_{query_id}.md'), 'r') as f: + baseline_answer = f.read() + # evaluation answer + with open(os.path.join(evaluation_work_dir, f'answer_{query_id}.md'), 'r') as f: + evaluation_answer = f.read() + request_prompt = prompt.format(query=query, 
+
+                request_data = {
+                    "custom_id": f"{_id}-{video_list_name}++query{query_id}++base++{baseline_answer_dir}++evaluate++{_evaluation_answer_dir}",
+                    "method": "POST",
+                    "url": "/v1/chat/completions",
+                    "body": {
+                        "model": "gpt-4o-mini",
+                        "messages": [
+                            {"role": "system", "content": sys_prompt},
+                            {"role": "user", "content": request_prompt},
+                        ],
+                        "response_format": result_response_format
+                    },
+                }
+                requests.append(request_data)
+                total_token_count += len(encoding.encode(request_prompt))
+
+    run_time = 5
+    os.makedirs(f'batch_requests/{base_dir}', exist_ok=True)
+    request_json_file_path = f'batch_requests/{base_dir}/{int(time.time())}.json'
+    with jsonlines.open(request_json_file_path, mode="w") as writer:
+        for request in requests:
+            writer.write(request)
+    print(f"Batch API requests written to {request_json_file_path}")
+    print(f"Price: {total_token_count / 1000000 * 0.075 * run_time}$")
+
+    for k in range(run_time):
+        client = OpenAI()
+        batch_input_file = client.files.create(
+            file=open(request_json_file_path, "rb"), purpose="batch"
+        )
+        batch_input_file_id = batch_input_file.id
+
+        batch = client.batches.create(
+            input_file_id=batch_input_file_id,
+            endpoint="/v1/chat/completions",
+            completion_window="24h",
+            metadata={"description": f"runtime{k} - a very nice and successful eval job: {request_json_file_path}"},
+        )
+        print(f"RunTime {k}: Batch {batch.id} has been created.")
\ No newline at end of file
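Editor's note: the printed price applies the script's rate of $0.075 per 1M input tokens (the discounted OpenAI Batch API rate for gpt-4o-mini at the time of writing) to the tiktoken count of every request prompt, once per repeated run; completion tokens are not counted. A worked example with a made-up token count:

total_token_count = 2_000_000   # hypothetical prompt tokens summed over all requests
run_time = 5                    # each request is submitted five times
price = total_token_count / 1000000 * 0.075 * run_time
print(f"Price: {price}$")       # -> Price: 0.75$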
diff --git a/reproduce/winrate_comparison/batch_winrate_eval_calculate.py b/reproduce/winrate_comparison/batch_winrate_eval_calculate.py
new file mode 100755
index 0000000..cbf8a09
--- /dev/null
+++ b/reproduce/winrate_comparison/batch_winrate_eval_calculate.py
@@ -0,0 +1,99 @@
+import json
+from tqdm import tqdm
+
+# model_a is fixed as videorag
+model_a = 'videorag'
+# pick model_b from ['naiverag', 'graphrag-local', 'graphrag-global', 'lightrag-hybrid']
+model_b = 'naiverag'
+
+metrics = ['Comprehensiveness', 'Empowerment', 'Trustworthiness', 'Depth', 'Density', 'Overall Winner']
+
+base_dir = 'overall_comparison_rag'
+# Please enter the parsed result files (ending with .json) below.
+result_file = [
+    '',
+    '',
+    '',
+    '',
+    ''
+]
+
+domain_list = ['lecture', 'documentary', 'entertainment']
+
+with open('../../longervideos/dataset.json', 'r') as f:
+    all_data = json.load(f)
+
+overall_win_count = {}
+for _metric in metrics:
+    overall_win_count[_metric] = {'a': 0, 'b': 0}
+
+category_domain_dict = {}
+for category_id in all_data:
+    _domain = all_data[category_id][0]['type']
+    category_domain_dict[category_id] = _domain
+
+domain_win_count = {}
+for domain in domain_list:
+    domain_win_count[domain] = {}
+    for _metric in metrics:
+        domain_win_count[domain][_metric] = {'a': 0, 'b': 0}
+
+query_count = 0
+for category_id in tqdm(all_data):
+    category = f"{category_id}-{all_data[category_id][0]['description']}"
+    querys = all_data[category_id][0]['questions']
+    query_count += len(querys)
+    win_count = {}
+    for _metric in metrics:
+        win_count[_metric] = {'a': 0, 'b': 0}
+
+    for _file in result_file:
+        result_path = f'./batch_requests/{base_dir}/{_file}'
+        with open(result_path, 'r') as f:
+            results = json.loads(f.read())
+
+        for i in range(len(querys)):
+            query_id = querys[i]['id']
+            ori_result = results[f'{category}++query{query_id}++answers-{model_a}++answers-{model_b}++ori']
+            rev_result = results[f'{category}++query{query_id}++answers-{model_b}++answers-{model_a}++rev']
+            # original order
+            for _metric in metrics:
+                assert ori_result[_metric]['Winner'] in ['Answer 1', 'Answer 2']
+                winner = 'a' if ('1' in ori_result[_metric]['Winner']) else 'b'
+                win_count[_metric][winner] += 1
+                domain_win_count[category_domain_dict[category_id]][_metric][winner] += 1
+                overall_win_count[_metric][winner] += 1
+            # reverse order
+            for _metric in metrics:
+                assert rev_result[_metric]['Winner'] in ['Answer 1', 'Answer 2']
+                winner = 'b' if ('1' in rev_result[_metric]['Winner']) else 'a'
+                win_count[_metric][winner] += 1
+                domain_win_count[category_domain_dict[category_id]][_metric][winner] += 1
+                overall_win_count[_metric][winner] += 1
+
+
+with open(f'batch_requests/{base_dir}/{base_dir}.txt', 'a') as f:
+    print(query_count)
+    print('a', model_a)
+    f.write('a ' + model_a + '\n')
+    print('b', model_b)
+    f.write('b ' + model_b + '\n')
+    for domain in domain_list:
+        print(f'(left) {model_a} : (right) {model_b} \t {domain}')
+        f.write(f'(left) {model_a} : (right) {model_b} \t {domain}' + '\n')
+        for _metric in metrics:
+            total_count = domain_win_count[domain][_metric]['a'] + domain_win_count[domain][_metric]['b']
+            win_a_percentage = (domain_win_count[domain][_metric]['a'] / total_count) * 100
+            win_b_percentage = (domain_win_count[domain][_metric]['b'] / total_count) * 100
+            print(f'{win_a_percentage:.2f}% : {win_b_percentage:.2f}%', domain_win_count[domain][_metric], _metric)
+            f.write(f'{win_a_percentage:.2f}% : {win_b_percentage:.2f}% {domain_win_count[domain][_metric]} {_metric} \n')
+        print('----'*8)
+        f.write('----'*8 + '\n')
+    print(f'(left) {model_a} : (right) {model_b} \t overall comparison')
+    f.write(f'(left) {model_a} : (right) {model_b} \t overall comparison\n')
+    for _metric in metrics:
+        total_count = overall_win_count[_metric]['a'] + overall_win_count[_metric]['b']
+        win_a_percentage = (overall_win_count[_metric]['a'] / total_count) * 100
+        win_b_percentage = (overall_win_count[_metric]['b'] / total_count) * 100
+        print(f'{win_a_percentage:.2f}% : {win_b_percentage:.2f}%', overall_win_count[_metric], _metric)
+        f.write(f'{win_a_percentage:.2f}% : {win_b_percentage:.2f}% {overall_win_count[_metric]} {_metric} \n')
+    f.write('====' * 8 + '\n\n')
\ No newline at end of file
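Editor's note: each query is judged twice with the answer order swapped ('ori' and 'rev'), so any position bias of the judge is charged to both models symmetrically. A toy illustration (made-up values) of how the two verdicts fold into the counts above:

ori_winner = "Answer 1"   # model_a shown first: '1' means model_a won
rev_winner = "Answer 1"   # model_b shown first: '1' means model_b won

a_wins = int("1" in ori_winner) + int("1" not in rev_winner)
b_wins = 2 - a_wins
print(a_wins, b_wins)     # -> 1 1: the judge simply preferred whichever answer came first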
diff --git a/reproduce/winrate_comparison/batch_winrate_eval_download.py b/reproduce/winrate_comparison/batch_winrate_eval_download.py
new file mode 100755
index 0000000..da48544
--- /dev/null
+++ b/reproduce/winrate_comparison/batch_winrate_eval_download.py
@@ -0,0 +1,53 @@
+import os
+os.environ["OPENAI_API_KEY"] = ""
+import re
+import time
+import json
+import jsonlines
+import tiktoken
+
+from tqdm import tqdm
+from openai import OpenAI
+
+client = OpenAI()
+
+def obtain_output_file_id(batches):
+    for batch in batches:
+        print(client.batches.retrieve(batch))
+        print(client.batches.retrieve(batch).output_file_id)
+
+def download_result(result_files, base_dir):
+    for _file in result_files:
+        content = client.files.content(_file).content
+        with open(f"batch_requests/{base_dir}/{_file}.temp", "wb") as f:
+            f.write(content)
+        results = []
+        with open(f"batch_requests/{base_dir}/{_file}.temp", 'r') as f:
+            for line in tqdm(f):
+                json_object = json.loads(line.strip())
+                results.append(json_object)
+        with open(f"batch_requests/{base_dir}/{_file}.json", "w") as json_file:
+            json.dump(results, json_file, indent=4)
+        os.remove(f"batch_requests/{base_dir}/{_file}.temp")
+
+# ================================
+
+# First Step: Please enter the relevant batch IDs here to obtain the output file IDs.
+batches = [
+    '',
+    '',
+    '',
+    '',
+    ''
+]
+obtain_output_file_id(batches)
+
+# Second Step: Please enter the output file IDs below to download the output files.
+# result_files = [
+#     '',
+#     '',
+#     '',
+#     '',
+#     ''
+# ]
+# download_result(result_files, base_dir='overall_comparison_rag')
\ No newline at end of file
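Editor's note: the two manual steps above can also be combined by polling each batch until it completes. A sketch only, assuming the same OpenAI SDK `status` and `output_file_id` fields used above:

import time
from openai import OpenAI

def wait_for_output_ids(batch_ids, poll_seconds=60):
    client = OpenAI()
    output_ids = {}
    pending = list(batch_ids)
    while pending:
        for batch_id in list(pending):
            batch = client.batches.retrieve(batch_id)
            if batch.status == "completed":
                output_ids[batch_id] = batch.output_file_id
                pending.remove(batch_id)
            elif batch.status in ("failed", "expired", "cancelled"):
                raise RuntimeError(f"Batch {batch_id} ended with status {batch.status}")
        if pending:
            time.sleep(poll_seconds)
    return output_ids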
diff --git a/reproduce/winrate_comparison/batch_winrate_eval_parse.py b/reproduce/winrate_comparison/batch_winrate_eval_parse.py
new file mode 100755
index 0000000..d213767
--- /dev/null
+++ b/reproduce/winrate_comparison/batch_winrate_eval_parse.py
@@ -0,0 +1,89 @@
+import os
+os.environ["OPENAI_API_KEY"] = ""
+import time
+import json
+import threading
+from tqdm import tqdm
+from openai import OpenAI
+from setproctitle import setproctitle
+
+# overall comparison - rag methods
+base_dir = 'overall_comparison_rag'
+# The JSONL file of batch requests that was created when the batch was uploaded.
+request_file = ''
+# Please enter the output file IDs below, which correspond to the downloaded output files.
+result_files = [
+    '',
+    '',
+    '',
+    '',
+    ''
+]
+
+setproctitle(f"parse-result-{base_dir}")
+print(f"Start parsing result files in {base_dir}...")
+
+def check_response_valid(data):
+    valid_keys = ['Comprehensiveness', 'Empowerment', 'Trustworthiness', 'Depth', 'Density', 'Overall Winner']
+    assert len(data) == 6
+    assert set(list(data.keys())) == set(valid_keys)
+    for _key in valid_keys:
+        assert data[_key]["Winner"] in ['Answer 1', 'Answer 2']
+        assert "Explanation" in list(data[_key].keys())
+
+def process_file(_file, request_dict):
+    client = OpenAI()
+    with open(f'batch_requests/{base_dir}/{_file}.json', 'r') as f:
+        data = json.load(f)
+    assert len(data) == len(request_dict)
+    parse_results = {}
+    for i in range(len(data)):
+        dp = data[i]
+        custom_id = dp["custom_id"]
+        try:
+            json_data = json.loads(dp["response"]["body"]["choices"][0]["message"]["content"])
+            check_response_valid(json_data)
+            parse_results[custom_id] = json_data
+        except Exception as e:
+            print(f"{_file} ({i}/{len(data)}) Found an error while parsing {custom_id} ({e}); re-requesting OpenAI")
+            while True:
+                try:
+                    response = client.chat.completions.create(
+                        model=request_dict[custom_id]["model"],
+                        messages=request_dict[custom_id]["messages"],
+                        response_format=request_dict[custom_id]["response_format"]
+                    )
+                    json_data = json.loads(response.choices[0].message.content)
+                    check_response_valid(json_data)
+                    parse_results[custom_id] = json_data
+                    print(f"{_file} ({i}/{len(data)}) re-request succeeded!")
+                    time.sleep(1)
+                    break
+                except Exception as e:
+                    print(f"{_file} ({i}/{len(data)}) {e}")
+                    print(f"{_file} ({i}/{len(data)}) retrying the OpenAI request")
+                    continue
+    with open(f'batch_requests/{base_dir}/{_file}-parse-result.json', 'w') as f:
+        json.dump(parse_results, f, indent=4, ensure_ascii=False)
+
+request_dict = {}
+with open(f'batch_requests/{base_dir}/{request_file}', 'r') as f:
+    for _line in f.readlines():
+        json_data = json.loads(_line)
+        request_dict[json_data["custom_id"]] = {
+            "model": json_data["body"]["model"],
+            "messages": json_data["body"]["messages"],
+            "response_format": json_data["body"]["response_format"]
+        }
+
+thread_list = []
+for _file in result_files:
+    thread = threading.Thread(target=process_file, args=(_file, request_dict))
+    thread_list.append(thread)
+
+for thread in thread_list:
+    thread.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
+    thread.start()
+
+for thread in thread_list:
+    thread.join()
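Editor's note: `process_file` above retries a malformed entry indefinitely. A bounded variant of the same validate-and-retry idea (illustrative only, not part of the commit) surfaces persistent failures instead of spinning forever:

import json

def request_with_retries(client, request, check, max_attempts=5):
    # `request` is one entry of request_dict; `check` is e.g. check_response_valid.
    for attempt in range(1, max_attempts + 1):
        response = client.chat.completions.create(
            model=request["model"],
            messages=request["messages"],
            response_format=request["response_format"],
        )
        try:
            data = json.loads(response.choices[0].message.content)
            check(data)
            return data
        except (json.JSONDecodeError, AssertionError, KeyError) as e:
            print(f"Attempt {attempt}/{max_attempts} failed: {e}")
    raise RuntimeError("Response never passed validation")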
diff --git a/reproduce/winrate_comparison/batch_winrate_eval_upload.py b/reproduce/winrate_comparison/batch_winrate_eval_upload.py
new file mode 100755
index 0000000..9762fee
--- /dev/null
+++ b/reproduce/winrate_comparison/batch_winrate_eval_upload.py
@@ -0,0 +1,181 @@
+import os
+os.environ["OPENAI_API_KEY"] = ""
+import re
+import time
+import json
+import jsonlines
+import tiktoken
+from pydantic import BaseModel, Field
+from typing import Literal
+
+from tqdm import tqdm
+from openai import OpenAI
+from openai.lib._pydantic import to_strict_json_schema
+from openai.lib._parsing._completions import type_to_response_format_param
+
+encoding = tiktoken.encoding_for_model('gpt-4o-mini')
+
+sys_prompt = """
+---Role---
+You are an expert tasked with evaluating two answers to the same question based on these criteria: **Comprehensiveness**, **Empowerment**, **Trustworthiness**, **Depth** and **Density**.
+"""
+
+prompt = """
+You will evaluate two answers to the same question based on these criteria: **Comprehensiveness**, **Empowerment**, **Trustworthiness**, **Depth** and **Density**.
+
+- **Comprehensiveness**: How much detail does the answer provide to cover all aspects and details of the question?
+- **Empowerment**: How well does the answer help the reader understand and make informed judgments about the topic?
+- **Trustworthiness**: Does the answer provide sufficient detail and align with common knowledge, enhancing its credibility?
+- **Depth**: Does the answer provide in-depth analysis or details, rather than just superficial information?
+- **Density**: Does the answer contain relevant information without uninformative or redundant content?
+
+For each criterion, choose the better answer (either Answer 1 or Answer 2) and explain why. Then, select an overall winner based on these criteria.
+
+Here is the question:
+{query}
+
+Here are the two answers:
+
+**Answer 1:**
+{answer1}
+
+**Answer 2:**
+{answer2}
+
+Evaluate both answers using the criteria listed above and provide detailed explanations for each criterion.
+
+Output your evaluation in the following JSON format:
+
+{{
+    "Comprehensiveness": {{
+        "Winner": "[Answer 1 or Answer 2]",
+        "Explanation": "[Provide explanation here]"
+    }},
+    "Empowerment": {{
+        "Winner": "[Answer 1 or Answer 2]",
+        "Explanation": "[Provide explanation here]"
+    }},
+    "Trustworthiness": {{
+        "Winner": "[Answer 1 or Answer 2]",
+        "Explanation": "[Provide explanation here]"
+    }},
+    "Depth": {{
+        "Winner": "[Answer 1 or Answer 2]",
+        "Explanation": "[Provide explanation here]"
+    }},
+    "Density": {{
+        "Winner": "[Answer 1 or Answer 2]",
+        "Explanation": "[Provide explanation here]"
+    }},
+    "Overall Winner": {{
+        "Winner": "[Answer 1 or Answer 2]",
+        "Explanation": "[Summarize why this answer is the overall winner on the above criteria]"
+    }}
+}}
+"""
+
+class Criterion(BaseModel):
+    Winner: Literal["Answer 1", "Answer 2"]
+    Explanation: str
+
+class Result(BaseModel):
+    Comprehensiveness: Criterion
+    Empowerment: Criterion
+    Trustworthiness: Criterion
+    Depth: Criterion
+    Density: Criterion
+    Overall_Winner: Criterion = Field(alias="Overall Winner")
+
+result_response_format = type_to_response_format_param(Result)
+
+if __name__ == "__main__":
+    with open('../../longervideos/dataset.json', 'r') as f:
+        questions = json.load(f)
+    our_answer_dir = 'answers-videorag'
+
+    # overall comparison - rag
+    base_dir = 'overall_comparison_rag'
+    com_answer_dir = [
+        'answers-naiverag',
+        'answers-graphrag-local',
+        'answers-graphrag-global',
+        'answers-lightrag-hybrid',
+    ]
+
+    requests = []
+    total_token_count = 0
+    for _id in questions:
+        video_list_name = questions[_id][0]['description']
+        video_querys = questions[_id][0]['questions']
+        data_path = f"../all_answers/{_id}-{video_list_name}"
+        for _com_answer_dir in com_answer_dir:
+            our_work_dir = os.path.join(data_path, our_answer_dir)
+            com_work_dir = os.path.join(data_path, _com_answer_dir)
+            for i in range(len(questions[_id][0]['questions'])):
+                # query
+                query_id = questions[_id][0]['questions'][i]["id"]
+                query = questions[_id][0]['questions'][i]["question"]
+                # our answer
+                with open(os.path.join(our_work_dir, f'answer_{query_id}.md'), 'r') as f:
+                    our_answer = f.read()
+                # com answer
+                with open(os.path.join(com_work_dir, f'answer_{query_id}.md'), 'r') as f:
+                    com_answer = f.read()
+                ori_prompt = prompt.format(query=query, answer1=our_answer, answer2=com_answer)
+                rev_prompt = prompt.format(query=query, answer1=com_answer, answer2=our_answer)
+
+                ori_request_data = {
+                    "custom_id": f"{_id}-{video_list_name}++query{query_id}++{our_answer_dir}++{_com_answer_dir}++ori",
+                    "method":
"POST", + "url": "/v1/chat/completions", + "body": { + "model": "gpt-4o-mini", + "messages": [ + {"role": "system", "content": sys_prompt}, + {"role": "user", "content": ori_prompt}, + ], + "response_format": result_response_format + }, + } + rev_request_data = { + "custom_id": f"{_id}-{video_list_name}++query{query_id}++{_com_answer_dir}++{our_answer_dir}++rev", + "method": "POST", + "url": "/v1/chat/completions", + "body": { + "model": "gpt-4o-mini", + "messages": [ + {"role": "system", "content": sys_prompt}, + {"role": "user", "content": rev_prompt}, + ], + "response_format": result_response_format + }, + } + requests.append(ori_request_data) + requests.append(rev_request_data) + + total_token_count += len(encoding.encode(ori_prompt)) + total_token_count += len(encoding.encode(rev_prompt)) + + run_time = 5 + os.makedirs(f'batch_requests/{base_dir}', exist_ok=True) + request_json_file_path = f'batch_requests/{base_dir}/{int(time.time())}.json' + with jsonlines.open(request_json_file_path, mode="w") as writer: + for request in requests: + writer.write(request) + print(f"Batch API requests written to {request_json_file_path}") + print(f"Price: {total_token_count / 1000000 * 0.075 * run_time}$") + + for k in range(run_time): + client = OpenAI() + batch_input_file = client.files.create( + file=open(request_json_file_path, "rb"), purpose="batch" + ) + batch_input_file_id = batch_input_file.id + + batch = client.batches.create( + input_file_id=batch_input_file_id, + endpoint="/v1/chat/completions", + completion_window="24h", + metadata={"description": f"runtime{k}: {request_json_file_path}"}, + ) + print(f"RunTime {k}: Batch {batch.id} has been created.") \ No newline at end of file diff --git a/videorag/__init__.py b/videorag/__init__.py new file mode 100755 index 0000000..d0de5bf --- /dev/null +++ b/videorag/__init__.py @@ -0,0 +1 @@ +from .videorag import VideoRAG, QueryParam \ No newline at end of file diff --git a/videorag/__pycache__/__init__.cpython-311.pyc b/videorag/__pycache__/__init__.cpython-311.pyc new file mode 100755 index 0000000..ccde740 Binary files /dev/null and b/videorag/__pycache__/__init__.cpython-311.pyc differ diff --git a/videorag/__pycache__/__init__.cpython-39.pyc b/videorag/__pycache__/__init__.cpython-39.pyc new file mode 100755 index 0000000..da5cb22 Binary files /dev/null and b/videorag/__pycache__/__init__.cpython-39.pyc differ diff --git a/videorag/__pycache__/_llm.cpython-311.pyc b/videorag/__pycache__/_llm.cpython-311.pyc new file mode 100755 index 0000000..ee08f3c Binary files /dev/null and b/videorag/__pycache__/_llm.cpython-311.pyc differ diff --git a/videorag/__pycache__/_op.cpython-311.pyc b/videorag/__pycache__/_op.cpython-311.pyc new file mode 100755 index 0000000..0bba1bc Binary files /dev/null and b/videorag/__pycache__/_op.cpython-311.pyc differ diff --git a/videorag/__pycache__/_splitter.cpython-311.pyc b/videorag/__pycache__/_splitter.cpython-311.pyc new file mode 100755 index 0000000..98db949 Binary files /dev/null and b/videorag/__pycache__/_splitter.cpython-311.pyc differ diff --git a/videorag/__pycache__/_utils.cpython-311.pyc b/videorag/__pycache__/_utils.cpython-311.pyc new file mode 100755 index 0000000..1f4ab6b Binary files /dev/null and b/videorag/__pycache__/_utils.cpython-311.pyc differ diff --git a/videorag/__pycache__/base.cpython-311.pyc b/videorag/__pycache__/base.cpython-311.pyc new file mode 100755 index 0000000..a2da037 Binary files /dev/null and b/videorag/__pycache__/base.cpython-311.pyc differ diff --git 
a/videorag/__pycache__/prompt.cpython-311.pyc b/videorag/__pycache__/prompt.cpython-311.pyc new file mode 100755 index 0000000..c4a10b1 Binary files /dev/null and b/videorag/__pycache__/prompt.cpython-311.pyc differ diff --git a/videorag/__pycache__/videorag.cpython-311.pyc b/videorag/__pycache__/videorag.cpython-311.pyc new file mode 100755 index 0000000..82776f9 Binary files /dev/null and b/videorag/__pycache__/videorag.cpython-311.pyc differ diff --git a/videorag/__pycache__/videorag.cpython-39.pyc b/videorag/__pycache__/videorag.cpython-39.pyc new file mode 100755 index 0000000..631b1f5 Binary files /dev/null and b/videorag/__pycache__/videorag.cpython-39.pyc differ diff --git a/videorag/_llm.py b/videorag/_llm.py new file mode 100755 index 0000000..f658234 --- /dev/null +++ b/videorag/_llm.py @@ -0,0 +1,178 @@ +import numpy as np + +from openai import AsyncOpenAI, AsyncAzureOpenAI, APIConnectionError, RateLimitError + +from tenacity import ( + retry, + stop_after_attempt, + wait_exponential, + retry_if_exception_type, +) +import os + +from ._utils import compute_args_hash, wrap_embedding_func_with_attrs +from .base import BaseKVStorage + +global_openai_async_client = None +global_azure_openai_async_client = None + + +def get_openai_async_client_instance(): + global global_openai_async_client + if global_openai_async_client is None: + global_openai_async_client = AsyncOpenAI() + return global_openai_async_client + + +def get_azure_openai_async_client_instance(): + global global_azure_openai_async_client + if global_azure_openai_async_client is None: + global_azure_openai_async_client = AsyncAzureOpenAI() + return global_azure_openai_async_client + + +@retry( + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=1, min=4, max=10), + retry=retry_if_exception_type((RateLimitError, APIConnectionError)), +) +async def openai_complete_if_cache( + model, prompt, system_prompt=None, history_messages=[], **kwargs +) -> str: + openai_async_client = get_openai_async_client_instance() + hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None) + messages = [] + if system_prompt: + messages.append({"role": "system", "content": system_prompt}) + messages.extend(history_messages) + messages.append({"role": "user", "content": prompt}) + if hashing_kv is not None: + args_hash = compute_args_hash(model, messages) + if_cache_return = await hashing_kv.get_by_id(args_hash) + if if_cache_return is not None: + return if_cache_return["return"] + + response = await openai_async_client.chat.completions.create( + model=model, messages=messages, **kwargs + ) + + if hashing_kv is not None: + await hashing_kv.upsert( + {args_hash: {"return": response.choices[0].message.content, "model": model}} + ) + await hashing_kv.index_done_callback() + return response.choices[0].message.content + + +async def gpt_4o_complete( + prompt, system_prompt=None, history_messages=[], **kwargs +) -> str: + return await openai_complete_if_cache( + "gpt-4o", + prompt, + system_prompt=system_prompt, + history_messages=history_messages, + **kwargs, + ) + + +async def gpt_4o_mini_complete( + prompt, system_prompt=None, history_messages=[], **kwargs +) -> str: + return await openai_complete_if_cache( + "gpt-4o-mini", + prompt, + system_prompt=system_prompt, + history_messages=history_messages, + **kwargs, + ) + + +@wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192) +@retry( + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=1, min=4, max=10), + retry=retry_if_exception_type((RateLimitError, 
APIConnectionError)), +) +async def openai_embedding(texts: list[str]) -> np.ndarray: + openai_async_client = get_openai_async_client_instance() + response = await openai_async_client.embeddings.create( + model="text-embedding-3-small", input=texts, encoding_format="float" + ) + return np.array([dp.embedding for dp in response.data]) + + +@retry( + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=4, max=10), + retry=retry_if_exception_type((RateLimitError, APIConnectionError)), +) +async def azure_openai_complete_if_cache( + deployment_name, prompt, system_prompt=None, history_messages=[], **kwargs +) -> str: + azure_openai_client = get_azure_openai_async_client_instance() + hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None) + messages = [] + if system_prompt: + messages.append({"role": "system", "content": system_prompt}) + messages.extend(history_messages) + messages.append({"role": "user", "content": prompt}) + if hashing_kv is not None: + args_hash = compute_args_hash(deployment_name, messages) + if_cache_return = await hashing_kv.get_by_id(args_hash) + if if_cache_return is not None: + return if_cache_return["return"] + + response = await azure_openai_client.chat.completions.create( + model=deployment_name, messages=messages, **kwargs + ) + + if hashing_kv is not None: + await hashing_kv.upsert( + { + args_hash: { + "return": response.choices[0].message.content, + "model": deployment_name, + } + } + ) + await hashing_kv.index_done_callback() + return response.choices[0].message.content + + +async def azure_gpt_4o_complete( + prompt, system_prompt=None, history_messages=[], **kwargs +) -> str: + return await azure_openai_complete_if_cache( + "gpt-4o", + prompt, + system_prompt=system_prompt, + history_messages=history_messages, + **kwargs, + ) + + +async def azure_gpt_4o_mini_complete( + prompt, system_prompt=None, history_messages=[], **kwargs +) -> str: + return await azure_openai_complete_if_cache( + "gpt-4o-mini", + prompt, + system_prompt=system_prompt, + history_messages=history_messages, + **kwargs, + ) + + +@wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192) +@retry( + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=4, max=10), + retry=retry_if_exception_type((RateLimitError, APIConnectionError)), +) +async def azure_openai_embedding(texts: list[str]) -> np.ndarray: + azure_openai_client = get_azure_openai_async_client_instance() + response = await azure_openai_client.embeddings.create( + model="text-embedding-3-small", input=texts, encoding_format="float" + ) + return np.array([dp.embedding for dp in response.data]) diff --git a/videorag/_op.py b/videorag/_op.py new file mode 100755 index 0000000..bc92a59 --- /dev/null +++ b/videorag/_op.py @@ -0,0 +1,744 @@ +import re +import json +import openai +import asyncio +import tiktoken +from typing import Union +from collections import Counter, defaultdict +from ._splitter import SeparatorSplitter +from ._utils import ( + logger, + clean_str, + compute_mdhash_id, + decode_tokens_by_tiktoken, + encode_string_by_tiktoken, + is_float_regex, + list_of_list_to_csv, + pack_user_ass_to_openai_messages, + split_string_by_multi_markers, + truncate_list_by_token_size, +) +from .base import ( + BaseGraphStorage, + BaseKVStorage, + BaseVectorStorage, + SingleCommunitySchema, + CommunitySchema, + TextChunkSchema, + QueryParam, +) +from .prompt import GRAPH_FIELD_SEP, PROMPTS +from ._videoutil import ( + retrieved_segment_caption, +) + +def chunking_by_token_size( + 
tokens_list: list[list[int]],
+    doc_keys,
+    tiktoken_model,
+    overlap_token_size=128,
+    max_token_size=1024,
+):
+
+    results = []
+    for index, tokens in enumerate(tokens_list):
+        chunk_token = []
+        lengths = []
+        for start in range(0, len(tokens), max_token_size - overlap_token_size):
+
+            chunk_token.append(tokens[start : start + max_token_size])
+            lengths.append(min(max_token_size, len(tokens) - start))
+
+        # Slightly tricky: the full token structure is list[list[list[int]]] for corpus(doc(chunk)), so it cannot be decoded in one call
+        chunk_token = tiktoken_model.decode_batch(chunk_token)
+        for i, chunk in enumerate(chunk_token):
+
+            results.append(
+                {
+                    "tokens": lengths[i],
+                    "content": chunk.strip(),
+                    "chunk_order_index": i,
+                    "full_doc_id": doc_keys[index],
+                }
+            )
+
+    return results
+
+
+def chunking_by_video_segments(
+    tokens_list: list[list[int]],
+    doc_keys,
+    tiktoken_model,
+    max_token_size=1024,
+):
+    # make sure each segment is not larger than max_token_size
+    for index in range(len(tokens_list)):
+        if len(tokens_list[index]) > max_token_size:
+            tokens_list[index] = tokens_list[index][:max_token_size]
+
+    results = []
+    chunk_token = []
+    chunk_segment_ids = []
+    chunk_order_index = 0
+    for index, tokens in enumerate(tokens_list):
+
+        if len(chunk_token) + len(tokens) <= max_token_size:
+            # add new segment
+            chunk_token += tokens.copy()
+            chunk_segment_ids.append(doc_keys[index])
+        else:
+            # save the current chunk
+            chunk = tiktoken_model.decode(chunk_token)
+            results.append(
+                {
+                    "tokens": len(chunk_token),
+                    "content": chunk.strip(),
+                    "chunk_order_index": chunk_order_index,
+                    "video_segment_id": chunk_segment_ids,
+                }
+            )
+            # start a new chunk beginning with the current segment
+            chunk_token = []
+            chunk_segment_ids = []
+            chunk_token += tokens.copy()
+            chunk_segment_ids.append(doc_keys[index])
+            chunk_order_index += 1
+
+    # save the last chunk
+    if len(chunk_token) > 0:
+        chunk = tiktoken_model.decode(chunk_token)
+        results.append(
+            {
+                "tokens": len(chunk_token),
+                "content": chunk.strip(),
+                "chunk_order_index": chunk_order_index,
+                "video_segment_id": chunk_segment_ids,
+            }
+        )
+
+    return results
+
+
+def chunking_by_seperators(
+    tokens_list: list[list[int]],
+    doc_keys,
+    tiktoken_model,
+    overlap_token_size=128,
+    max_token_size=1024,
+):
+
+    splitter = SeparatorSplitter(
+        separators=[
+            tiktoken_model.encode(s) for s in PROMPTS["default_text_separator"]
+        ],
+        chunk_size=max_token_size,
+        chunk_overlap=overlap_token_size,
+    )
+    results = []
+    for index, tokens in enumerate(tokens_list):
+        chunk_token = splitter.split_tokens(tokens)
+        lengths = [len(c) for c in chunk_token]
+
+        # Slightly tricky: the full token structure is list[list[list[int]]] for corpus(doc(chunk)), so it cannot be decoded in one call
+        chunk_token = tiktoken_model.decode_batch(chunk_token)
+        for i, chunk in enumerate(chunk_token):
+
+            results.append(
+                {
+                    "tokens": lengths[i],
+                    "content": chunk.strip(),
+                    "chunk_order_index": i,
+                    "full_doc_id": doc_keys[index],
+                }
+            )
+
+    return results
+
+
+def get_chunks(new_videos, chunk_func=chunking_by_video_segments, **chunk_func_params):
+    inserting_chunks = {}
+
+    new_videos_list = list(new_videos.keys())
+    for video_name in new_videos_list:
+        segment_id_list = list(new_videos[video_name].keys())
+        docs = [new_videos[video_name][index]["content"] for index in segment_id_list]
+        doc_keys = [f'{video_name}_{index}' for index in segment_id_list]
+
+        ENCODER = tiktoken.encoding_for_model("gpt-4o")
+        tokens = ENCODER.encode_batch(docs, num_threads=16)
+
chunks = chunk_func( + tokens, doc_keys=doc_keys, tiktoken_model=ENCODER, **chunk_func_params + ) + + for chunk in chunks: + inserting_chunks.update( + {compute_mdhash_id(chunk["content"], prefix="chunk-"): chunk} + ) + + return inserting_chunks + + +async def _handle_entity_relation_summary( + entity_or_relation_name: str, + description: str, + global_config: dict, +) -> str: + use_llm_func: callable = global_config["cheap_model_func"] + llm_max_tokens = global_config["cheap_model_max_token_size"] + tiktoken_model_name = global_config["tiktoken_model_name"] + summary_max_tokens = global_config["entity_summary_to_max_tokens"] + + tokens = encode_string_by_tiktoken(description, model_name=tiktoken_model_name) + if len(tokens) < summary_max_tokens: # No need for summary + return description + prompt_template = PROMPTS["summarize_entity_descriptions"] + use_description = decode_tokens_by_tiktoken( + tokens[:llm_max_tokens], model_name=tiktoken_model_name + ) + context_base = dict( + entity_name=entity_or_relation_name, + description_list=use_description.split(GRAPH_FIELD_SEP), + ) + use_prompt = prompt_template.format(**context_base) + logger.debug(f"Trigger summary: {entity_or_relation_name}") + summary = await use_llm_func(use_prompt, max_tokens=summary_max_tokens) + return summary + + +async def _handle_single_entity_extraction( + record_attributes: list[str], + chunk_key: str, +): + if len(record_attributes) < 4 or record_attributes[0] != '"entity"': + return None + # add this record as a node in the G + entity_name = clean_str(record_attributes[1].upper()) + if not entity_name.strip(): + return None + entity_type = clean_str(record_attributes[2].upper()) + entity_description = clean_str(record_attributes[3]) + entity_source_id = chunk_key + return dict( + entity_name=entity_name, + entity_type=entity_type, + description=entity_description, + source_id=entity_source_id, + ) + + +async def _handle_single_relationship_extraction( + record_attributes: list[str], + chunk_key: str, +): + if len(record_attributes) < 5 or record_attributes[0] != '"relationship"': + return None + # add this record as edge + source = clean_str(record_attributes[1].upper()) + target = clean_str(record_attributes[2].upper()) + edge_description = clean_str(record_attributes[3]) + edge_source_id = chunk_key + weight = ( + float(record_attributes[-1]) if is_float_regex(record_attributes[-1]) else 1.0 + ) + return dict( + src_id=source, + tgt_id=target, + weight=weight, + description=edge_description, + source_id=edge_source_id, + ) + + +async def _merge_nodes_then_upsert( + entity_name: str, + nodes_data: list[dict], + knowledge_graph_inst: BaseGraphStorage, + global_config: dict, +): + already_entitiy_types = [] + already_source_ids = [] + already_description = [] + + already_node = await knowledge_graph_inst.get_node(entity_name) + if already_node is not None: + already_entitiy_types.append(already_node["entity_type"]) + already_source_ids.extend( + split_string_by_multi_markers(already_node["source_id"], [GRAPH_FIELD_SEP]) + ) + already_description.append(already_node["description"]) + + entity_type = sorted( + Counter( + [dp["entity_type"] for dp in nodes_data] + already_entitiy_types + ).items(), + key=lambda x: x[1], + reverse=True, + )[0][0] + description = GRAPH_FIELD_SEP.join( + sorted(set([dp["description"] for dp in nodes_data] + already_description)) + ) + source_id = GRAPH_FIELD_SEP.join( + set([dp["source_id"] for dp in nodes_data] + already_source_ids) + ) + description = await 
_handle_entity_relation_summary( + entity_name, description, global_config + ) + node_data = dict( + entity_type=entity_type, + description=description, + source_id=source_id, + ) + await knowledge_graph_inst.upsert_node( + entity_name, + node_data=node_data, + ) + node_data["entity_name"] = entity_name + return node_data + + +async def _merge_edges_then_upsert( + src_id: str, + tgt_id: str, + edges_data: list[dict], + knowledge_graph_inst: BaseGraphStorage, + global_config: dict, +): + already_weights = [] + already_source_ids = [] + already_description = [] + already_order = [] + if await knowledge_graph_inst.has_edge(src_id, tgt_id): + already_edge = await knowledge_graph_inst.get_edge(src_id, tgt_id) + already_weights.append(already_edge["weight"]) + already_source_ids.extend( + split_string_by_multi_markers(already_edge["source_id"], [GRAPH_FIELD_SEP]) + ) + already_description.append(already_edge["description"]) + already_order.append(already_edge.get("order", 1)) + + # [numberchiffre]: `Relationship.order` is only returned from DSPy's predictions + order = min([dp.get("order", 1) for dp in edges_data] + already_order) + weight = sum([dp["weight"] for dp in edges_data] + already_weights) + description = GRAPH_FIELD_SEP.join( + sorted(set([dp["description"] for dp in edges_data] + already_description)) + ) + source_id = GRAPH_FIELD_SEP.join( + set([dp["source_id"] for dp in edges_data] + already_source_ids) + ) + for need_insert_id in [src_id, tgt_id]: + if not (await knowledge_graph_inst.has_node(need_insert_id)): + await knowledge_graph_inst.upsert_node( + need_insert_id, + node_data={ + "source_id": source_id, + "description": description, + "entity_type": '"UNKNOWN"', + }, + ) + description = await _handle_entity_relation_summary( + (src_id, tgt_id), description, global_config + ) + await knowledge_graph_inst.upsert_edge( + src_id, + tgt_id, + edge_data=dict( + weight=weight, description=description, source_id=source_id, order=order + ), + ) + return_edge_data = dict( + src_tgt=(src_id, tgt_id), + description=description, + weight=weight + ) + return return_edge_data + + +async def extract_entities( + chunks: dict[str, TextChunkSchema], + knowledge_graph_inst: BaseGraphStorage, + entity_vdb: BaseVectorStorage, + global_config: dict, +) -> Union[BaseGraphStorage, None]: + use_llm_func: callable = global_config["best_model_func"] + entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"] + + ordered_chunks = list(chunks.items()) + + entity_extract_prompt = PROMPTS["entity_extraction"] + context_base = dict( + tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"], + record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"], + completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"], + entity_types=",".join(PROMPTS["DEFAULT_ENTITY_TYPES"]), + ) + continue_prompt = PROMPTS["entiti_continue_extraction"] + if_loop_prompt = PROMPTS["entiti_if_loop_extraction"] + + already_processed = 0 + already_entities = 0 + already_relations = 0 + + async def _process_single_content(chunk_key_dp: tuple[str, TextChunkSchema]): + nonlocal already_processed, already_entities, already_relations + chunk_key = chunk_key_dp[0] + chunk_dp = chunk_key_dp[1] + content = chunk_dp["content"] + hint_prompt = entity_extract_prompt.format(**context_base, input_text=content) + final_result = await use_llm_func(hint_prompt) + + history = pack_user_ass_to_openai_messages(hint_prompt, final_result) + for now_glean_index in range(entity_extract_max_gleaning): + glean_result = await 
use_llm_func(continue_prompt, history_messages=history) + + history += pack_user_ass_to_openai_messages(continue_prompt, glean_result) + final_result += glean_result + if now_glean_index == entity_extract_max_gleaning - 1: + break + + if_loop_result: str = await use_llm_func( + if_loop_prompt, history_messages=history + ) + if_loop_result = if_loop_result.strip().strip('"').strip("'").lower() + if if_loop_result != "yes": + break + + records = split_string_by_multi_markers( + final_result, + [context_base["record_delimiter"], context_base["completion_delimiter"]], + ) + + maybe_nodes = defaultdict(list) + maybe_edges = defaultdict(list) + for record in records: + record = re.search(r"\((.*)\)", record) + if record is None: + continue + record = record.group(1) + record_attributes = split_string_by_multi_markers( + record, [context_base["tuple_delimiter"]] + ) + if_entities = await _handle_single_entity_extraction( + record_attributes, chunk_key + ) + if if_entities is not None: + maybe_nodes[if_entities["entity_name"]].append(if_entities) + continue + + if_relation = await _handle_single_relationship_extraction( + record_attributes, chunk_key + ) + if if_relation is not None: + maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append( + if_relation + ) + already_processed += 1 + already_entities += len(maybe_nodes) + already_relations += len(maybe_edges) + now_ticks = PROMPTS["process_tickers"][ + already_processed % len(PROMPTS["process_tickers"]) + ] + print( + f"{now_ticks} Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r", + end="", + flush=True, + ) + return dict(maybe_nodes), dict(maybe_edges) + + # use_llm_func is wrapped in ascynio.Semaphore, limiting max_async callings + results = await asyncio.gather( + *[_process_single_content(c) for c in ordered_chunks] + ) + print() # clear the progress bar + maybe_nodes = defaultdict(list) + maybe_edges = defaultdict(list) + for m_nodes, m_edges in results: + for k, v in m_nodes.items(): + maybe_nodes[k].extend(v) + for k, v in m_edges.items(): + # it's undirected graph + maybe_edges[tuple(sorted(k))].extend(v) + all_entities_data = await asyncio.gather( + *[ + _merge_nodes_then_upsert(k, v, knowledge_graph_inst, global_config) + for k, v in maybe_nodes.items() + ] + ) + all_edges_data = await asyncio.gather( + *[ + _merge_edges_then_upsert(k[0], k[1], v, knowledge_graph_inst, global_config) + for k, v in maybe_edges.items() + ] + ) + if not len(all_entities_data): + logger.warning("Didn't extract any entities, maybe your LLM is not working") + return None + if entity_vdb is not None: + data_for_vdb = { + compute_mdhash_id(dp["entity_name"], prefix="ent-"): { + "content": dp["entity_name"] + dp["description"], + "entity_name": dp["entity_name"], + } + for dp in all_entities_data + } + await entity_vdb.upsert(data_for_vdb) + return knowledge_graph_inst, all_entities_data, all_edges_data + + +async def _find_most_related_segments_from_entities( + topk_chunks: int, + node_datas: list[dict], + text_chunks_db: BaseKVStorage[TextChunkSchema], + knowledge_graph_inst: BaseGraphStorage, +): + text_units = [ + split_string_by_multi_markers(dp["source_id"], [GRAPH_FIELD_SEP]) + for dp in node_datas + ] + edges = await asyncio.gather( + *[knowledge_graph_inst.get_node_edges(dp["entity_name"]) for dp in node_datas] + ) + all_one_hop_nodes = set() + for this_edges in edges: + if not this_edges: + continue + all_one_hop_nodes.update([e[1] for e in this_edges]) + 
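+    # For each retrieved entity we now know (a) the chunks it came from ("text_units")
+    # and (b) its one-hop neighbours in the graph. Below, each candidate chunk is
+    # ranked by how many of those neighbours also cite it ("relation_counts"), so
+    # chunks shared by several related entities are kept when truncating to topk.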
all_one_hop_nodes = list(all_one_hop_nodes)
+    all_one_hop_nodes_data = await asyncio.gather(
+        *[knowledge_graph_inst.get_node(e) for e in all_one_hop_nodes]
+    )
+    all_one_hop_text_units_lookup = {
+        k: set(split_string_by_multi_markers(v["source_id"], [GRAPH_FIELD_SEP]))
+        for k, v in zip(all_one_hop_nodes, all_one_hop_nodes_data)
+        if v is not None
+    }
+    all_text_units_lookup = {}
+    for index, (this_text_units, this_edges) in enumerate(zip(text_units, edges)):
+        for c_id in this_text_units:
+            if c_id in all_text_units_lookup:
+                continue
+            relation_counts = 0
+            for e in this_edges:
+                if (
+                    e[1] in all_one_hop_text_units_lookup
+                    and c_id in all_one_hop_text_units_lookup[e[1]]
+                ):
+                    relation_counts += 1
+            all_text_units_lookup[c_id] = {
+                "data": await text_chunks_db.get_by_id(c_id),
+                "order": index,
+                "relation_counts": relation_counts,
+            }
+    if any([v is None for v in all_text_units_lookup.values()]):
+        logger.warning("Text chunks are missing, maybe the storage is damaged")
+    all_text_units = [
+        {"id": k, **v} for k, v in all_text_units_lookup.items() if v is not None
+    ]
+    sorted_text_units = sorted(
+        all_text_units, key=lambda x: -x["relation_counts"]
+    )[:topk_chunks]
+
+    chunk_related_segments = set()
+    for _chunk_data in sorted_text_units:
+        for s_id in _chunk_data['data']['video_segment_id']:
+            chunk_related_segments.add(s_id)
+
+    return chunk_related_segments
+
+async def _refine_entity_retrieval_query(
+    query,
+    query_param: QueryParam,
+    global_config: dict,
+):
+    use_llm_func: callable = global_config["cheap_model_func"]
+    query_rewrite_prompt = PROMPTS["query_rewrite_for_entity_retrieval"]
+    query_rewrite_prompt = query_rewrite_prompt.format(input_text=query)
+    final_result = await use_llm_func(query_rewrite_prompt)
+    return final_result
+
+async def _refine_visual_retrieval_query(
+    query,
+    query_param: QueryParam,
+    global_config: dict,
+):
+    use_llm_func: callable = global_config["cheap_model_func"]
+    query_rewrite_prompt = PROMPTS["query_rewrite_for_visual_retrieval"]
+    query_rewrite_prompt = query_rewrite_prompt.format(input_text=query)
+    final_result = await use_llm_func(query_rewrite_prompt)
+    return final_result
+
+async def _extract_keywords_query(
+    query,
+    query_param: QueryParam,
+    global_config: dict,
+):
+    use_llm_func: callable = global_config["cheap_model_func"]
+    keywords_prompt = PROMPTS["keywords_extraction"]
+    keywords_prompt = keywords_prompt.format(input_text=query)
+    final_result = await use_llm_func(keywords_prompt)
+    return final_result
+
+async def videorag_query(
+    query,
+    entities_vdb,
+    text_chunks_db,
+    chunks_vdb,
+    video_path_db,
+    video_segments,
+    video_segment_feature_vdb,
+    knowledge_graph_inst,
+    caption_model,
+    caption_tokenizer,
+    query_param: QueryParam,
+    global_config: dict,
+) -> str:
+    use_model_func = global_config["best_model_func"]
+
+    # naive chunks
+    results = await chunks_vdb.query(query, top_k=query_param.top_k)
+    if not len(results):
+        return PROMPTS["fail_response"]
+    chunks_ids = [r["id"] for r in results]
+    chunks = await text_chunks_db.get_by_ids(chunks_ids)
+
+    maybe_trun_chunks = truncate_list_by_token_size(
+        chunks,
+        key=lambda x: x["content"],
+        max_token_size=query_param.naive_max_token_for_text_unit,
+    )
+    logger.info(f"Truncated {len(chunks)} to {len(maybe_trun_chunks)} chunks")
+    section = "-----New Chunk-----\n".join([c["content"] for c in maybe_trun_chunks])
+    retrieved_chunk_context = section
+
+    # entity-based retrieval
+    query_for_entity_retrieval = await _refine_entity_retrieval_query(
+        query,
+        query_param,
+        global_config,
+    )
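+    # Two complementary retrieval paths follow: the rewritten query above is matched
+    # against the entity vector index and expanded through the knowledge graph
+    # (transcript-grounded segments), while a second, visually oriented rewrite is
+    # matched against the segment feature index. The union of both is captioned below.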
+    entity_results = await entities_vdb.query(query_for_entity_retrieval, top_k=query_param.top_k)
+    entity_retrieved_segments = set()
+    if len(entity_results):
+        node_datas = await asyncio.gather(
+            *[knowledge_graph_inst.get_node(r["entity_name"]) for r in entity_results]
+        )
+        if not all([n is not None for n in node_datas]):
+            logger.warning("Some nodes are missing, maybe the storage is damaged")
+        node_degrees = await asyncio.gather(
+            *[knowledge_graph_inst.node_degree(r["entity_name"]) for r in entity_results]
+        )
+        node_datas = [
+            {**n, "entity_name": k["entity_name"], "rank": d}
+            for k, n, d in zip(entity_results, node_datas, node_degrees)
+            if n is not None
+        ]
+        entity_retrieved_segments = entity_retrieved_segments.union(await _find_most_related_segments_from_entities(
+            global_config["retrieval_topk_chunks"], node_datas, text_chunks_db, knowledge_graph_inst
+        ))
+
+    # visual retrieval
+    query_for_visual_retrieval = await _refine_visual_retrieval_query(
+        query,
+        query_param,
+        global_config,
+    )
+    segment_results = await video_segment_feature_vdb.query(query_for_visual_retrieval)
+    visual_retrieved_segments = set()
+    if len(segment_results):
+        for n in segment_results:
+            visual_retrieved_segments.add(n['__id__'])
+
+    # caption
+    retrieved_segments = list(entity_retrieved_segments.union(visual_retrieved_segments))
+    retrieved_segments = sorted(
+        retrieved_segments,
+        key=lambda x: (
+            '_'.join(x.split('_')[:-1]),  # video_name
+            int(x.split('_')[-1])  # index
+        )
+    )
+    print(query_for_entity_retrieval)
+    print(f"Retrieved Text Segments {entity_retrieved_segments}")
+    print(query_for_visual_retrieval)
+    print(f"Retrieved Visual Segments {visual_retrieved_segments}")
+
+    already_processed = 0
+    async def _filter_single_segment(knowledge: str, segment_key_dp: tuple[str, str]):
+        nonlocal use_model_func, already_processed
+        segment_key = segment_key_dp[0]
+        segment_content = segment_key_dp[1]
+        filter_prompt = PROMPTS["filtering_segment"]
+        filter_prompt = filter_prompt.format(caption=segment_content, knowledge=knowledge)
+        result = await use_model_func(filter_prompt)
+        already_processed += 1
+        now_ticks = PROMPTS["process_tickers"][
+            already_processed % len(PROMPTS["process_tickers"])
+        ]
+        print(
+            f"{now_ticks} Checked {already_processed} segments\r",
+            end="",
+            flush=True,
+        )
+        return (segment_key, result)
+
+    rough_captions = {}
+    for s_id in retrieved_segments:
+        video_name = '_'.join(s_id.split('_')[:-1])
+        index = s_id.split('_')[-1]
+        rough_captions[s_id] = video_segments._data[video_name][index]["content"]
+    results = await asyncio.gather(
+        *[_filter_single_segment(query, (s_id, rough_captions[s_id])) for s_id in rough_captions]
+    )
+    remain_segments = [x[0] for x in results if 'yes' in x[1].lower()]
+    print(f"{len(remain_segments)} Video Segments remain after filtering")
+    if len(remain_segments) == 0:
+        print("Since no segments remain after filtering, we utilize all the retrieved segments.")
+        remain_segments = retrieved_segments
+    print(f"Remaining segments {remain_segments}")
+
+    # keyword extraction for fine-grained captioning
+    keywords_for_caption = await _extract_keywords_query(
+        query,
+        query_param,
+        global_config,
+    )
+    print(f"Keywords: {keywords_for_caption}")
+    caption_results = retrieved_segment_caption(
+        caption_model,
+        caption_tokenizer,
+        keywords_for_caption,
+        remain_segments,
+        video_path_db,
+        video_segments,
+        num_sampled_frames=global_config['fine_num_frames_per_segment']
+    )
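+    # The filtered segments are re-captioned with the extracted query keywords as
+    # guidance, so the fine-grained captions can focus on query-relevant visual
+    # detail rather than generic scene descriptions.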
[["video_name", "start_time", "end_time", "content"]] + for s_id in caption_results: + video_name = '_'.join(s_id.split('_')[:-1]) + index = s_id.split('_')[-1] + start_time = eval(video_segments._data[video_name][index]["time"].split('-')[0]) + end_time = eval(video_segments._data[video_name][index]["time"].split('-')[1]) + start_time = f"{start_time // 3600}:{(start_time % 3600) // 60}:{start_time % 60}" + end_time = f"{end_time // 3600}:{(end_time % 3600) // 60}:{end_time % 60}" + text_units_section_list.append([video_name, start_time, end_time, caption_results[s_id]]) + text_units_context = list_of_list_to_csv(text_units_section_list) + + retreived_video_context = f"\n-----Retrieved Knowledge From Videos-----\n```csv\n{text_units_context}\n```\n" + + if query_param.wo_reference: + sys_prompt_temp = PROMPTS["videorag_response_wo_reference"] + else: + sys_prompt_temp = PROMPTS["videorag_response"] + + sys_prompt = sys_prompt_temp.format( + video_data=retreived_video_context, + chunk_data=retreived_chunk_context, + response_type=query_param.response_type + ) + response = await use_model_func( + query, + system_prompt=sys_prompt, + ) + return response diff --git a/videorag/_splitter.py b/videorag/_splitter.py new file mode 100755 index 0000000..1054d17 --- /dev/null +++ b/videorag/_splitter.py @@ -0,0 +1,94 @@ +from typing import List, Optional, Union, Literal + +class SeparatorSplitter: + def __init__( + self, + separators: Optional[List[List[int]]] = None, + keep_separator: Union[bool, Literal["start", "end"]] = "end", + chunk_size: int = 4000, + chunk_overlap: int = 200, + length_function: callable = len, + ): + self._separators = separators or [] + self._keep_separator = keep_separator + self._chunk_size = chunk_size + self._chunk_overlap = chunk_overlap + self._length_function = length_function + + def split_tokens(self, tokens: List[int]) -> List[List[int]]: + splits = self._split_tokens_with_separators(tokens) + return self._merge_splits(splits) + + def _split_tokens_with_separators(self, tokens: List[int]) -> List[List[int]]: + splits = [] + current_split = [] + i = 0 + while i < len(tokens): + separator_found = False + for separator in self._separators: + if tokens[i:i+len(separator)] == separator: + if self._keep_separator in [True, "end"]: + current_split.extend(separator) + if current_split: + splits.append(current_split) + current_split = [] + if self._keep_separator == "start": + current_split.extend(separator) + i += len(separator) + separator_found = True + break + if not separator_found: + current_split.append(tokens[i]) + i += 1 + if current_split: + splits.append(current_split) + return [s for s in splits if s] + + def _merge_splits(self, splits: List[List[int]]) -> List[List[int]]: + if not splits: + return [] + + merged_splits = [] + current_chunk = [] + + for split in splits: + if not current_chunk: + current_chunk = split + elif self._length_function(current_chunk) + self._length_function(split) <= self._chunk_size: + current_chunk.extend(split) + else: + merged_splits.append(current_chunk) + current_chunk = split + + if current_chunk: + merged_splits.append(current_chunk) + + if len(merged_splits) == 1 and self._length_function(merged_splits[0]) > self._chunk_size: + return self._split_chunk(merged_splits[0]) + + if self._chunk_overlap > 0: + return self._enforce_overlap(merged_splits) + + return merged_splits + + def _split_chunk(self, chunk: List[int]) -> List[List[int]]: + result = [] + for i in range(0, len(chunk), self._chunk_size - self._chunk_overlap): + 
+    def _split_chunk(self, chunk: List[int]) -> List[List[int]]:
+        result = []
+        for i in range(0, len(chunk), self._chunk_size - self._chunk_overlap):
+            new_chunk = chunk[i:i + self._chunk_size]
+            if len(new_chunk) > self._chunk_overlap:  # only keep a chunk if it is longer than the overlap
+                result.append(new_chunk)
+        return result
+
+    def _enforce_overlap(self, chunks: List[List[int]]) -> List[List[int]]:
+        result = []
+        for i, chunk in enumerate(chunks):
+            if i == 0:
+                result.append(chunk)
+            else:
+                overlap = chunks[i-1][-self._chunk_overlap:]
+                new_chunk = overlap + chunk
+                if self._length_function(new_chunk) > self._chunk_size:
+                    new_chunk = new_chunk[:self._chunk_size]
+                result.append(new_chunk)
+        return result
+
diff --git a/videorag/_storage/__init__.py b/videorag/_storage/__init__.py
new file mode 100755
index 0000000..99adae6
--- /dev/null
+++ b/videorag/_storage/__init__.py
@@ -0,0 +1,5 @@
+from .gdb_networkx import NetworkXStorage
+from .gdb_neo4j import Neo4jStorage
+from .vdb_hnswlib import HNSWVectorStorage
+from .vdb_nanovectordb import NanoVectorDBStorage, NanoVectorDBVideoSegmentStorage
+from .kv_json import JsonKVStorage
diff --git a/videorag/_storage/__pycache__/__init__.cpython-311.pyc b/videorag/_storage/__pycache__/__init__.cpython-311.pyc
new file mode 100755
index 0000000..d52f0fa
Binary files /dev/null and b/videorag/_storage/__pycache__/__init__.cpython-311.pyc differ
diff --git a/videorag/_storage/__pycache__/gdb_neo4j.cpython-311.pyc b/videorag/_storage/__pycache__/gdb_neo4j.cpython-311.pyc
new file mode 100755
index 0000000..dc49ffe
Binary files /dev/null and b/videorag/_storage/__pycache__/gdb_neo4j.cpython-311.pyc differ
diff --git a/videorag/_storage/__pycache__/gdb_networkx.cpython-311.pyc b/videorag/_storage/__pycache__/gdb_networkx.cpython-311.pyc
new file mode 100755
index 0000000..ac53056
Binary files /dev/null and b/videorag/_storage/__pycache__/gdb_networkx.cpython-311.pyc differ
diff --git a/videorag/_storage/__pycache__/kv_json.cpython-311.pyc b/videorag/_storage/__pycache__/kv_json.cpython-311.pyc
new file mode 100755
index 0000000..7ab1fd3
Binary files /dev/null and b/videorag/_storage/__pycache__/kv_json.cpython-311.pyc differ
diff --git a/videorag/_storage/__pycache__/vdb_hnswlib.cpython-311.pyc b/videorag/_storage/__pycache__/vdb_hnswlib.cpython-311.pyc
new file mode 100755
index 0000000..93534c6
Binary files /dev/null and b/videorag/_storage/__pycache__/vdb_hnswlib.cpython-311.pyc differ
diff --git a/videorag/_storage/__pycache__/vdb_nanovectordb.cpython-311.pyc b/videorag/_storage/__pycache__/vdb_nanovectordb.cpython-311.pyc
new file mode 100755
index 0000000..437f0a3
Binary files /dev/null and b/videorag/_storage/__pycache__/vdb_nanovectordb.cpython-311.pyc differ
diff --git a/videorag/_storage/gdb_neo4j.py b/videorag/_storage/gdb_neo4j.py
new file mode 100755
index 0000000..e45634a
--- /dev/null
+++ b/videorag/_storage/gdb_neo4j.py
@@ -0,0 +1,330 @@
+import json
+import asyncio
+from collections import defaultdict
+from neo4j import AsyncGraphDatabase
+from dataclasses import dataclass
+from typing import Union
+from ..base import BaseGraphStorage, SingleCommunitySchema
+from .._utils import logger
+from ..prompt import GRAPH_FIELD_SEP
+
+neo4j_lock = asyncio.Lock()
+
+
+def make_path_idable(path):
+    return path.replace(".", "_").replace("/", "__").replace("-", "_")
+
+
+@dataclass
+class Neo4jStorage(BaseGraphStorage):
+    def __post_init__(self):
+        self.neo4j_url = self.global_config["addon_params"].get("neo4j_url", None)
+        self.neo4j_auth = self.global_config["addon_params"].get("neo4j_auth", None)
+        self.namespace = (
+            f"{make_path_idable(self.global_config['working_dir'])}__{self.namespace}"
+        )
+
logger.info(f"Using the label {self.namespace} for Neo4j as identifier") + if self.neo4j_url is None or self.neo4j_auth is None: + raise ValueError("Missing neo4j_url or neo4j_auth in addon_params") + self.async_driver = AsyncGraphDatabase.driver( + self.neo4j_url, auth=self.neo4j_auth + ) + + # async def create_database(self): + # async with self.async_driver.session() as session: + # try: + # constraints = await session.run("SHOW CONSTRAINTS") + # # TODO I don't know why CREATE CONSTRAINT IF NOT EXISTS still trigger error + # # so have to check if the constrain exists + # constrain_exists = False + + # async for record in constraints: + # if ( + # self.namespace in record["labelsOrTypes"] + # and "id" in record["properties"] + # and record["type"] == "UNIQUENESS" + # ): + # constrain_exists = True + # break + # if not constrain_exists: + # await session.run( + # f"CREATE CONSTRAINT FOR (n:{self.namespace}) REQUIRE n.id IS UNIQUE" + # ) + # logger.info(f"Add constraint for namespace: {self.namespace}") + + # except Exception as e: + # logger.error(f"Error accessing or setting up the database: {str(e)}") + # raise + + async def _init_workspace(self): + await self.async_driver.verify_authentication() + await self.async_driver.verify_connectivity() + # TODOLater: create database if not exists always cause an error when async + # await self.create_database() + + async def index_start_callback(self): + logger.info("Init Neo4j workspace") + await self._init_workspace() + + async def has_node(self, node_id: str) -> bool: + async with self.async_driver.session() as session: + result = await session.run( + f"MATCH (n:{self.namespace}) WHERE n.id = $node_id RETURN COUNT(n) > 0 AS exists", + node_id=node_id, + ) + record = await result.single() + return record["exists"] if record else False + + async def has_edge(self, source_node_id: str, target_node_id: str) -> bool: + async with self.async_driver.session() as session: + result = await session.run( + f"MATCH (s:{self.namespace})-[r]->(t:{self.namespace}) " + "WHERE s.id = $source_id AND t.id = $target_id " + "RETURN COUNT(r) > 0 AS exists", + source_id=source_node_id, + target_id=target_node_id, + ) + record = await result.single() + return record["exists"] if record else False + + async def node_degree(self, node_id: str) -> int: + async with self.async_driver.session() as session: + result = await session.run( + f"MATCH (n:{self.namespace}) WHERE n.id = $node_id " + f"RETURN COUNT {{(n)-[]-(:{self.namespace})}} AS degree", + node_id=node_id, + ) + record = await result.single() + return record["degree"] if record else 0 + + async def edge_degree(self, src_id: str, tgt_id: str) -> int: + async with self.async_driver.session() as session: + result = await session.run( + f"MATCH (s:{self.namespace}), (t:{self.namespace}) " + "WHERE s.id = $src_id AND t.id = $tgt_id " + f"RETURN COUNT {{(s)-[]-(:{self.namespace})}} + COUNT {{(t)-[]-(:{self.namespace})}} AS degree", + src_id=src_id, + tgt_id=tgt_id, + ) + record = await result.single() + return record["degree"] if record else 0 + + async def get_node(self, node_id: str) -> Union[dict, None]: + async with self.async_driver.session() as session: + result = await session.run( + f"MATCH (n:{self.namespace}) WHERE n.id = $node_id RETURN properties(n) AS node_data", + node_id=node_id, + ) + record = await result.single() + raw_node_data = record["node_data"] if record else None + if raw_node_data is None: + return None + raw_node_data["clusters"] = json.dumps( + [ + { + "level": index, + "cluster": 
cluster_id, + } + for index, cluster_id in enumerate( + raw_node_data.get("communityIds", []) + ) + ] + ) + return raw_node_data + + async def get_edge( + self, source_node_id: str, target_node_id: str + ) -> Union[dict, None]: + async with self.async_driver.session() as session: + result = await session.run( + f"MATCH (s:{self.namespace})-[r]->(t:{self.namespace}) " + "WHERE s.id = $source_id AND t.id = $target_id " + "RETURN properties(r) AS edge_data", + source_id=source_node_id, + target_id=target_node_id, + ) + record = await result.single() + return record["edge_data"] if record else None + + async def get_node_edges( + self, source_node_id: str + ) -> Union[list[tuple[str, str]], None]: + async with self.async_driver.session() as session: + result = await session.run( + f"MATCH (s:{self.namespace})-[r]->(t:{self.namespace}) WHERE s.id = $source_id " + "RETURN s.id AS source, t.id AS target", + source_id=source_node_id, + ) + edges = [] + async for record in result: + edges.append((record["source"], record["target"])) + return edges + + async def upsert_node(self, node_id: str, node_data: dict[str, str]): + node_type = node_data.get("entity_type", "UNKNOWN").strip('"') + async with self.async_driver.session() as session: + await session.run( + f"MERGE (n:{self.namespace}:{node_type} {{id: $node_id}}) " + "SET n += $node_data", + node_id=node_id, + node_data=node_data, + ) + + async def upsert_edge( + self, source_node_id: str, target_node_id: str, edge_data: dict[str, str] + ): + edge_data.setdefault("weight", 0.0) + async with self.async_driver.session() as session: + await session.run( + f"MATCH (s:{self.namespace}), (t:{self.namespace}) " + "WHERE s.id = $source_id AND t.id = $target_id " + "MERGE (s)-[r:RELATED]->(t) " # Added relationship type 'RELATED' + "SET r += $edge_data", + source_id=source_node_id, + target_id=target_node_id, + edge_data=edge_data, + ) + + async def clustering(self, algorithm: str): + if algorithm != "leiden": + raise ValueError( + f"Clustering algorithm {algorithm} not supported in Neo4j implementation" + ) + + random_seed = self.global_config["graph_cluster_seed"] + max_level = self.global_config["max_graph_cluster_size"] + async with self.async_driver.session() as session: + try: + # Project the graph with undirected relationships + await session.run( + f""" + CALL gds.graph.project( + 'graph_{self.namespace}', + ['{self.namespace}'], + {{ + RELATED: {{ + orientation: 'UNDIRECTED', + properties: ['weight'] + }} + }} + ) + """ + ) + + # Run Leiden algorithm + result = await session.run( + f""" + CALL gds.leiden.write( + 'graph_{self.namespace}', + {{ + writeProperty: 'communityIds', + includeIntermediateCommunities: True, + relationshipWeightProperty: "weight", + maxLevels: {max_level}, + tolerance: 0.0001, + gamma: 1.0, + theta: 0.01, + randomSeed: {random_seed} + }} + ) + YIELD communityCount, modularities; + """ + ) + result = await result.single() + community_count: int = result["communityCount"] + modularities = result["modularities"] + logger.info( + f"Performed graph clustering with {community_count} communities and modularities {modularities}" + ) + finally: + # Drop the projected graph + await session.run(f"CALL gds.graph.drop('graph_{self.namespace}')") + + async def community_schema(self) -> dict[str, SingleCommunitySchema]: + results = defaultdict( + lambda: dict( + level=None, + title=None, + edges=set(), + nodes=set(), + chunk_ids=set(), + occurrence=0.0, + sub_communities=[], + ) + ) + + async with self.async_driver.session() as session: 
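+            # Layout note (based on the gds.leiden.write call above): each node
+            # carries a "communityIds" list written with includeIntermediateCommunities,
+            # e.g. [4, 2, 0] means community 4 at level 0, community 2 at level 1,
+            # and community 0 at level 2; the list index is therefore used as the
+            # level below.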
+ # Fetch community data + result = await session.run( + f""" + MATCH (n:{self.namespace}) + WITH n, n.communityIds AS communityIds, [(n)-[]-(m:{self.namespace}) | m.id] AS connected_nodes + RETURN n.id AS node_id, n.source_id AS source_id, + communityIds AS cluster_key, + connected_nodes + """ + ) + + # records = await result.fetch() + + max_num_ids = 0 + async for record in result: + for index, c_id in enumerate(record["cluster_key"]): + node_id = str(record["node_id"]) + source_id = record["source_id"] + level = index + cluster_key = str(c_id) + connected_nodes = record["connected_nodes"] + + results[cluster_key]["level"] = level + results[cluster_key]["title"] = f"Cluster {cluster_key}" + results[cluster_key]["nodes"].add(node_id) + results[cluster_key]["edges"].update( + [ + tuple(sorted([node_id, str(connected)])) + for connected in connected_nodes + if connected != node_id + ] + ) + chunk_ids = source_id.split(GRAPH_FIELD_SEP) + results[cluster_key]["chunk_ids"].update(chunk_ids) + max_num_ids = max( + max_num_ids, len(results[cluster_key]["chunk_ids"]) + ) + + # Process results + for k, v in results.items(): + v["edges"] = [list(e) for e in v["edges"]] + v["nodes"] = list(v["nodes"]) + v["chunk_ids"] = list(v["chunk_ids"]) + v["occurrence"] = len(v["chunk_ids"]) / max_num_ids + + # Compute sub-communities (this is a simplified approach) + for cluster in results.values(): + cluster["sub_communities"] = [ + sub_key + for sub_key, sub_cluster in results.items() + if sub_cluster["level"] > cluster["level"] + and set(sub_cluster["nodes"]).issubset(set(cluster["nodes"])) + ] + + return dict(results) + + async def index_done_callback(self): + await self.async_driver.close() + + async def _debug_delete_all_node_edges(self): + async with self.async_driver.session() as session: + try: + # Delete all relationships in the namespace + await session.run(f"MATCH (n:{self.namespace})-[r]-() DELETE r") + + # Delete all nodes in the namespace + await session.run(f"MATCH (n:{self.namespace}) DELETE n") + + logger.info( + f"All nodes and edges in namespace '{self.namespace}' have been deleted." + ) + except Exception as e: + logger.error(f"Error deleting nodes and edges: {str(e)}") + raise diff --git a/videorag/_storage/gdb_networkx.py b/videorag/_storage/gdb_networkx.py new file mode 100755 index 0000000..e29bf3e --- /dev/null +++ b/videorag/_storage/gdb_networkx.py @@ -0,0 +1,238 @@ +import html +import json +import os +from collections import defaultdict +from dataclasses import dataclass +from typing import Any, Union, cast +import networkx as nx +import numpy as np + +from .._utils import logger +from ..base import ( + BaseGraphStorage, + SingleCommunitySchema, +) +from ..prompt import GRAPH_FIELD_SEP + + +@dataclass +class NetworkXStorage(BaseGraphStorage): + @staticmethod + def load_nx_graph(file_name) -> nx.Graph: + if os.path.exists(file_name): + return nx.read_graphml(file_name) + return None + + @staticmethod + def write_nx_graph(graph: nx.Graph, file_name): + logger.info( + f"Writing graph with {graph.number_of_nodes()} nodes, {graph.number_of_edges()} edges" + ) + nx.write_graphml(graph, file_name) + + @staticmethod + def stable_largest_connected_component(graph: nx.Graph) -> nx.Graph: + """Refer to https://github.com/microsoft/graphrag/index/graph/utils/stable_lcc.py + Return the largest connected component of the graph, with nodes and edges sorted in a stable way. 
+ """ + from graspologic.utils import largest_connected_component + + graph = graph.copy() + graph = cast(nx.Graph, largest_connected_component(graph)) + node_mapping = {node: html.unescape(node.upper().strip()) for node in graph.nodes()} # type: ignore + graph = nx.relabel_nodes(graph, node_mapping) + return NetworkXStorage._stabilize_graph(graph) + + @staticmethod + def _stabilize_graph(graph: nx.Graph) -> nx.Graph: + """Refer to https://github.com/microsoft/graphrag/index/graph/utils/stable_lcc.py + Ensure an undirected graph with the same relationships will always be read the same way. + """ + fixed_graph = nx.DiGraph() if graph.is_directed() else nx.Graph() + + sorted_nodes = graph.nodes(data=True) + sorted_nodes = sorted(sorted_nodes, key=lambda x: x[0]) + + fixed_graph.add_nodes_from(sorted_nodes) + edges = list(graph.edges(data=True)) + + if not graph.is_directed(): + + def _sort_source_target(edge): + source, target, edge_data = edge + if source > target: + temp = source + source = target + target = temp + return source, target, edge_data + + edges = [_sort_source_target(edge) for edge in edges] + + def _get_edge_key(source: Any, target: Any) -> str: + return f"{source} -> {target}" + + edges = sorted(edges, key=lambda x: _get_edge_key(x[0], x[1])) + + fixed_graph.add_edges_from(edges) + return fixed_graph + + def __post_init__(self): + self._graphml_xml_file = os.path.join( + self.global_config["working_dir"], f"graph_{self.namespace}.graphml" + ) + preloaded_graph = NetworkXStorage.load_nx_graph(self._graphml_xml_file) + if preloaded_graph is not None: + logger.info( + f"Loaded graph from {self._graphml_xml_file} with {preloaded_graph.number_of_nodes()} nodes, {preloaded_graph.number_of_edges()} edges" + ) + self._graph = preloaded_graph or nx.Graph() + self._clustering_algorithms = { + "leiden": self._leiden_clustering, + } + self._node_embed_algorithms = { + "node2vec": self._node2vec_embed, + } + + async def index_done_callback(self): + NetworkXStorage.write_nx_graph(self._graph, self._graphml_xml_file) + + async def has_node(self, node_id: str) -> bool: + return self._graph.has_node(node_id) + + async def has_edge(self, source_node_id: str, target_node_id: str) -> bool: + return self._graph.has_edge(source_node_id, target_node_id) + + async def get_node(self, node_id: str) -> Union[dict, None]: + return self._graph.nodes.get(node_id) + + async def node_degree(self, node_id: str) -> int: + # [numberchiffre]: node_id not part of graph returns `DegreeView({})` instead of 0 + return self._graph.degree(node_id) if self._graph.has_node(node_id) else 0 + + async def edge_degree(self, src_id: str, tgt_id: str) -> int: + return (self._graph.degree(src_id) if self._graph.has_node(src_id) else 0) + ( + self._graph.degree(tgt_id) if self._graph.has_node(tgt_id) else 0 + ) + + async def get_edge( + self, source_node_id: str, target_node_id: str + ) -> Union[dict, None]: + return self._graph.edges.get((source_node_id, target_node_id)) + + async def get_node_edges(self, source_node_id: str): + if self._graph.has_node(source_node_id): + return list(self._graph.edges(source_node_id)) + return None + + async def upsert_node(self, node_id: str, node_data: dict[str, str]): + self._graph.add_node(node_id, **node_data) + + async def upsert_edge( + self, source_node_id: str, target_node_id: str, edge_data: dict[str, str] + ): + self._graph.add_edge(source_node_id, target_node_id, **edge_data) + + async def clustering(self, algorithm: str): + if algorithm not in self._clustering_algorithms: + raise 
ValueError(f"Clustering algorithm {algorithm} not supported") + await self._clustering_algorithms[algorithm]() + + async def community_schema(self) -> dict[str, SingleCommunitySchema]: + results = defaultdict( + lambda: dict( + level=None, + title=None, + edges=set(), + nodes=set(), + chunk_ids=set(), + occurrence=0.0, + sub_communities=[], + ) + ) + max_num_ids = 0 + levels = defaultdict(set) + for node_id, node_data in self._graph.nodes(data=True): + if "clusters" not in node_data: + continue + clusters = json.loads(node_data["clusters"]) + this_node_edges = self._graph.edges(node_id) + + for cluster in clusters: + level = cluster["level"] + cluster_key = str(cluster["cluster"]) + levels[level].add(cluster_key) + results[cluster_key]["level"] = level + results[cluster_key]["title"] = f"Cluster {cluster_key}" + results[cluster_key]["nodes"].add(node_id) + results[cluster_key]["edges"].update( + [tuple(sorted(e)) for e in this_node_edges] + ) + results[cluster_key]["chunk_ids"].update( + node_data["source_id"].split(GRAPH_FIELD_SEP) + ) + max_num_ids = max(max_num_ids, len(results[cluster_key]["chunk_ids"])) + + ordered_levels = sorted(levels.keys()) + for i, curr_level in enumerate(ordered_levels[:-1]): + next_level = ordered_levels[i + 1] + this_level_comms = levels[curr_level] + next_level_comms = levels[next_level] + # compute the sub-communities by nodes intersection + for comm in this_level_comms: + results[comm]["sub_communities"] = [ + c + for c in next_level_comms + if results[c]["nodes"].issubset(results[comm]["nodes"]) + ] + + for k, v in results.items(): + v["edges"] = list(v["edges"]) + v["edges"] = [list(e) for e in v["edges"]] + v["nodes"] = list(v["nodes"]) + v["chunk_ids"] = list(v["chunk_ids"]) + v["occurrence"] = len(v["chunk_ids"]) / max_num_ids + return dict(results) + + def _cluster_data_to_subgraphs(self, cluster_data: dict[str, list[dict[str, str]]]): + for node_id, clusters in cluster_data.items(): + self._graph.nodes[node_id]["clusters"] = json.dumps(clusters) + + async def _leiden_clustering(self): + from graspologic.partition import hierarchical_leiden + + graph = NetworkXStorage.stable_largest_connected_component(self._graph) + community_mapping = hierarchical_leiden( + graph, + max_cluster_size=self.global_config["max_graph_cluster_size"], + random_seed=self.global_config["graph_cluster_seed"], + ) + + node_communities: dict[str, list[dict[str, str]]] = defaultdict(list) + __levels = defaultdict(set) + for partition in community_mapping: + level_key = partition.level + cluster_id = partition.cluster + node_communities[partition.node].append( + {"level": level_key, "cluster": cluster_id} + ) + __levels[level_key].add(cluster_id) + node_communities = dict(node_communities) + __levels = {k: len(v) for k, v in __levels.items()} + logger.info(f"Each level has communities: {dict(__levels)}") + self._cluster_data_to_subgraphs(node_communities) + + async def embed_nodes(self, algorithm: str) -> tuple[np.ndarray, list[str]]: + if algorithm not in self._node_embed_algorithms: + raise ValueError(f"Node embedding algorithm {algorithm} not supported") + return await self._node_embed_algorithms[algorithm]() + + async def _node2vec_embed(self): + from graspologic import embed + + embeddings, nodes = embed.node2vec_embed( + self._graph, + **self.global_config["node2vec_params"], + ) + + nodes_ids = [self._graph.nodes[node_id]["id"] for node_id in nodes] + return embeddings, nodes_ids diff --git a/videorag/_storage/kv_json.py b/videorag/_storage/kv_json.py new file mode 100755 
index 0000000..b802f26 --- /dev/null +++ b/videorag/_storage/kv_json.py @@ -0,0 +1,46 @@ +import os +from dataclasses import dataclass + +from .._utils import load_json, logger, write_json +from ..base import ( + BaseKVStorage, +) + + +@dataclass +class JsonKVStorage(BaseKVStorage): + def __post_init__(self): + working_dir = self.global_config["working_dir"] + self._file_name = os.path.join(working_dir, f"kv_store_{self.namespace}.json") + self._data = load_json(self._file_name) or {} + logger.info(f"Load KV {self.namespace} with {len(self._data)} data") + + async def all_keys(self) -> list[str]: + return list(self._data.keys()) + + async def index_done_callback(self): + write_json(self._data, self._file_name) + + async def get_by_id(self, id): + return self._data.get(id, None) + + async def get_by_ids(self, ids, fields=None): + if fields is None: + return [self._data.get(id, None) for id in ids] + return [ + ( + {k: v for k, v in self._data[id].items() if k in fields} + if self._data.get(id, None) + else None + ) + for id in ids + ] + + async def filter_keys(self, data: list[str]) -> set[str]: + return set([s for s in data if s not in self._data]) + + async def upsert(self, data: dict[str, dict]): + self._data.update(data) + + async def drop(self): + self._data = {} diff --git a/videorag/_storage/vdb_hnswlib.py b/videorag/_storage/vdb_hnswlib.py new file mode 100755 index 0000000..3e98c95 --- /dev/null +++ b/videorag/_storage/vdb_hnswlib.py @@ -0,0 +1,141 @@ +import asyncio +import os +from dataclasses import dataclass, field +from typing import Any +import pickle +import hnswlib +import numpy as np +import xxhash + +from .._utils import logger +from ..base import BaseVectorStorage + + +@dataclass +class HNSWVectorStorage(BaseVectorStorage): + ef_construction: int = 100 + M: int = 16 + max_elements: int = 1000000 + ef_search: int = 50 + num_threads: int = -1 + _index: Any = field(init=False) + _metadata: dict[str, dict] = field(default_factory=dict) + _current_elements: int = 0 + + def __post_init__(self): + self._index_file_name = os.path.join( + self.global_config["working_dir"], f"{self.namespace}_hnsw.index" + ) + self._metadata_file_name = os.path.join( + self.global_config["working_dir"], f"{self.namespace}_hnsw_metadata.pkl" + ) + self._embedding_batch_num = self.global_config.get("embedding_batch_num", 100) + + hnsw_params = self.global_config.get("vector_db_storage_cls_kwargs", {}) + self.ef_construction = hnsw_params.get("ef_construction", self.ef_construction) + self.M = hnsw_params.get("M", self.M) + self.max_elements = hnsw_params.get("max_elements", self.max_elements) + self.ef_search = hnsw_params.get("ef_search", self.ef_search) + self.num_threads = hnsw_params.get("num_threads", self.num_threads) + self._index = hnswlib.Index( + space="cosine", dim=self.embedding_func.embedding_dim + ) + + if os.path.exists(self._index_file_name) and os.path.exists( + self._metadata_file_name + ): + self._index.load_index( + self._index_file_name, max_elements=self.max_elements + ) + with open(self._metadata_file_name, "rb") as f: + self._metadata, self._current_elements = pickle.load(f) + logger.info( + f"Loaded existing index for {self.namespace} with {self._current_elements} elements" + ) + else: + self._index.init_index( + max_elements=self.max_elements, + ef_construction=self.ef_construction, + M=self.M, + ) + self._index.set_ef(self.ef_search) + self._metadata = {} + self._current_elements = 0 + logger.info(f"Created new index for {self.namespace}") + + async def upsert(self, data: 
dict[str, dict]) -> np.ndarray:
+        logger.info(f"Inserting {len(data)} vectors to {self.namespace}")
+        if not data:
+            logger.warning("You are inserting empty data into the vector DB")
+            return []
+
+        if self._current_elements + len(data) > self.max_elements:
+            raise ValueError(
+                f"Cannot insert {len(data)} elements. Current: {self._current_elements}, Max: {self.max_elements}"
+            )
+
+        list_data = [
+            {
+                "id": k,
+                **{k1: v1 for k1, v1 in v.items() if k1 in self.meta_fields},
+            }
+            for k, v in data.items()
+        ]
+        contents = [v["content"] for v in data.values()]
+        batch_size = min(self._embedding_batch_num, len(contents))
+        embeddings = np.concatenate(
+            await asyncio.gather(
+                *[
+                    self.embedding_func(contents[i : i + batch_size])
+                    for i in range(0, len(contents), batch_size)
+                ]
+            )
+        )
+
+        ids = np.fromiter(
+            (xxhash.xxh32_intdigest(d["id"].encode()) for d in list_data),
+            dtype=np.uint32,
+            count=len(list_data),
+        )
+        self._metadata.update(
+            {
+                id_int: {
+                    k: v for k, v in d.items() if k in self.meta_fields or k == "id"
+                }
+                for id_int, d in zip(ids, list_data)
+            }
+        )
+        self._index.add_items(data=embeddings, ids=ids, num_threads=self.num_threads)
+        self._current_elements = self._index.get_current_count()
+        return ids
+
+    async def query(self, query: str, top_k: int = 5) -> list[dict]:
+        if self._current_elements == 0:
+            return []
+
+        top_k = min(top_k, self._current_elements)
+
+        if top_k > self.ef_search:
+            logger.warning(
+                f"Setting ef_search to {top_k} because top_k is larger than ef_search"
+            )
+            self._index.set_ef(top_k)
+
+        embedding = await self.embedding_func([query])
+        labels, distances = self._index.knn_query(
+            data=embedding[0], k=top_k, num_threads=self.num_threads
+        )
+
+        return [
+            {
+                **self._metadata.get(label, {}),
+                "distance": distance,
+                "similarity": 1 - distance,
+            }
+            for label, distance in zip(labels[0], distances[0])
+        ]
+
+    async def index_done_callback(self):
+        self._index.save_index(self._index_file_name)
+        with open(self._metadata_file_name, "wb") as f:
+            pickle.dump((self._metadata, self._current_elements), f)
diff --git a/videorag/_storage/vdb_nanovectordb.py b/videorag/_storage/vdb_nanovectordb.py
new file mode 100755
index 0000000..ffeeaae
--- /dev/null
+++ b/videorag/_storage/vdb_nanovectordb.py
@@ -0,0 +1,145 @@
+import asyncio
+import os
+import torch
+from dataclasses import dataclass
+import numpy as np
+from nano_vectordb import NanoVectorDB
+from tqdm import tqdm
+from imagebind.models import imagebind_model
+
+from .._utils import logger
+from ..base import BaseVectorStorage
+from .._videoutil import encode_video_segments, encode_string_query
+
+
+@dataclass
+class NanoVectorDBStorage(BaseVectorStorage):
+    cosine_better_than_threshold: float = 0.2
+
+    def __post_init__(self):
+
+        self._client_file_name = os.path.join(
+            self.global_config["working_dir"], f"vdb_{self.namespace}.json"
+        )
+        self._max_batch_size = self.global_config["embedding_batch_num"]
+        self._client = NanoVectorDB(
+            self.embedding_func.embedding_dim, storage_file=self._client_file_name
+        )
+        self.cosine_better_than_threshold = self.global_config.get(
+            "query_better_than_threshold", self.cosine_better_than_threshold
+        )
+
+    async def upsert(self, data: dict[str, dict]):
+        logger.info(f"Inserting {len(data)} vectors to {self.namespace}")
+        if not len(data):
+            logger.warning("You are inserting empty data into the vector DB")
+            return []
+        list_data = [
+            {
+                "__id__": k,
+                **{k1: v1 for k1, v1 in v.items() if k1 in self.meta_fields},
+            }
+            for k, v in data.items()
+        ]
+        contents = [v["content"] for v in data.values()]
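+        # Batching note: each batch is embedded via embedding_func, all batch
+        # calls are awaited together with asyncio.gather, and the per-batch
+        # arrays are concatenated afterwards, so _max_batch_size bounds the
+        # payload of each individual embedding call.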
+        batches = [
+            contents[i : i + self._max_batch_size]
+            for i in range(0, len(contents), self._max_batch_size)
+        ]
+        embeddings_list = await asyncio.gather(
+            *[self.embedding_func(batch) for batch in batches]
+        )
+        embeddings = np.concatenate(embeddings_list)
+        for i, d in enumerate(list_data):
+            d["__vector__"] = embeddings[i]
+        results = self._client.upsert(datas=list_data)
+        return results
+
+    async def query(self, query: str, top_k=5):
+        embedding = await self.embedding_func([query])
+        embedding = embedding[0]
+        results = self._client.query(
+            query=embedding,
+            top_k=top_k,
+            better_than_threshold=self.cosine_better_than_threshold,
+        )
+        results = [
+            {**dp, "id": dp["__id__"], "distance": dp["__metrics__"]} for dp in results
+        ]
+        return results
+
+    async def index_done_callback(self):
+        self._client.save()
+
+
+@dataclass
+class NanoVectorDBVideoSegmentStorage(BaseVectorStorage):
+    embedding_func = None
+    segment_retrieval_top_k: int = 2
+
+    def __post_init__(self):
+
+        self._client_file_name = os.path.join(
+            self.global_config["working_dir"], f"vdb_{self.namespace}.json"
+        )
+        self._max_batch_size = self.global_config["video_embedding_batch_num"]
+        self._client = NanoVectorDB(
+            self.global_config["video_embedding_dim"], storage_file=self._client_file_name
+        )
+        self.top_k = self.global_config.get(
+            "segment_retrieval_top_k", self.segment_retrieval_top_k
+        )
+
+    async def upsert(self, video_name, segment_index2name, video_output_format):
+        embedder = imagebind_model.imagebind_huge(pretrained=True).cuda()
+        embedder.eval()
+
+        logger.info(f"Inserting {len(segment_index2name)} segments to {self.namespace}")
+        if not len(segment_index2name):
+            logger.warning("You are inserting empty data into the vector DB")
+            return []
+        list_data, video_paths = [], []
+        cache_path = os.path.join(self.global_config["working_dir"], '_cache', video_name)
+        index_list = list(segment_index2name.keys())
+        for index in index_list:
+            list_data.append({
+                "__id__": f"{video_name}_{index}",
+                "__video_name__": video_name,
+                "__index__": index,
+            })
+            segment_name = segment_index2name[index]
+            video_file = os.path.join(cache_path, f"{segment_name}.{video_output_format}")
+            video_paths.append(video_file)
+        batches = [
+            video_paths[i: i + self._max_batch_size]
+            for i in range(0, len(video_paths), self._max_batch_size)
+        ]
+        embeddings = []
+        for _batch in tqdm(batches, desc=f"Encoding Video Segments {video_name}"):
+            batch_embeddings = encode_video_segments(_batch, embedder)
+            embeddings.append(batch_embeddings)
+        embeddings = torch.concat(embeddings, dim=0)
+        embeddings = embeddings.numpy()
+        for i, d in enumerate(list_data):
+            d["__vector__"] = embeddings[i]
+        results = self._client.upsert(datas=list_data)
+        return results
+
+    async def query(self, query: str):
+        embedder = imagebind_model.imagebind_huge(pretrained=True).cuda()
+        embedder.eval()
+
+        embedding = encode_string_query(query, embedder)
+        embedding = embedding[0]
+        results = self._client.query(
+            query=embedding,
+            top_k=self.top_k,
+            better_than_threshold=-1,
+        )
+        results = [
+            {**dp, "id": dp["__id__"], "distance": dp["__metrics__"]} for dp in results
+        ]
+        return results
+
+    async def index_done_callback(self):
+        self._client.save()
\ No newline at end of file
diff --git a/videorag/_utils.py b/videorag/_utils.py
new file mode 100755
index 0000000..5185959
--- /dev/null
+++ b/videorag/_utils.py
@@ -0,0 +1,193 @@
+import asyncio
+import html
+import json
+import logging
+import os
+import re
+import numbers
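+# Shared helpers for the videorag package: event-loop handling, tiktoken-based
+# token counting, JSON (de)serialization, content hashing, and the EmbeddingFunc
+# wrapper consumed by the storage backends.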
+from dataclasses import dataclass +from functools import wraps +from hashlib import md5 +from typing import Any, Union + +import numpy as np +import tiktoken + +logger = logging.getLogger("nano-graphrag") +ENCODER = None + + +def always_get_an_event_loop() -> asyncio.AbstractEventLoop: + try: + # If there is already an event loop, use it. + loop = asyncio.get_event_loop() + except RuntimeError: + # If in a sub-thread, create a new event loop. + logger.info("Creating a new event loop in a sub-thread.") + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + return loop + + +def locate_json_string_body_from_string(content: str) -> Union[str, None]: + """Locate the JSON string body from a string""" + maybe_json_str = re.search(r"{.*}", content, re.DOTALL) + if maybe_json_str is not None: + return maybe_json_str.group(0) + else: + return None + + +def convert_response_to_json(response: str) -> dict: + json_str = locate_json_string_body_from_string(response) + assert json_str is not None, f"Unable to parse JSON from response: {response}" + try: + data = json.loads(json_str) + return data + except json.JSONDecodeError as e: + logger.error(f"Failed to parse JSON: {json_str}") + raise e from None + + +def encode_string_by_tiktoken(content: str, model_name: str = "gpt-4o"): + global ENCODER + if ENCODER is None: + ENCODER = tiktoken.encoding_for_model(model_name) + tokens = ENCODER.encode(content) + return tokens + + +def decode_tokens_by_tiktoken(tokens: list[int], model_name: str = "gpt-4o"): + global ENCODER + if ENCODER is None: + ENCODER = tiktoken.encoding_for_model(model_name) + content = ENCODER.decode(tokens) + return content + + +def truncate_list_by_token_size(list_data: list, key: callable, max_token_size: int): + """Truncate a list of data by token size""" + if max_token_size <= 0: + return [] + tokens = 0 + for i, data in enumerate(list_data): + tokens += len(encode_string_by_tiktoken(key(data))) + if tokens > max_token_size: + return list_data[:i] + return list_data + + +def compute_mdhash_id(content, prefix: str = ""): + return prefix + md5(content.encode()).hexdigest() + + +def write_json(json_obj, file_name): + with open(file_name, "w", encoding="utf-8") as f: + json.dump(json_obj, f, indent=2, ensure_ascii=False) + + +def load_json(file_name): + if not os.path.exists(file_name): + return None + with open(file_name, encoding="utf-8") as f: + return json.load(f) + + +# it's dirty to type, so it's a good way to have fun +def pack_user_ass_to_openai_messages(*args: str): + roles = ["user", "assistant"] + return [ + {"role": roles[i % 2], "content": content} for i, content in enumerate(args) + ] + + +def is_float_regex(value): + return bool(re.match(r"^[-+]?[0-9]*\.?[0-9]+$", value)) + + +def compute_args_hash(*args): + return md5(str(args).encode()).hexdigest() + + +def split_string_by_multi_markers(content: str, markers: list[str]) -> list[str]: + """Split a string by multiple markers""" + if not markers: + return [content] + results = re.split("|".join(re.escape(marker) for marker in markers), content) + return [r.strip() for r in results if r.strip()] + + +def enclose_string_with_quotes(content: Any) -> str: + """Enclose a string with quotes""" + if isinstance(content, numbers.Number): + return str(content) + content = str(content) + content = content.strip().strip("'").strip('"') + return f'"{content}"' + + +def list_of_list_to_csv(data: list[list]): + return "\n".join( + [ + ",\t".join([f"{enclose_string_with_quotes(data_dd)}" for data_dd in data_d]) + for data_d in 
data
+        ]
+    )
+
+
+# -----------------------------------------------------------------------------------
+# Refer to the utils functions of the official GraphRAG implementation:
+# https://github.com/microsoft/graphrag
+def clean_str(input: Any) -> str:
+    """Clean an input string by removing HTML escapes, control characters, and other unwanted characters."""
+    # If we get non-string input, just give it back
+    if not isinstance(input, str):
+        return input
+
+    result = html.unescape(input.strip())
+    # https://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python
+    return re.sub(r"[\x00-\x1f\x7f-\x9f]", "", result)
+
+
+# Utils types -----------------------------------------------------------------------
+@dataclass
+class EmbeddingFunc:
+    embedding_dim: int
+    max_token_size: int
+    func: callable
+
+    async def __call__(self, *args, **kwargs) -> np.ndarray:
+        return await self.func(*args, **kwargs)
+
+
+# Decorators ------------------------------------------------------------------------
+def limit_async_func_call(max_size: int, waiting_time: float = 0.0001):
+    """Add a restriction on the maximum number of concurrent async calls for an async func"""
+
+    def final_decro(func):
+        """Not using asyncio.Semaphore to avoid needing nest-asyncio"""
+        __current_size = 0
+
+        @wraps(func)
+        async def wait_func(*args, **kwargs):
+            nonlocal __current_size
+            while __current_size >= max_size:
+                await asyncio.sleep(waiting_time)
+            __current_size += 1
+            result = await func(*args, **kwargs)
+            __current_size -= 1
+            return result
+
+        return wait_func
+
+    return final_decro
+
+
+def wrap_embedding_func_with_attrs(**kwargs):
+    """Wrap a function with attributes"""
+
+    def final_decro(func) -> EmbeddingFunc:
+        new_func = EmbeddingFunc(**kwargs, func=func)
+        return new_func
+
+    return final_decro
diff --git a/videorag/_videoutil/__init__.py b/videorag/_videoutil/__init__.py
new file mode 100755
index 0000000..1acc956
--- /dev/null
+++ b/videorag/_videoutil/__init__.py
@@ -0,0 +1,4 @@
+from .split import split_video, saving_video_segments
+from .asr import speech_to_text
+from .caption import segment_caption, merge_segment_information, retrieved_segment_caption
+from .feature import encode_video_segments, encode_string_query
\ No newline at end of file
diff --git a/videorag/_videoutil/__pycache__/__init__.cpython-311.pyc b/videorag/_videoutil/__pycache__/__init__.cpython-311.pyc
new file mode 100755
index 0000000..8abfe6f
Binary files /dev/null and b/videorag/_videoutil/__pycache__/__init__.cpython-311.pyc differ
diff --git a/videorag/_videoutil/__pycache__/asr.cpython-311.pyc b/videorag/_videoutil/__pycache__/asr.cpython-311.pyc
new file mode 100755
index 0000000..1d3ea05
Binary files /dev/null and b/videorag/_videoutil/__pycache__/asr.cpython-311.pyc differ
diff --git a/videorag/_videoutil/__pycache__/caption.cpython-311.pyc b/videorag/_videoutil/__pycache__/caption.cpython-311.pyc
new file mode 100755
index 0000000..17466c8
Binary files /dev/null and b/videorag/_videoutil/__pycache__/caption.cpython-311.pyc differ
diff --git a/videorag/_videoutil/__pycache__/feature.cpython-311.pyc b/videorag/_videoutil/__pycache__/feature.cpython-311.pyc
new file mode 100755
index 0000000..06ce972
Binary files /dev/null and b/videorag/_videoutil/__pycache__/feature.cpython-311.pyc differ
diff --git a/videorag/_videoutil/__pycache__/split.cpython-311.pyc b/videorag/_videoutil/__pycache__/split.cpython-311.pyc
new file mode 100755
index 0000000..8270728
Binary files /dev/null and
b/videorag/_videoutil/__pycache__/split.cpython-311.pyc differ diff --git a/videorag/_videoutil/asr.py b/videorag/_videoutil/asr.py new file mode 100755 index 0000000..820bf1d --- /dev/null +++ b/videorag/_videoutil/asr.py @@ -0,0 +1,24 @@ +import os +import torch +import logging +from tqdm import tqdm +from faster_whisper import WhisperModel +from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline + +def speech_to_text(video_name, working_dir, segment_index2name, audio_output_format): + model = WhisperModel("./faster-distil-whisper-large-v3") + model.logger.setLevel(logging.WARNING) + + cache_path = os.path.join(working_dir, '_cache', video_name) + + transcripts = {} + for index in tqdm(segment_index2name, desc=f"Speech Recognition {video_name}"): + segment_name = segment_index2name[index] + audio_file = os.path.join(cache_path, f"{segment_name}.{audio_output_format}") + segments, info = model.transcribe(audio_file) + result = "" + for segment in segments: + result += "[%.2fs -> %.2fs] %s\n" % (segment.start, segment.end, segment.text) + transcripts[index] = result + + return transcripts \ No newline at end of file diff --git a/videorag/_videoutil/caption.py b/videorag/_videoutil/caption.py new file mode 100755 index 0000000..660b093 --- /dev/null +++ b/videorag/_videoutil/caption.py @@ -0,0 +1,88 @@ +import os +import torch +import numpy as np +from PIL import Image +from tqdm import tqdm +from transformers import AutoModel, AutoTokenizer +from moviepy.video.io.VideoFileClip import VideoFileClip + +def encode_video(video, frame_times): + frames = [] + for t in frame_times: + frames.append(video.get_frame(t)) + frames = np.stack(frames, axis=0) + frames = [Image.fromarray(v.astype('uint8')).resize((1280, 720)) for v in frames] + return frames + +def segment_caption(video_name, video_path, segment_index2name, transcripts, segment_times_info, caption_result, error_queue): + try: + model = AutoModel.from_pretrained('./MiniCPM-V-2_6-int4', trust_remote_code=True) + tokenizer = AutoTokenizer.from_pretrained('./MiniCPM-V-2_6-int4', trust_remote_code=True) + model.eval() + + with VideoFileClip(video_path) as video: + for index in tqdm(segment_index2name, desc=f"Captioning Video {video_name}"): + frame_times = segment_times_info[index]["frame_times"] + video_frames = encode_video(video, frame_times) + segment_transcript = transcripts[index] + query = f"The transcript of the current video:\n{segment_transcript}.\nNow provide a description (caption) of the video in English." 
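+                # MiniCPM-V style multimodal turn: 'content' interleaves the sampled
+                # PIL frames with the text query (frames first, query last), so the
+                # caption is conditioned on both the visuals and the ASR transcript
+                # embedded in the query above.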
+ msgs = [{'role': 'user', 'content': video_frames + [query]}] + params = {} + params["use_image_id"] = False + params["max_slice_nums"] = 2 + segment_caption = model.chat( + image=None, + msgs=msgs, + tokenizer=tokenizer, + **params + ) + caption_result[index] = segment_caption.replace("\n", "").replace("<|endoftext|>", "") + torch.cuda.empty_cache() + except Exception as e: + error_queue.put(f"Error in segment_caption:\n {str(e)}") + raise RuntimeError + +def merge_segment_information(segment_index2name, segment_times_info, transcripts, captions): + inserting_segments = {} + for index in segment_index2name: + inserting_segments[index] = {"content": None, "time": None} + segment_name = segment_index2name[index] + inserting_segments[index]["time"] = '-'.join(segment_name.split('-')[-2:]) + inserting_segments[index]["content"] = f"Caption:\n{captions[index]}\nTranscript:\n{transcripts[index]}\n\n" + inserting_segments[index]["transcript"] = transcripts[index] + inserting_segments[index]["frame_times"] = segment_times_info[index]["frame_times"].tolist() + return inserting_segments + +def retrieved_segment_caption(caption_model, caption_tokenizer, refine_knowledge, retrieved_segments, video_path_db, video_segments, num_sampled_frames): + # model = AutoModel.from_pretrained('./MiniCPM-V-2_6-int4', trust_remote_code=True) + # tokenizer = AutoTokenizer.from_pretrained('./MiniCPM-V-2_6-int4', trust_remote_code=True) + # model.eval() + + caption_result = {} + for this_segment in tqdm(retrieved_segments, desc='Captioning Segments for Given Query'): + video_name = '_'.join(this_segment.split('_')[:-1]) + index = this_segment.split('_')[-1] + video_path = video_path_db._data[video_name] + timestamp = video_segments._data[video_name][index]["time"].split('-') + start, end = eval(timestamp[0]), eval(timestamp[1]) + video = VideoFileClip(video_path) + frame_times = np.linspace(start, end, num_sampled_frames, endpoint=False) + video_frames = encode_video(video, frame_times) + segment_transcript = video_segments._data[video_name][index]["transcript"] + # query = f"The transcript of the current video:\n{segment_transcript}.\nGiven a question: {query}, you have to extract relevant information from the video and transcript for answering the question." 
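+        # Query-aware re-captioning: unlike the offline captioning pass above,
+        # this prompt folds the refined knowledge distilled from the user's
+        # question into the caption request, so the VLM surfaces details that
+        # are relevant to the question instead of a generic description.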
+        query = f"The transcript of the current video:\n{segment_transcript}.\nNow provide a very detailed description (caption) of the video in English and extract relevant information about: {refine_knowledge}"
+        msgs = [{'role': 'user', 'content': video_frames + [query]}]
+        params = {}
+        params["use_image_id"] = False
+        params["max_slice_nums"] = 2
+        segment_caption = caption_model.chat(
+            image=None,
+            msgs=msgs,
+            tokenizer=caption_tokenizer,
+            **params
+        )
+        this_caption = segment_caption.replace("\n", "").replace("<|endoftext|>", "")
+        caption_result[this_segment] = f"Caption:\n{this_caption}\nTranscript:\n{segment_transcript}\n\n"
+        torch.cuda.empty_cache()
+
+    return caption_result
\ No newline at end of file
diff --git a/videorag/_videoutil/feature.py b/videorag/_videoutil/feature.py
new file mode 100755
index 0000000..4d1da69
--- /dev/null
+++ b/videorag/_videoutil/feature.py
@@ -0,0 +1,28 @@
+import os
+import torch
+import pickle
+from tqdm import tqdm
+from imagebind import data
+from imagebind.models import imagebind_model
+from imagebind.models.imagebind_model import ImageBindModel, ModalityType
+
+
+def encode_video_segments(video_paths, embedder: ImageBindModel):
+    device = next(embedder.parameters()).device
+    inputs = {
+        ModalityType.VISION: data.load_and_transform_video_data(video_paths, device),
+    }
+    with torch.no_grad():
+        embeddings = embedder(inputs)[ModalityType.VISION]
+        embeddings = embeddings.cpu()
+    return embeddings
+
+def encode_string_query(query: str, embedder: ImageBindModel):
+    device = next(embedder.parameters()).device
+    inputs = {
+        ModalityType.TEXT: data.load_and_transform_text([query], device),
+    }
+    with torch.no_grad():
+        embeddings = embedder(inputs)[ModalityType.TEXT]
+        embeddings = embeddings.cpu()
+    return embeddings
\ No newline at end of file
diff --git a/videorag/_videoutil/split.py b/videorag/_videoutil/split.py
new file mode 100755
index 0000000..e0c1252
--- /dev/null
+++ b/videorag/_videoutil/split.py
@@ -0,0 +1,76 @@
+import os
+import time
+import shutil
+import numpy as np
+from tqdm import tqdm
+from moviepy.video import fx as vfx
+from moviepy.video.io.VideoFileClip import VideoFileClip
+
+def split_video(
+    video_path,
+    working_dir,
+    segment_length,
+    num_frames_per_segment,
+    audio_output_format='mp3',
+):
+    unique_timestamp = str(int(time.time() * 1000))
+    video_name = os.path.basename(video_path).split('.')[0]
+    video_segment_cache_path = os.path.join(working_dir, '_cache', video_name)
+    if os.path.exists(video_segment_cache_path):
+        shutil.rmtree(video_segment_cache_path)
+    os.makedirs(video_segment_cache_path, exist_ok=False)
+
+    segment_index = 0
+    segment_index2name, segment_times_info = {}, {}
+    with VideoFileClip(video_path) as video:
+
+        total_video_length = int(video.duration)
+        start_times = list(range(0, total_video_length, segment_length))
+        # if the last segment would be shorter than 5 seconds, merge it into the previous segment
+        if len(start_times) > 1 and (total_video_length - start_times[-1]) < 5:
+            start_times = start_times[:-1]
+
+        for start in tqdm(start_times, desc=f"Splitting Video {video_name}"):
+            if start != start_times[-1]:
+                end = min(start + segment_length, total_video_length)
+            else:
+                end = total_video_length
+
+            subvideo = video.subclip(start, end)
+            subvideo_length = subvideo.duration
+            frame_times = np.linspace(0, subvideo_length, num_frames_per_segment, endpoint=False)
+            frame_times += start
+
+            segment_index2name[f"{segment_index}"] = f"{unique_timestamp}-{segment_index}-{start}-{end}"
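+            # Naming sketch with hypothetical values: segment 0 covering 0-30s of a
+            # run started at epoch-millis 1717000000000 is named
+            # "1717000000000-0-0-30"; merge_segment_information later recovers the
+            # trailing "start-end" pair by splitting this name on "-".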
+            segment_times_info[f"{segment_index}"] = {"frame_times": frame_times, "timestamp": (start, end)}
+
+            # save audio
+            audio_file_base_name = segment_index2name[f"{segment_index}"]
+            audio_file = f'{audio_file_base_name}.{audio_output_format}'
+            subaudio = subvideo.audio
+            subaudio.write_audiofile(os.path.join(video_segment_cache_path, audio_file), codec='mp3', verbose=False, logger=None)
+
+            segment_index += 1
+
+    return segment_index2name, segment_times_info
+
+def saving_video_segments(
+    video_name,
+    video_path,
+    working_dir,
+    segment_index2name,
+    segment_times_info,
+    error_queue,
+    video_output_format='mp4',
+):
+    try:
+        with VideoFileClip(video_path) as video:
+            video_segment_cache_path = os.path.join(working_dir, '_cache', video_name)
+            for index in tqdm(segment_index2name, desc=f"Saving Video Segments {video_name}"):
+                start, end = segment_times_info[index]["timestamp"][0], segment_times_info[index]["timestamp"][1]
+                video_file = f'{segment_index2name[index]}.{video_output_format}'
+                subvideo = video.subclip(start, end)
+                subvideo.write_videofile(os.path.join(video_segment_cache_path, video_file), codec='libx264', verbose=False, logger=None)
+    except Exception as e:
+        error_queue.put(f"Error in saving_video_segments:\n {str(e)}")
+        raise RuntimeError
\ No newline at end of file
diff --git a/videorag/base.py b/videorag/base.py
new file mode 100755
index 0000000..122801e
--- /dev/null
+++ b/videorag/base.py
@@ -0,0 +1,149 @@
+from dataclasses import dataclass, field
+from typing import TypedDict, Union, Literal, Generic, TypeVar
+
+import numpy as np
+
+from ._utils import EmbeddingFunc
+
+
+@dataclass
+class QueryParam:
+    mode: Literal["local", "global", "naive"] = "global"
+    # shared by naive and videorag search
+    only_need_context: bool = False
+    response_type: str = "Multiple Paragraphs"
+    level: int = 2
+    top_k: int = 20
+    # naive search
+    naive_max_token_for_text_unit = 12000
+
+
+TextChunkSchema = TypedDict(
+    "TextChunkSchema",
+    {"tokens": int, "content": str, "video_segment_id": str, "chunk_order_index": int},
+)
+
+SingleCommunitySchema = TypedDict(
+    "SingleCommunitySchema",
+    {
+        "level": int,
+        "title": str,
+        "edges": list[list[str]],
+        "nodes": list[str],
+        "chunk_ids": list[str],
+        "occurrence": float,
+        "sub_communities": list[str],
+    },
+)
+
+
+class CommunitySchema(SingleCommunitySchema):
+    report_string: str
+    report_json: dict
+
+
+T = TypeVar("T")
+
+
+@dataclass
+class StorageNameSpace:
+    namespace: str
+    global_config: dict
+
+    async def index_start_callback(self):
+        """prepare the storage before indexing starts"""
+        pass
+
+    async def index_done_callback(self):
+        """commit the storage operations after indexing"""
+        pass
+
+    async def query_done_callback(self):
+        """commit the storage operations after querying"""
+        pass
+
+
+@dataclass
+class BaseVectorStorage(StorageNameSpace):
+    embedding_func: EmbeddingFunc
+    meta_fields: set = field(default_factory=set)
+
+    async def query(self, query: str, top_k: int) -> list[dict]:
+        raise NotImplementedError
+
+    async def upsert(self, data: dict[str, dict]):
+        """Use 'content' field from value for embedding, use key as id.
+        If embedding_func is None, use 'embedding' field from value
+        """
+        raise NotImplementedError
+
+
+@dataclass
+class BaseKVStorage(Generic[T], StorageNameSpace):
+    async def all_keys(self) -> list[str]:
+        raise NotImplementedError
+
+    async def get_by_id(self, id: str) -> Union[T, None]:
+        raise NotImplementedError
+
+    async def get_by_ids(
+        self, ids: list[str], fields: Union[set[str], None] = None
+    ) -> list[Union[T, None]]:
+        raise NotImplementedError
+
+    async def filter_keys(self, data: list[str]) -> set[str]:
+        """return the keys that do not yet exist"""
+        raise NotImplementedError
+
+    async def upsert(self, data: dict[str, T]):
+        raise NotImplementedError
+
+    async def drop(self):
+        raise NotImplementedError
+
+
+@dataclass
+class BaseGraphStorage(StorageNameSpace):
+    async def has_node(self, node_id: str) -> bool:
+        raise NotImplementedError
+
+    async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
+        raise NotImplementedError
+
+    async def node_degree(self, node_id: str) -> int:
+        raise NotImplementedError
+
+    async def edge_degree(self, src_id: str, tgt_id: str) -> int:
+        raise NotImplementedError
+
+    async def get_node(self, node_id: str) -> Union[dict, None]:
+        raise NotImplementedError
+
+    async def get_edge(
+        self, source_node_id: str, target_node_id: str
+    ) -> Union[dict, None]:
+        raise NotImplementedError
+
+    async def get_node_edges(
+        self, source_node_id: str
+    ) -> Union[list[tuple[str, str]], None]:
+        raise NotImplementedError
+
+    async def upsert_node(self, node_id: str, node_data: dict[str, str]):
+        raise NotImplementedError
+
+    async def upsert_edge(
+        self, source_node_id: str, target_node_id: str, edge_data: dict[str, str]
+    ):
+        raise NotImplementedError
+
+    async def clustering(self, algorithm: str):
+        raise NotImplementedError
+
+    async def community_schema(self) -> dict[str, SingleCommunitySchema]:
+        """Return the community representation with report and nodes"""
+        raise NotImplementedError
+
+    async def embed_nodes(self, algorithm: str) -> tuple[np.ndarray, list[str]]:
+        raise NotImplementedError("Node embedding is not used in nano-graphrag.")
diff --git a/videorag/prompt.py b/videorag/prompt.py
new file mode 100755
index 0000000..f65136a
--- /dev/null
+++ b/videorag/prompt.py
@@ -0,0 +1,403 @@
+"""
+Reference:
+ - Prompts are from [graphrag](https://github.com/microsoft/graphrag)
+"""
+
+GRAPH_FIELD_SEP = "<SEP>"
+PROMPTS = {}
+
+PROMPTS[
+    "entity_extraction"
+] = """-Goal-
+Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.
+
+-Steps-
+1. Identify all entities. For each identified entity, extract the following information:
+- entity_name: Name of the entity, capitalized
+- entity_type: One of the following types: [{entity_types}]
+- entity_description: Comprehensive description of the entity's attributes and activities
+Format each entity as ("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)
+
+2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
+For each pair of related entities, extract the following information:
+- source_entity: name of the source entity, as identified in step 1
+- target_entity: name of the target entity, as identified in step 1
+- relationship_description: explanation as to why you think the source entity and the target entity are related to each other
+- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
+Format each relationship as ("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_strength>)
+
+3. Return output in English as a single list of all the entities and relationships identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.
+
+4. When finished, output {completion_delimiter}
+
+######################
+-Examples-
+######################
+Example 1:
+
+Entity_types: [person, technology, mission, organization, location]
+Text:
+while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order.
+
+Then Taylor did something unexpected. They paused beside Jordan and, for a moment, observed the device with something akin to reverence. "If this tech can be understood..." Taylor said, their voice quieter, "It could change the game for us. For all of us."
+
+The underlying dismissal earlier seemed to falter, replaced by a glimpse of reluctant respect for the gravity of what lay in their hands. Jordan looked up, and for a fleeting heartbeat, their eyes locked with Taylor's, a wordless clash of wills softening into an uneasy truce.
+
+It was a small transformation, barely perceptible, but one that Alex noted with an inward nod.
They had all been brought here by different paths +################ +Output: +("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is a character who experiences frustration and is observant of the dynamics among other characters."){record_delimiter} +("entity"{tuple_delimiter}"Taylor"{tuple_delimiter}"person"{tuple_delimiter}"Taylor is portrayed with authoritarian certainty and shows a moment of reverence towards a device, indicating a change in perspective."){record_delimiter} +("entity"{tuple_delimiter}"Jordan"{tuple_delimiter}"person"{tuple_delimiter}"Jordan shares a commitment to discovery and has a significant interaction with Taylor regarding a device."){record_delimiter} +("entity"{tuple_delimiter}"Cruz"{tuple_delimiter}"person"{tuple_delimiter}"Cruz is associated with a vision of control and order, influencing the dynamics among other characters."){record_delimiter} +("entity"{tuple_delimiter}"The Device"{tuple_delimiter}"technology"{tuple_delimiter}"The Device is central to the story, with potential game-changing implications, and is revered by Taylor."){record_delimiter} +("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Taylor"{tuple_delimiter}"Alex is affected by Taylor's authoritarian certainty and observes changes in Taylor's attitude towards the device."{tuple_delimiter}7){record_delimiter} +("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Jordan"{tuple_delimiter}"Alex and Jordan share a commitment to discovery, which contrasts with Cruz's vision."{tuple_delimiter}6){record_delimiter} +("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"Jordan"{tuple_delimiter}"Taylor and Jordan interact directly regarding the device, leading to a moment of mutual respect and an uneasy truce."{tuple_delimiter}8){record_delimiter} +("relationship"{tuple_delimiter}"Jordan"{tuple_delimiter}"Cruz"{tuple_delimiter}"Jordan's commitment to discovery is in rebellion against Cruz's vision of control and order."{tuple_delimiter}5){record_delimiter} +("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"The Device"{tuple_delimiter}"Taylor shows reverence towards the device, indicating its importance and potential impact."{tuple_delimiter}9){completion_delimiter} +############################# +Example 2: + +Entity_types: [person, technology, mission, organization, location] +Text: +They were no longer mere operatives; they had become guardians of a threshold, keepers of a message from a realm beyond stars and stripes. This elevation in their mission could not be shackled by regulations and established protocols—it demanded a new perspective, a new resolve. + +Tension threaded through the dialogue of beeps and static as communications with Washington buzzed in the background. The team stood, a portentous air enveloping them. It was clear that the decisions they made in the ensuing hours could redefine humanity's place in the cosmos or condemn them to ignorance and potential peril. + +Their connection to the stars solidified, the group moved to address the crystallizing warning, shifting from passive recipients to active participants. Mercer's latter instincts gained precedence— the team's mandate had evolved, no longer solely to observe and report but to interact and prepare. 
A metamorphosis had begun, and Operation: Dulce hummed with the newfound frequency of their daring, a tone set not by the earthly +############# +Output: +("entity"{tuple_delimiter}"Washington"{tuple_delimiter}"location"{tuple_delimiter}"Washington is a location where communications are being received, indicating its importance in the decision-making process."){record_delimiter} +("entity"{tuple_delimiter}"Operation: Dulce"{tuple_delimiter}"mission"{tuple_delimiter}"Operation: Dulce is described as a mission that has evolved to interact and prepare, indicating a significant shift in objectives and activities."){record_delimiter} +("entity"{tuple_delimiter}"The team"{tuple_delimiter}"organization"{tuple_delimiter}"The team is portrayed as a group of individuals who have transitioned from passive observers to active participants in a mission, showing a dynamic change in their role."){record_delimiter} +("relationship"{tuple_delimiter}"The team"{tuple_delimiter}"Washington"{tuple_delimiter}"The team receives communications from Washington, which influences their decision-making process."{tuple_delimiter}7){record_delimiter} +("relationship"{tuple_delimiter}"The team"{tuple_delimiter}"Operation: Dulce"{tuple_delimiter}"The team is directly involved in Operation: Dulce, executing its evolved objectives and activities."{tuple_delimiter}9){completion_delimiter} +############################# +Example 3: + +Entity_types: [person, role, technology, organization, event, location, concept] +Text: +their voice slicing through the buzz of activity. "Control may be an illusion when facing an intelligence that literally writes its own rules," they stated stoically, casting a watchful eye over the flurry of data. + +"It's like it's learning to communicate," offered Sam Rivera from a nearby interface, their youthful energy boding a mix of awe and anxiety. "This gives talking to strangers' a whole new meaning." + +Alex surveyed his team—each face a study in concentration, determination, and not a small measure of trepidation. "This might well be our first contact," he acknowledged, "And we need to be ready for whatever answers back." + +Together, they stood on the edge of the unknown, forging humanity's response to a message from the heavens. The ensuing silence was palpable—a collective introspection about their role in this grand cosmic play, one that could rewrite human history. 
+
+The encrypted dialogue continued to unfold, its intricate patterns showing an almost uncanny anticipation
+#############
+Output:
+("entity"{tuple_delimiter}"Sam Rivera"{tuple_delimiter}"person"{tuple_delimiter}"Sam Rivera is a member of a team working on communicating with an unknown intelligence, showing a mix of awe and anxiety."){record_delimiter}
+("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is the leader of a team attempting first contact with an unknown intelligence, acknowledging the significance of their task."){record_delimiter}
+("entity"{tuple_delimiter}"Control"{tuple_delimiter}"concept"{tuple_delimiter}"Control refers to the ability to manage or govern, which is challenged by an intelligence that writes its own rules."){record_delimiter}
+("entity"{tuple_delimiter}"Intelligence"{tuple_delimiter}"concept"{tuple_delimiter}"Intelligence here refers to an unknown entity capable of writing its own rules and learning to communicate."){record_delimiter}
+("entity"{tuple_delimiter}"First Contact"{tuple_delimiter}"event"{tuple_delimiter}"First Contact is the potential initial communication between humanity and an unknown intelligence."){record_delimiter}
+("entity"{tuple_delimiter}"Humanity's Response"{tuple_delimiter}"event"{tuple_delimiter}"Humanity's Response is the collective action taken by Alex's team in response to a message from an unknown intelligence."){record_delimiter}
+("relationship"{tuple_delimiter}"Sam Rivera"{tuple_delimiter}"Intelligence"{tuple_delimiter}"Sam Rivera is directly involved in the process of learning to communicate with the unknown intelligence."{tuple_delimiter}9){record_delimiter}
+("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"First Contact"{tuple_delimiter}"Alex leads the team that might be making the First Contact with the unknown intelligence."{tuple_delimiter}10){record_delimiter}
+("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Humanity's Response"{tuple_delimiter}"Alex and his team are the key figures in Humanity's Response to the unknown intelligence."{tuple_delimiter}8){record_delimiter}
+("relationship"{tuple_delimiter}"Control"{tuple_delimiter}"Intelligence"{tuple_delimiter}"The concept of Control is challenged by the Intelligence that writes its own rules."{tuple_delimiter}7){completion_delimiter}
+#############################
+-Real Data-
+######################
+Entity_types: {entity_types}
+Text: {input_text}
+######################
+Output:
+"""
+
+PROMPTS[
+    "summarize_entity_descriptions"
+] = """You are a helpful assistant responsible for generating a comprehensive summary of the data provided below.
+Given one or two entities, and a list of descriptions, all related to the same entity or group of entities.
+Please concatenate all of these into a single, comprehensive description. Make sure to include information collected from all the descriptions.
+If the provided descriptions are contradictory, please resolve the contradictions and provide a single, coherent summary.
+Make sure it is written in third person, and include the entity names so we have the full context.
+
+#######
+-Data-
+Entities: {entity_name}
+Description List: {description_list}
+#######
+Output:
+"""
+
+PROMPTS[
+    "entiti_continue_extraction"
+] = """MANY entities were missed in the last extraction. Add them below using the same format:
+"""
+
+PROMPTS[
+    "entiti_if_loop_extraction"
+] = """It appears some entities may have still been missed. Answer YES | NO if there are still entities that need to be added.
+""" + +PROMPTS["DEFAULT_ENTITY_TYPES"] = ["organization", "person", "geo", "event"] +PROMPTS["DEFAULT_TUPLE_DELIMITER"] = "<|>" +PROMPTS["DEFAULT_RECORD_DELIMITER"] = "##" +PROMPTS["DEFAULT_COMPLETION_DELIMITER"] = "<|COMPLETE|>" +PROMPTS["fail_response"] = "Sorry, I'm not able to provide an answer to that question." +PROMPTS["process_tickers"] = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"] +PROMPTS["default_text_separator"] = [ + # Paragraph separators + "\n\n", + "\r\n\r\n", + # Line breaks + "\n", + "\r\n", + # Sentence ending punctuation + "。", # Chinese period + ".", # Full-width dot + ".", # English period + "!", # Chinese exclamation mark + "!", # English exclamation mark + "?", # Chinese question mark + "?", # English question mark + # Whitespace characters + " ", # Space + "\t", # Tab + "\u3000", # Full-width space + # Special characters + "\u200b", # Zero-width space (used in some Asian languages) +] + + +PROMPTS[ + "naive_rag_response" +] = """---Role--- + +You are a helpful assistant responding to a query with retrieved knowledge. + +---Goal--- + +Generate a response of the target length and format that responds to the user's question with relevant general knowledge. +Summarize useful and relevant information from the input data tables, suitable for the specified response length and format. +If you don't know the answer or if the provided knowledge do not contain sufficient information to provide an answer, just say so. Do not make anything up. +Do not include information where the supporting evidence for it is not provided. + +---Target response length and format--- + +{response_type} + +---Data tables--- + +{content_data} + +---Goal--- + +Generate a response of the target length and format that responds to the user's question with relevant general knowledge. +Summarize useful and relevant information from the input data tables appropriate for the response length and format. +If you don't know the answer or if the provided knowledge do not contain sufficient information to provide an answer, just say so. Do not make anything up. +Do not include information where the supporting evidence for it is not provided. + +---Notice--- +Please add sections and commentary as appropriate for the length and format if necessary. Format the response in Markdown. +""" + + +PROMPTS[ + "query_rewrite_for_entity_retrieval" +] = """-Goal- +For a given query, generate a declarative sentence to serve as a query for retrieving relevant knowledge. + +###################### +-Examples- +###################### + +Question: What are the main characters? \n(A) Alice\n(B) Bob\n(C) Charlie\n(D) Dana +################ +Output: +The main characters. (Maybe Alice, Bob, Charlie or Dana) + +Question: What locations are shown in the video? +################ +Output: +The locations shown in the video. + +Question: Which animals appear in the wildlife footage? \n(A) Lions\n(B) Elephants\n(C) Zebras +################ +Output: +The animals that appear in the wildlife footage. (Maybe Lions, Elephants or Zebras) + +############################# +-Real Data- +###################### +Question: {input_text} +###################### +Output: +""" + + + +PROMPTS[ + "query_rewrite_for_visual_retrieval" +] = """-Goal- +Given a question that may include scene-related information, generate a declarative sentence to serve as a query for retrieving relevant video segments. + +###################### +-Examples- +###################### + +Question: Which animal does the protagonist encounter in the forest scene? 
################
Output:
The protagonist encounters an animal in the forest.

Question: In the movie, what color is the car that chases the main character through the city?
################
Output:
A city chase scene where the main character is pursued by a car.

Question: What is the weather like during the opening scene of the film?\n(A) Sunny\n(B) Rainy\n(C) Snowy\n(D) Windy
################
Output:
The opening scene of the film featuring specific weather conditions. (Maybe Sunny, Rainy, Snowy or Windy)

#############################
-Real Data-
######################
Question: {input_text}
######################
Output:
"""


PROMPTS[
    "keywords_extraction"
] = """-Goal-
Given a query, extract the relevant keywords that can help answer the query. Please list the keywords separated by commas.

######################
-Examples-
######################

Question: Which animal does the protagonist encounter in the forest scene?
################
Output:
animal, protagonist, forest, scene

Question: In the movie, what color is the car that chases the main character through the city?
################
Output:
color, car, chases, main character, city

Question: What is the weather like during the opening scene of the film?\n(A) Sunny\n(B) Rainy\n(C) Snowy\n(D) Windy
################
Output:
weather, opening scene, film, Sunny, Rainy, Snowy, Windy

#############################
-Real Data-
######################
Question: {input_text}
######################
Output:
"""


PROMPTS[
    "filtering_segment"
] = """---Role---

You are a helpful assistant that determines, based on a rough caption, whether a video segment may contain information relevant to the knowledge we need.
Please note that the caption is only a rough description of the video segment: it may not directly contain the answer, but it may indicate that the segment is likely to contain information relevant to answering the question.

---Video Caption---

{caption}

---Knowledge We Need---

{knowledge}

---Answer---
Please provide an answer that begins with "yes" or "no", followed by a brief step-by-step explanation.
Answer:
"""
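
# A minimal sketch of how a filtering answer that begins with "yes" or "no"
# is typically consumed downstream. This helper is illustrative only and is
# not part of the original commit.
def _segment_looks_relevant(answer: str) -> bool:
    # the filtering_segment prompt above asks the model to start with "yes" or "no"
    return answer.strip().lower().startswith("yes")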

PROMPTS[
    "videorag_response"
] = """---Role---

You are a helpful assistant responding to a query with retrieved knowledge.

---Goal---

Generate a response of the target length and format that responds to the user's question with relevant general knowledge.
Summarize useful and relevant information from the retrieved text chunks and the information retrieved from videos, suitable for the specified response length and format.
If you don't know the answer or if the retrieved information is insufficient to provide an answer, just say so. Do not make anything up.
Do not include information where the supporting evidence for it is not provided.

---Target response length and format---

{response_type}

---Retrieved Information From Videos---

{video_data}

---Retrieved Text Chunks---

{chunk_data}

---Goal---

Generate a response of the target length and format that responds to the user's question with relevant general knowledge.
Summarize useful and relevant information from the retrieved text chunks and the information retrieved from videos, suitable for the specified response length and format.
If you don't know the answer or if the retrieved information is insufficient to provide an answer, just say so. Do not make anything up.
Do not include information where the supporting evidence for it is not provided.
Reference relevant video segments within the answer, specifying the video name and the start & end timestamps. Use the following reference format:

---Example of Reference---

In one segment, the film highlights the devastating effects of deforestation on wildlife habitats [1]. Another part illustrates successful conservation efforts that have helped endangered species recover [2].

#### Reference:
[1] video_name_1, 05:30, 08:00
[2] video_name_2, 25:00, 28:00

---Notice---
Please add sections and commentary as appropriate for the length and format if necessary. Format the response in Markdown.
"""

PROMPTS[
    "videorag_response_wo_reference"
] = """---Role---

You are a helpful assistant responding to a query with retrieved knowledge.

---Goal---

Generate a response of the target length and format that responds to the user's question with relevant general knowledge.
Summarize useful and relevant information from the retrieved text chunks and the information retrieved from videos, suitable for the specified response length and format.
If you don't know the answer or if the retrieved information is insufficient to provide an answer, just say so. Do not make anything up.
Do not include information where the supporting evidence for it is not provided.

---Target response length and format---

{response_type}

---Retrieved Information From Videos---

{video_data}

---Retrieved Text Chunks---

{chunk_data}

---Goal---

Generate a response of the target length and format that responds to the user's question with relevant general knowledge.
Summarize useful and relevant information from the retrieved text chunks and the information retrieved from videos, suitable for the specified response length and format.
If you don't know the answer or if the retrieved information is insufficient to provide an answer, just say so. Do not make anything up.
Do not include information where the supporting evidence for it is not provided.

---Notice---
Please add sections and commentary as appropriate for the length and format if necessary. Format the response in Markdown.
"""
\ No newline at end of file
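For reference, the extraction prompts above emit records joined by the default delimiters ("<|>", "##", "<|COMPLETE|>"). A minimal sketch of a parser for that output follows; the helper name is hypothetical and not part of this commit:

def parse_extraction_output(
    response: str,
    tuple_delimiter: str = "<|>",
    record_delimiter: str = "##",
    completion_delimiter: str = "<|COMPLETE|>",
) -> list[tuple[str, ...]]:
    # Strip the completion marker, then split into records shaped like
    # ("entity"<|>"Alex"<|>"person"<|>"...") or ("relationship"<|>...<|>7).
    records = []
    for record in response.replace(completion_delimiter, "").split(record_delimiter):
        record = record.strip()
        if record.startswith("(") and record.endswith(")"):
            fields = record[1:-1].split(tuple_delimiter)
            records.append(tuple(f.strip().strip('"') for f in fields))
    return records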
+""" \ No newline at end of file diff --git a/videorag/videorag.py b/videorag/videorag.py new file mode 100755 index 0000000..ccb2b2d --- /dev/null +++ b/videorag/videorag.py @@ -0,0 +1,441 @@ +import os +import sys +import json +import shutil +import asyncio +import multiprocessing +from dataclasses import asdict, dataclass, field +from datetime import datetime +from functools import partial +from typing import Callable, Dict, List, Optional, Type, Union, cast +from transformers import AutoModel, AutoTokenizer +import tiktoken + + +from ._llm import ( + gpt_4o_complete, + gpt_4o_mini_complete, + openai_embedding, + azure_gpt_4o_complete, + azure_openai_embedding, + azure_gpt_4o_mini_complete, +) +from ._op import ( + chunking_by_video_segments, + extract_entities, + get_chunks, + videorag_query, + naive_query, + videorag_query_wo_graph, + videorag_query_wo_vision, +) +from ._storage import ( + JsonKVStorage, + NanoVectorDBStorage, + NanoVectorDBVideoSegmentStorage, + NetworkXStorage, +) +from ._utils import ( + EmbeddingFunc, + compute_mdhash_id, + limit_async_func_call, + convert_response_to_json, + always_get_an_event_loop, + logger, +) +from .base import ( + BaseGraphStorage, + BaseKVStorage, + BaseVectorStorage, + StorageNameSpace, + QueryParam, +) +from ._videoutil import( + split_video, + speech_to_text, + segment_caption, + merge_segment_information, + saving_video_segments, +) + + +@dataclass +class VideoRAG: + working_dir: str = field( + default_factory=lambda: f"./videorag_cache_{datetime.now().strftime('%Y-%m-%d-%H:%M:%S')}" + ) + + # video + threads_for_split: int = 10 + video_segment_length: int = 30 # seconds + rough_num_frames_per_segment: int = 5 # frames + fine_num_frames_per_segment: int = 15 # frames + video_output_format: str = "mp4" + audio_output_format: str = "mp3" + video_embedding_batch_num: int = 2 + segment_retrieval_top_k: int = 4 + video_embedding_dim: int = 1024 + + # query + retrieval_topk_chunks: int = 2 + + # graph mode + enable_local: bool = True + enable_naive_rag: bool = True + + # text chunking + chunk_func: Callable[ + [ + list[list[int]], + List[str], + tiktoken.Encoding, + Optional[int], + ], + List[Dict[str, Union[str, int]]], + ] = chunking_by_video_segments + chunk_token_size: int = 1200 + # chunk_overlap_token_size: int = 100 + tiktoken_model_name: str = "gpt-4o" + + # entity extraction + entity_extract_max_gleaning: int = 1 + entity_summary_to_max_tokens: int = 500 + + # text embedding + embedding_func: EmbeddingFunc = field(default_factory=lambda: openai_embedding) + embedding_batch_num: int = 32 + embedding_func_max_async: int = 16 + query_better_than_threshold: float = 0.2 + + # LLM + using_azure_openai: bool = False + best_model_func: callable = gpt_4o_mini_complete + best_model_max_token_size: int = 32768 + best_model_max_async: int = 16 + cheap_model_func: callable = gpt_4o_mini_complete + cheap_model_max_token_size: int = 32768 + cheap_model_max_async: int = 16 + + # entity extraction + entity_extraction_func: callable = extract_entities + + # storage + key_string_value_json_storage_cls: Type[BaseKVStorage] = JsonKVStorage + vector_db_storage_cls: Type[BaseVectorStorage] = NanoVectorDBStorage + vs_vector_db_storage_cls: Type[BaseVectorStorage] = NanoVectorDBVideoSegmentStorage + vector_db_storage_cls_kwargs: dict = field(default_factory=dict) + graph_storage_cls: Type[BaseGraphStorage] = NetworkXStorage + enable_llm_cache: bool = True + + # extension + always_create_working_dir: bool = True + addon_params: dict = 
+
+    def load_caption_model(self, debug=False):
+        # load the vision-language caption model and tokenizer from a local
+        # MiniCPM-V-2_6-int4 checkpoint; in debug mode, skip loading entirely
+        if not debug:
+            self.caption_model = AutoModel.from_pretrained('./MiniCPM-V-2_6-int4', trust_remote_code=True)
+            self.caption_tokenizer = AutoTokenizer.from_pretrained('./MiniCPM-V-2_6-int4', trust_remote_code=True)
+            self.caption_model.eval()
+        else:
+            self.caption_model = None
+            self.caption_tokenizer = None
+
+    def __post_init__(self):
+        _print_config = ",\n  ".join([f"{k} = {v}" for k, v in asdict(self).items()])
+        logger.debug(f"VideoRAG init with param:\n\n  {_print_config}\n")
+
+        if self.using_azure_openai:
+            # swap the default OpenAI functions for their Azure equivalents;
+            # functions explicitly set by the user are left unchanged (note the
+            # default best_model_func is gpt_4o_mini_complete, so it must be
+            # checked here as well)
+            if self.best_model_func == gpt_4o_complete:
+                self.best_model_func = azure_gpt_4o_complete
+            elif self.best_model_func == gpt_4o_mini_complete:
+                self.best_model_func = azure_gpt_4o_mini_complete
+            if self.cheap_model_func == gpt_4o_mini_complete:
+                self.cheap_model_func = azure_gpt_4o_mini_complete
+            if self.embedding_func == openai_embedding:
+                self.embedding_func = azure_openai_embedding
+            logger.info(
+                "Switched the default OpenAI functions to their Azure OpenAI equivalents"
+            )
+
+        if not os.path.exists(self.working_dir) and self.always_create_working_dir:
+            logger.info(f"Creating working directory {self.working_dir}")
+            os.makedirs(self.working_dir)
+
+        self.video_path_db = self.key_string_value_json_storage_cls(
+            namespace="video_path", global_config=asdict(self)
+        )
+
+        self.video_segments = self.key_string_value_json_storage_cls(
+            namespace="video_segments", global_config=asdict(self)
+        )
+
+        self.text_chunks = self.key_string_value_json_storage_cls(
+            namespace="text_chunks", global_config=asdict(self)
+        )
+
+        self.llm_response_cache = (
+            self.key_string_value_json_storage_cls(
+                namespace="llm_response_cache", global_config=asdict(self)
+            )
+            if self.enable_llm_cache
+            else None
+        )
+
+        self.chunk_entity_relation_graph = self.graph_storage_cls(
+            namespace="chunk_entity_relation", global_config=asdict(self)
+        )
+
+        self.embedding_func = limit_async_func_call(self.embedding_func_max_async)(
+            self.embedding_func
+        )
+        self.entities_vdb = (
+            self.vector_db_storage_cls(
+                namespace="entities",
+                global_config=asdict(self),
+                embedding_func=self.embedding_func,
+                meta_fields={"entity_name"},
+            )
+            if self.enable_local
+            else None
+        )
+        self.chunks_vdb = (
+            self.vector_db_storage_cls(
+                namespace="chunks",
+                global_config=asdict(self),
+                embedding_func=self.embedding_func,
+            )
+            if self.enable_naive_rag
+            else None
+        )
+
+        self.video_segment_feature_vdb = (
+            self.vs_vector_db_storage_cls(
+                namespace="video_segment_feature",
+                global_config=asdict(self),
+                embedding_func=None,  # the embedding is computed inside this storage's insert() function
+            )
+        )
+
+        self.best_model_func = limit_async_func_call(self.best_model_max_async)(
+            partial(self.best_model_func, hashing_kv=self.llm_response_cache)
+        )
+        self.cheap_model_func = limit_async_func_call(self.cheap_model_max_async)(
+            partial(self.cheap_model_func, hashing_kv=self.llm_response_cache)
+        )
+
+    def insert_video(self, video_path_list=None):
+        if video_path_list is None:
+            video_path_list = []  # guard against iterating over None
+        loop = always_get_an_event_loop()
+        for video_path in video_path_list:
+            # Step0: skip videos that already exist in storage
+            # (splitext keeps video names that contain dots intact)
+            video_name = os.path.splitext(os.path.basename(video_path))[0]
+            if video_name in self.video_segments._data:
+                logger.info(f"Found the video named {os.path.basename(video_path)} in storage; skipping it.")
+                continue
+            loop.run_until_complete(self.video_path_db.upsert(
+                {video_name: video_path}
+            ))
+
+            # Step1: split the video into segments
+            segment_index2name, segment_times_info = split_video(
+                video_path,
+                self.working_dir,
+                self.video_segment_length,
+                self.rough_num_frames_per_segment,
+                self.audio_output_format,
+            )
+
+            # Step2: obtain the transcript with whisper
+            transcripts = speech_to_text(
+                video_name,
+                self.working_dir,
+                segment_index2name,
+                self.audio_output_format
+            )
+
+            # Step3: save the video segments and, in a parallel process, obtain
+            # captions with the vision-language model
+            manager = multiprocessing.Manager()
+            captions = manager.dict()
+            error_queue = manager.Queue()
+
+            process_saving_video_segments = multiprocessing.Process(
+                target=saving_video_segments,
+                args=(
+                    video_name,
+                    video_path,
+                    self.working_dir,
+                    segment_index2name,
+                    segment_times_info,
+                    error_queue,
+                    self.video_output_format,
+                )
+            )
+
+            process_segment_caption = multiprocessing.Process(
+                target=segment_caption,
+                args=(
+                    video_name,
+                    video_path,
+                    segment_index2name,
+                    transcripts,
+                    segment_times_info,
+                    captions,
+                    error_queue,
+                )
+            )
+
+            process_saving_video_segments.start()
+            process_segment_caption.start()
+            process_saving_video_segments.join()
+            process_segment_caption.join()
+
+            # if either process reported an error, log it and stop processing
+            while not error_queue.empty():
+                error_message = error_queue.get()
+                with open('error_log_videorag.txt', 'a', encoding='utf-8') as log_file:
+                    log_file.write(f"Video Name:{video_name} Error processing:\n{error_message}\n\n")
+                raise RuntimeError(error_message)
+
+            # Step4: insert the video segment information
+            segments_information = merge_segment_information(
+                segment_index2name,
+                segment_times_info,
+                transcripts,
+                captions,
+            )
+            manager.shutdown()
+            loop.run_until_complete(self.video_segments.upsert(
+                {video_name: segments_information}
+            ))
+
+            # Step5: encode the video segment features
+            loop.run_until_complete(self.video_segment_feature_vdb.upsert(
+                video_name,
+                segment_index2name,
+                self.video_output_format,
+            ))
+
+            # Step6: delete the cache files
+            video_segment_cache_path = os.path.join(self.working_dir, '_cache', video_name)
+            if os.path.exists(video_segment_cache_path):
+                shutil.rmtree(video_segment_cache_path)
+
+            # Step7: save the current video information
+            loop.run_until_complete(self._save_video_segments())
+
+        loop.run_until_complete(self.ainsert(self.video_segments._data))
+
+    def query(self, query: str, param: QueryParam = QueryParam()):
+        loop = always_get_an_event_loop()
+        return loop.run_until_complete(self.aquery(query, param))
+
+    async def aquery(self, query: str, param: QueryParam = QueryParam()):
+        # note: load_caption_model() must be called before querying, since
+        # videorag_query needs self.caption_model and self.caption_tokenizer
+        if param.mode == "videorag":
+            response = await videorag_query(
+                query,
+                self.entities_vdb,
+                self.text_chunks,
+                self.chunks_vdb,
+                self.video_path_db,
+                self.video_segments,
+                self.video_segment_feature_vdb,
+                self.chunk_entity_relation_graph,
+                self.caption_model,
+                self.caption_tokenizer,
+                param,
+                asdict(self),
+            )
+        else:
+            raise ValueError(f"Unknown mode {param.mode}")
+        await self._query_done()
+        return response
+
+    async def ainsert(self, new_video_segment):
+        await self._insert_start()
+        try:
+            # ---------- chunking
+            inserting_chunks = get_chunks(
+                new_videos=new_video_segment,
+                chunk_func=self.chunk_func,
+                max_token_size=self.chunk_token_size,
+            )
+            _add_chunk_keys = await self.text_chunks.filter_keys(
+                list(inserting_chunks.keys())
+            )
+            inserting_chunks = {
+                k: v for k, v in inserting_chunks.items() if k in _add_chunk_keys
+            }
+            if not len(inserting_chunks):
+                logger.warning("All chunks are already in the storage")
+                return
+            logger.info(f"[New Chunks] inserting {len(inserting_chunks)} chunks")
+            if self.enable_naive_rag:
+                logger.info("Insert chunks for naive RAG")
+                await self.chunks_vdb.upsert(inserting_chunks)
+
+            # TODO: no incremental update for communities now, so just drop all
+            # await self.community_reports.drop()
+
+            # ---------- extract/summarize entities and upsert to the graph
+            logger.info("[Entity Extraction]...")
+            maybe_new_kg, _, _ = await self.entity_extraction_func(
+                inserting_chunks,
+                knowledge_graph_inst=self.chunk_entity_relation_graph,
+                entity_vdb=self.entities_vdb,
+                global_config=asdict(self),
+            )
+            if maybe_new_kg is None:
+                logger.warning("No new entities found")
+                return
+            self.chunk_entity_relation_graph = maybe_new_kg
+            # ---------- commit upserts and indexing
+            await self.text_chunks.upsert(inserting_chunks)
+        finally:
+            await self._insert_done()
+
+    async def _insert_start(self):
+        tasks = []
+        for storage_inst in [
+            self.chunk_entity_relation_graph,
+        ]:
+            if storage_inst is None:
+                continue
+            tasks.append(cast(StorageNameSpace, storage_inst).index_start_callback())
+        await asyncio.gather(*tasks)
+
+    async def _save_video_segments(self):
+        tasks = []
+        for storage_inst in [
+            self.video_segment_feature_vdb,
+            self.video_segments,
+            self.video_path_db,
+        ]:
+            if storage_inst is None:
+                continue
+            tasks.append(cast(StorageNameSpace, storage_inst).index_done_callback())
+        await asyncio.gather(*tasks)
+
+    async def _insert_done(self):
+        tasks = []
+        for storage_inst in [
+            self.text_chunks,
+            self.llm_response_cache,
+            self.entities_vdb,
+            self.chunks_vdb,
+            self.chunk_entity_relation_graph,
+            self.video_segment_feature_vdb,
+            self.video_segments,
+            self.video_path_db,
+        ]:
+            if storage_inst is None:
+                continue
+            tasks.append(cast(StorageNameSpace, storage_inst).index_done_callback())
+        await asyncio.gather(*tasks)
+
+    async def _query_done(self):
+        tasks = []
+        for storage_inst in [self.llm_response_cache]:
+            if storage_inst is None:
+                continue
+            tasks.append(cast(StorageNameSpace, storage_inst).index_done_callback())
+        await asyncio.gather(*tasks)
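
A minimal usage sketch, not part of this commit, assuming the package layout above; the video path is hypothetical, and QueryParam is assumed to accept `mode` as a keyword argument, matching the `param.mode` check in `aquery`:

    from videorag.videorag import VideoRAG
    from videorag.base import QueryParam

    rag = VideoRAG(working_dir="./videorag-cache-demo")
    rag.insert_video(video_path_list=["./videos/wildlife.mp4"])  # hypothetical path

    # The caption model must be loaded before querying: aquery passes
    # self.caption_model and self.caption_tokenizer to videorag_query.
    rag.load_caption_model()

    param = QueryParam(mode="videorag")  # the only mode aquery handles
    print(rag.query("Which animals appear in the wildlife footage?", param))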