diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6ec318b --- /dev/null +++ b/.gitignore @@ -0,0 +1,171 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. 
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ + +/test* +/hf_space +/.vscode +/BLINK_Benchmark +/checkpoints +/data +/*.egg-info + + +/templates/ \ No newline at end of file diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 25a1149..0000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024 TIGER Lab - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/README.md b/README.md deleted file mode 100644 index a6ae20e..0000000 --- a/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# MantisScore -official repo for "MantisScore: A Reliable Fine-grained Metric for Video Generation" diff --git a/emnlp_pre/VideoScore_Xuan_He.pdf b/emnlp_pre/VideoScore_Xuan_He.pdf new file mode 100644 index 0000000..7beffa8 Binary files /dev/null and b/emnlp_pre/VideoScore_Xuan_He.pdf differ diff --git a/emnlp_pre/VideoScore_poster.pdf b/emnlp_pre/VideoScore_poster.pdf new file mode 100644 index 0000000..0fe5eff Binary files /dev/null and b/emnlp_pre/VideoScore_poster.pdf differ diff --git a/emnlp_pre/a.txt b/emnlp_pre/a.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/emnlp_pre/a.txt @@ -0,0 +1 @@ + diff --git a/index.html b/index.html new file mode 100644 index 0000000..f109def --- /dev/null +++ b/index.html @@ -0,0 +1,1143 @@ + + + + + + + + + + VideoScore + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+

+ + VideoScore

+

Building Automatic Metrics to Simulate Fine-grained Human Feedback for Video Generation

+
+
+ + 1,2,† + Xuan He*, + + + 1,† + Dongfu Jiang*, + + + 1,3 + Ge Zhang, + + + 1 + Max Ku, + +
+ 1Achint Soni, + 1Sherman Siu, + 1Haonan Chen, + 1Abhranil Chandra, + 1Ziyan Jiang, + 1Aaran Arulraj, + 4Kai Wang, + 1Quy Duc Do, + 1Yuansheng Ni, + 2Bohan Lyu, + 1Yaswanth Narsupalli, + 1Rongqi Fan, + 1Zhiheng Lyu, + + 5Bill Yuchen Lin, + + + 1,† + Wenhu Chen + +
+
+
+ + 1University of Waterloo, + 2Tsinghua University, + 3StarDust.AI, + 4University of Toronto, + 5AI2 + +
+
+
+ + *Equal Contribution + +
+ + + + + +
+ +
+
+ +
+
+
+
+
+ + + + +
+
+

+ Abstract

+
+
+ +
+
+ +
+
+ +
+

+ The recent years have witnessed great advances + in video generation. However, the development + of automatic video metrics is lagging significantly behind. None of the existing metric is + able to provide reliable scores over generated + videos. The main barrier is the lack of large-scale human-annotated dataset. +

    +
  1. + + VideoFeedback Dataset. + In this paper, we release VideoFeedback, the first large-scale dataset + containing human-provided multiaspect score over 37.6K synthesized videos + from 11 existing video generative models. +
  2. +
  3. + + VideoScore. + We train VideoScore (initialized from Mantis) based on VideoFeedback + to enable automatic video quality assessment. Experiments + show that the Spearman correlation between + VideoScore and humans can reach 77.1 on + VideoFeedback-test, beating the prior best + metrics by about 50 points. Further result on + other held-out EvalCrafter, GenAI-Bench, and + VBench show that VideoScore has consistently much higher correlation with human + judges than other metrics. + +
  4. +
  5. Human Feedback for Video generative models. + Due to these results, we believe VideoScore can serve as + a great proxy for human raters to (1) rate different video models to track progress (2) simulate + fine-grained human feedback in Reinforcement + Learning with Human Feedback (RLHF) to improve current video generation models. +
  6. +
+

+ +
+
+
+
+
+ + + + + + +
+
+

+ + VideoFeedback Dataset
Multi-Aspect Human-Annotated Video Evaluation Data

+
+
+ +
+
+
+
+
+

+ VideoFeedback contains a total of 37.6K text-to-video pairs from 11 popular video generative models, + with some real-world videos as data augmentation. + The videos are annotated by raters for five evaluation dimensions: Visual Quality (VQ), Temporal Consistency (TC), (DD) Dynamic Degree (DD), + Text-to-Video Alignment (TVA) and Factual Consistency (FC), + in 1-4 scoring scale. Below we show the detailed description of our VideoFeedback dataset. + Please check out + 🤗 VideoFeedback + on hugging face datasets for usage. + +

+ + +
+

Statistics

+ +
+

+
+
+
+ +
+
+

Dimensions of Evaluation

+ +
+
+ + +
+
+

Annotation Examples

+ 1-Bad, 2-Average, + 3-Good, 4-Real/Perfect
+
+
+ +
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: completely base your choice of which one to visit today on the dish that most entices your taste buds, 1080P, high quality, comic +
+

+ + + + + +
VQTCDDTVAFC
33133
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: an African American female video editor editing videos

+

+ + + + + +
VQTCDDTVAFC
11331
+ +
+
+ +
+
+
+ GIF 1 +
+

+ prompt: Cinematic, A light rain is falling. Tea pickers are picking tea in a tea garden, 4K, anime style

+

+ + + + + +
VQTCDDTVAFC
32331
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: crypto new year Christmas santa money dollars pack
+ +

+ + + + + +
VQTCDDTVAFC
12331
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: Woman receiving a rose and blushing with a smile
+ +

+ + + + + +
VQTCDDTVAFC
22332
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: panorama gold coast city in future as a dystopian prison
+ +

+ + + + + +
VQTCDDTVAFC
23323
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: little bear looks surprised as the moon gets smaller
+ +

+ + + + + +
VQTCDDTVAFC
12312
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: alexandra daddario, upperbody focus, slow motion, cinematic
+ +

+ + + + + +
VQTCDDTVAFC
22331
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: cinematic portrait of two dogs running away from a medieval man
+ +

+ + + + + +
VQTCDDTVAFC
12321
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: a skateboard on the bottom of a surfboard, front view
+ +

+ + + + + +
VQTCDDTVAFC
33332
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: yellow van with trailer starts to back up

+

+ + + + + +
VQTCDDTVAFC
44444
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: five gray wolf pups frolicking and chasing each other around a remote gravel road, surrounded by grass. The pups run and leap, chasing each other, and nipping at each other, playing
+

+ + + + + +
VQTCDDTVAFC
42424
+
+
+ +
+
+
+ + +
+ +
+ + +
+
+

+ + VideoScore +

+
+
+ +
+
+
+
+
+

+ VideoScore is finetuned on VideoFeedback dataset's 37K training set taking + Mantis-8B-Idefics2 as base model. + We try generation scoring method and regression scoring method, + the former one means model's answer is in a template + predefined for video quality evaluation + while the latter one outputs 5 logits as evaluation scores in 5 dimensions. + + Besides, we also make ablation on base model, using + Mantis-8B-Idefics2, + Idefics2-8B + and + VideoLLaVA-7B + as base models to finetune. Mantis-8B-Idefics2 turns out to + have the best performance on video quality evaluation. +

+
+
+
+
+ +
+

+ Video Evaluation Benchmarks +

+
+
+
+
+

+ + VideoFeedback-test

+

+ We test VideoScore on VideoFeedback-test set, containing 760 videos with human scores from five dimensions. + We take the Spearman correlation between VideoScore and human annotation as performance indicator. + Below we show the results of some feature-based metrics like PIQE, CLIP-sim, X-CILIP-Sore etc, and some + MLLM-prompting methods like GPT-4o Gemini-1.5-Pro, etc and our VideoScore. +

+
+

+ + EvalCrafter Benchmark

+

+ We select 3 dimensions (Visual Quality, Temporal Consistency and Text-to-Video Alignment) + from EvalCrafter that match our evaluation aspects + and collect 2500+ videos for test. + We take the Spearman correlation between VideoScore and human annotation as performance indicator. + +

+
+

⚔️GenAI-Bench and VBench

+

+ GenAI-Bench is a multimodal benchmark for MLLM's capability on preference comparison + for tasks like text-to-video generation, image-editing and others, while + VBench is a comprehensive multi-aspect benchmark suite for + video generative models.
+ For GenAI-Bench we collect 2100+ videos in test and + for VBench we select a subset from 5 aspects of VBench, like technical + quality, subject consistency etc, then subsample 100 unique prompts for four T2V models (2000 videos totally) for test. + We use averaged score of our five dimensions for MLLM prompting baselines and VideoScore to + give the preference and calculate the pairwise accuracy as performance indicator. +

+ +
+
+
+ +
+

+ Results & Leaderboard +

+
+ +
+
+
+ +
+ + VideoScore series + MLLM Prompting Method + Feature-Based Metric +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MetricFinal Avg Score ↓VideoFeedback-testEvalCrafterGenAI-BenchVBench
VideoScore (reg)69.675.751.178.573.0
VideoScore-(gen)55.677.127.659.058.7
Gemini-1.5-Pro39.722.122.960.952.9
Gemini-1.5-Flash39.420.817.367.152.3
GPT-4o38.923.128.752.051.7
CLIP-sim31.78.936.234.247.4
DINO-sim30.37.532.138.543.3
SSIM-sim29.513.426.934.143.5
CLIP-Score28.6-7.221.745.054.9
LLaVA-1.5-7B27.18.510.549.939.4
LLaVA-1.6-7B23.3-3.113.244.538.7
X-CLIP-Score23.2-1.913.341.440.1
PIQE19.6-10.1-1.234.555.1
BRISQUE19.0-20.33.938.553.7
Idefics118.36.50.334.631.7
MSE-dyn10.6-5.5-17.028.436.5
SSIM-dyn9.2-12.9-26.431.444.5
+

+ The best VideoScore is in bold and the best in baselines is underlined. + +

+
+
+
+ +
+ +
+
+

+ Case Studies +

+
+
+ +
+
+

+ + + VideoFeedback-test

+ +
+
+
+
+

+ Scale of all the scores is in [1, 2, 3, 4] except for VideoScore (reg), + which outputs five float logits ranging from 0.50 to 4.50.
+ For scale [1, 2, 3, 4], 1-Bad, 2-Avg, 3-Good, 4-Perfect/Real.
+ + +

+
+
+ +
+
+
+
+ GIF +
+

+ prompt: A robot that throws a stack of paper from a desk +

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MethodVQTCDDTVAFCMethodVQTCDDTVAFC
Human score31331
VideoScore (reg)2.670.813.092.500.80VideoScore (gen)31331
GPT-4o34234Gemini-1.5-Pro31133
Gemini-1.5-Flash31133LLaVA-1.6-7B33333
LLaVA-1.5-7B33332Idefics144312
PIQE11111DINO-sim11111
SSIM-dyn33333CLIP-Score22222
+
+ +
+
+
+ GIF +
+

+ prompt: Illustrate a bustling market scene, with fresh produce displayed on stalls, attracting villagers eager to purchase, cartoon style +

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MethodVQTCDDTVAFCMethodVQTCDDTVAFC
Human score12322
VideoScore (reg)1.911.862.842.441.67VideoScore (gen)21311
GPT-4o33344Gemini-1.5-Pro22133
Gemini-1.5-Flash31123LLaVA-1.6-7B33333
LLaVA-1.5-7B33322Idefics144312
PIQE22222DINO-sim44444
SSIM-dyn22222CLIP-Score33333
+
+ +
+
+
+ GIF +
+

+ prompt: Every day must be Sunday Amusement park inside the school +

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MethodVQTCDDTVAFCMethodVQTCDDTVAFC
Human score11321
VideoScore (reg)1.041.422.951.971.09VideoScore (gen)11321
GPT-4o34233Gemini-1.5-Pro21221
Gemini-1.5-Flash21121LLaVA-1.6-7B33322
LLaVA-1.5-7B33322Idefics144312
PIQE11111DINO-sim33333
SSIM-dyn44444CLIP-Score22222
+
+ + + +
+
+ +
+

+ + ⚔️GenAI-Bench +

+
+ +
+
+
+

+ In each item we have two videos with same prompt and a human preference annotation. + For VideoScore and MLLM prompting methods, we use average score of all 5 dimensions to predict preference, + while for feature-based metrics, we use their discretized output for the prediction of preference direcly. +

+
+
+ +
+
+
+
+ GIF +
+

+ Left Video
+ prompt: a cute dog is playing a ball +

+
+
+
+
+
+ GIF +
+

+ Right Video
+ prompt: a cute dog is playing a ball +

+
+
+ +
+ GIF +
+
+ +
+
+
+
+ GIF +
+

+ Left Video
+ prompt: An astronaut flying in space, oil painting +

+
+
+
+
+
+ GIF +
+

+ Right Video
+ prompt: An astronaut flying in space, oil painting +

+
+
+ +
+ GIF +
+
+ +
+ +
+ + + +
+
+ +

BibTeX

+
@article{he2024videoscore,
+  title = {VideoScore: Building Automatic Metrics to Simulate Fine-grained Human Feedback for Video Generation},
+  author = {He, Xuan and Jiang, Dongfu and Zhang, Ge and Ku, Max and Soni, Achint and Siu, Sherman and Chen, Haonan and Chandra, Abhranil and Jiang, Ziyan and Arulraj, Aaran and Wang, Kai and Do, Quy Duc and Ni, Yuansheng and Lyu, Bohan and Narsupalli, Yaswanth and Fan, Rongqi and Lyu, Zhiheng and Lin, Yuchen and Chen, Wenhu},
+  journal = {ArXiv},
+  year = {2024},
+  volume={abs/2406.15252},
+  url = {https://arxiv.org/abs/2406.15252},
+}
+    
+
+
+ + + + + diff --git a/static/case_study/0_left.gif b/static/case_study/0_left.gif new file mode 100644 index 0000000..f038cb3 Binary files /dev/null and b/static/case_study/0_left.gif differ diff --git a/static/case_study/0_right.gif b/static/case_study/0_right.gif new file mode 100644 index 0000000..ddf2961 Binary files /dev/null and b/static/case_study/0_right.gif differ diff --git a/static/case_study/13_left.gif b/static/case_study/13_left.gif new file mode 100644 index 0000000..0d88956 Binary files /dev/null and b/static/case_study/13_left.gif differ diff --git a/static/case_study/13_right.gif b/static/case_study/13_right.gif new file mode 100644 index 0000000..0838b2c Binary files /dev/null and b/static/case_study/13_right.gif differ diff --git a/static/case_study/3004368.gif b/static/case_study/3004368.gif new file mode 100644 index 0000000..aa9d6db Binary files /dev/null and b/static/case_study/3004368.gif differ diff --git a/static/case_study/5000217.gif b/static/case_study/5000217.gif new file mode 100644 index 0000000..6f93092 Binary files /dev/null and b/static/case_study/5000217.gif differ diff --git a/static/case_study/8000454.gif b/static/case_study/8000454.gif new file mode 100644 index 0000000..b32aaf7 Binary files /dev/null and b/static/case_study/8000454.gif differ diff --git a/static/case_study/genai_0.png b/static/case_study/genai_0.png new file mode 100644 index 0000000..58523c4 Binary files /dev/null and b/static/case_study/genai_0.png differ diff --git a/static/case_study/genai_13.png b/static/case_study/genai_13.png new file mode 100644 index 0000000..5fb05bc Binary files /dev/null and b/static/case_study/genai_13.png differ diff --git a/static/css/index.css b/static/css/index.css new file mode 100644 index 0000000..8578ca4 --- /dev/null +++ b/static/css/index.css @@ -0,0 +1,159 @@ +body { + font-family: 'Noto Sans', sans-serif; +} + + +.footer .icon-link { + font-size: 25px; + color: #000; +} + +.link-block a { + margin-top: 5px; + 
margin-bottom: 5px; +} + +.dnerf { + font-variant: small-caps; +} + + +.teaser .hero-body { + padding-top: 0; + padding-bottom: 3rem; +} + +.teaser { + font-family: 'Google Sans', sans-serif; +} + + +.publication-title { +} + +.publication-banner { + max-height: parent; + +} + +.publication-banner video { + position: relative; + left: auto; + top: auto; + transform: none; + object-fit: fit; +} + +.publication-header .hero-body { +} + +.publication-title { + font-family: 'Google Sans', sans-serif; +} + +.publication-authors { + font-family: 'Google Sans', sans-serif; +} + +.publication-venue { + color: #555; + width: fit-content; + font-weight: bold; +} + +.publication-awards { + color: #ff3860; + /* width: fit-content; */ + font-weight: bolder; +} + +.title + .publication-authors, +.subtitle + .publication-authors { + margin-top: -1.25rem; +} + +.publication-authors a { + color: hsl(204, 86%, 53%) !important; +} + +.publication-authors a:hover { + text-decoration: underline; +} + +.author-block { + display: inline-block; +} + +.publication-banner img { +} + +.publication-authors { + /*color: #4286f4;*/ +} + +.publication-video { + position: relative; + width: 100%; + height: 0; + padding-bottom: 56.25%; + + overflow: hidden; + border-radius: 10px !important; +} + +.publication-video iframe { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; +} + +.publication-body img { +} + +.results-carousel { + overflow: hidden; +} + +.results-carousel .item { + margin: 5px; + overflow: hidden; + border: 1px solid #bbb; + border-radius: 10px; + padding: 0; + font-size: 0; +} + +.results-carousel video { + margin: 0; +} + + +.interpolation-panel { + background: #f5f5f5; + border-radius: 10px; +} + +.interpolation-panel .interpolation-image { + width: 100%; + border-radius: 5px; +} + +.interpolation-video-column { +} + +.interpolation-panel .slider { + margin: 0 !important; +} + +.interpolation-panel .slider { + margin: 0 !important; +} + 
+#interpolation-image-wrapper { + width: 100%; +} +#interpolation-image-wrapper img { + border-radius: 5px; +} \ No newline at end of file diff --git a/static/eg_videos/0004577.gif b/static/eg_videos/0004577.gif new file mode 100644 index 0000000..0d37b1c Binary files /dev/null and b/static/eg_videos/0004577.gif differ diff --git a/static/eg_videos/1005886.gif b/static/eg_videos/1005886.gif new file mode 100644 index 0000000..abdd194 Binary files /dev/null and b/static/eg_videos/1005886.gif differ diff --git a/static/eg_videos/2003040.gif b/static/eg_videos/2003040.gif new file mode 100644 index 0000000..0704a1d Binary files /dev/null and b/static/eg_videos/2003040.gif differ diff --git a/static/eg_videos/2004537.gif b/static/eg_videos/2004537.gif new file mode 100644 index 0000000..912347e Binary files /dev/null and b/static/eg_videos/2004537.gif differ diff --git a/static/eg_videos/2006682.gif b/static/eg_videos/2006682.gif new file mode 100644 index 0000000..b3168ec Binary files /dev/null and b/static/eg_videos/2006682.gif differ diff --git a/static/eg_videos/3003666.gif b/static/eg_videos/3003666.gif new file mode 100644 index 0000000..d982c52 Binary files /dev/null and b/static/eg_videos/3003666.gif differ diff --git a/static/eg_videos/4002328.gif b/static/eg_videos/4002328.gif new file mode 100644 index 0000000..d9df3e8 Binary files /dev/null and b/static/eg_videos/4002328.gif differ diff --git a/static/eg_videos/4004440.gif b/static/eg_videos/4004440.gif new file mode 100644 index 0000000..9a1057f Binary files /dev/null and b/static/eg_videos/4004440.gif differ diff --git a/static/eg_videos/5001175.gif b/static/eg_videos/5001175.gif new file mode 100644 index 0000000..feb5bbb Binary files /dev/null and b/static/eg_videos/5001175.gif differ diff --git a/static/eg_videos/6003973.gif b/static/eg_videos/6003973.gif new file mode 100644 index 0000000..ffb9ec4 Binary files /dev/null and b/static/eg_videos/6003973.gif differ diff --git 
a/static/eg_videos/7003198.gif b/static/eg_videos/7003198.gif new file mode 100644 index 0000000..a97f0cd Binary files /dev/null and b/static/eg_videos/7003198.gif differ diff --git a/static/eg_videos/7003618.gif b/static/eg_videos/7003618.gif new file mode 100644 index 0000000..7c43045 Binary files /dev/null and b/static/eg_videos/7003618.gif differ diff --git a/static/eg_videos/8001892.gif b/static/eg_videos/8001892.gif new file mode 100644 index 0000000..ab5eff9 Binary files /dev/null and b/static/eg_videos/8001892.gif differ diff --git a/static/eg_videos/9000384.gif b/static/eg_videos/9000384.gif new file mode 100644 index 0000000..93dee18 Binary files /dev/null and b/static/eg_videos/9000384.gif differ diff --git a/static/eg_videos/9000974.gif b/static/eg_videos/9000974.gif new file mode 100644 index 0000000..07ce708 Binary files /dev/null and b/static/eg_videos/9000974.gif differ diff --git a/static/eg_videos/p005353.gif b/static/eg_videos/p005353.gif new file mode 100644 index 0000000..8acc15d Binary files /dev/null and b/static/eg_videos/p005353.gif differ diff --git a/static/eg_videos/s000444.gif b/static/eg_videos/s000444.gif new file mode 100644 index 0000000..618cfff Binary files /dev/null and b/static/eg_videos/s000444.gif differ diff --git a/static/eg_videos/s000907.gif b/static/eg_videos/s000907.gif new file mode 100644 index 0000000..ea61384 Binary files /dev/null and b/static/eg_videos/s000907.gif differ diff --git a/static/images/anno_example.png b/static/images/anno_example.png new file mode 100644 index 0000000..14339b9 Binary files /dev/null and b/static/images/anno_example.png differ diff --git a/static/images/data_logo1.png b/static/images/data_logo1.png new file mode 100644 index 0000000..b729f7f Binary files /dev/null and b/static/images/data_logo1.png differ diff --git a/static/images/data_logo2.png b/static/images/data_logo2.png new file mode 100644 index 0000000..879abdb Binary files /dev/null and b/static/images/data_logo2.png differ 
diff --git a/static/images/data_logo3.png b/static/images/data_logo3.png new file mode 100644 index 0000000..b10a0b0 Binary files /dev/null and b/static/images/data_logo3.png differ diff --git a/static/images/data_logo4.png b/static/images/data_logo4.png new file mode 100644 index 0000000..93b0ee0 Binary files /dev/null and b/static/images/data_logo4.png differ diff --git a/static/images/dataset.png b/static/images/dataset.png new file mode 100644 index 0000000..f4c0c8c Binary files /dev/null and b/static/images/dataset.png differ diff --git a/static/images/ec_icon.png b/static/images/ec_icon.png new file mode 100644 index 0000000..fa4b4ce Binary files /dev/null and b/static/images/ec_icon.png differ diff --git a/static/images/logo.png b/static/images/logo.png new file mode 100644 index 0000000..923762a Binary files /dev/null and b/static/images/logo.png differ diff --git a/static/images/res_eval_crafter.png b/static/images/res_eval_crafter.png new file mode 100644 index 0000000..e801c56 Binary files /dev/null and b/static/images/res_eval_crafter.png differ diff --git a/static/images/res_genai_vbench.png b/static/images/res_genai_vbench.png new file mode 100644 index 0000000..27b857b Binary files /dev/null and b/static/images/res_genai_vbench.png differ diff --git a/static/images/res_video_eval.png b/static/images/res_video_eval.png new file mode 100644 index 0000000..3dc8058 Binary files /dev/null and b/static/images/res_video_eval.png differ diff --git a/static/images/subscore_def.png b/static/images/subscore_def.png new file mode 100644 index 0000000..9493ee3 Binary files /dev/null and b/static/images/subscore_def.png differ diff --git a/static/images/teaser.png b/static/images/teaser.png new file mode 100644 index 0000000..09a42ea Binary files /dev/null and b/static/images/teaser.png differ diff --git a/static/images/wordcloud.png b/static/images/wordcloud.png new file mode 100644 index 0000000..6c0c4d1 Binary files /dev/null and b/static/images/wordcloud.png 
differ diff --git a/templates/gif.html b/templates/gif.html new file mode 100644 index 0000000..45d1e20 --- /dev/null +++ b/templates/gif.html @@ -0,0 +1,225 @@ +
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: completely base your choice of which one to visit today on the dish that most entices your taste buds, 1080P, high quality, comic +
+

+ + + + + +
VQTCDDTVAFC
33133
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: an African American female video editor editing videos

+

+ + + + + +
VQTCDDTVAFC
11331
+ +
+
+ +
+
+
+ GIF 1 +
+

+ prompt: Cinematic, A light rain is falling. Tea pickers are picking tea in a tea garden, 4K, anime style

+

+ + + + + +
VQTCDDTVAFC
32331
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: crypto new year Christmas santa money dollars pack
+ +

+ + + + + +
VQTCDDTVAFC
12331
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: Woman receiving a rose and blushing with a smile
+ +

+ + + + + +
VQTCDDTVAFC
22332
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: panorama gold coast city in future as a dystopian prison
+ +

+ + + + + +
VQTCDDTVAFC
23323
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: little bear looks surprised as the moon gets smaller
+ +

+ + + + + +
VQTCDDTVAFC
12312
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: alexandra daddario, upperbody focus, slow motion, cinematic
+ +

+ + + + + +
VQTCDDTVAFC
22331
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: cinematic portrait of two dogs running away from a medieval man
+ +

+ + + + + +
VQTCDDTVAFC
12321
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: a skateboard on the bottom of a surfboard, front view
+ +

+ + + + + +
VQTCDDTVAFC
33332
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: yellow van with trailer starts to back up

+

+ + + + + +
VQTCDDTVAFC
44444
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: five gray wolf pups frolicking and chasing each other around a remote gravel road, surrounded by grass. The pups run and leap, chasing each other, and nipping at each other, playing
+

+ + + + + +
VQTCDDTVAFC
42424
+
+
+ +
+
+
\ No newline at end of file diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000..a6e617a --- /dev/null +++ b/templates/index.html @@ -0,0 +1,1123 @@ + + + + + + + + + + MantisScore + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+

+ + MantisScore

+

A Reliable Fine-grained Metric for Video Generation

+
+
+ + 1,2,† + Xuan He*, + + + 1,† + Dongfu Jiang*, + + + 1,3 + Ge Zhang, + + + 1 + Max Ku, + +
+ 1Achint Soni, + 1Sherman Siu, + 1Haonan Chen, + 1Abhranil Chandra, + 1Ziyan Jiang, + 1Aaran Arulraj, + 4Kai Wang, + 1Quy Duc Do, + 1Yuansheng Ni, + 2Bohan Lyu, + 1Yaswanth Narsupalli, + 1Rongqi Fan, + 1Zhiheng Lyu, + + 5Bill Yuchen Lin, + + + 1,,† + Wenhu Chen + +
+
+
+ + 1University of Waterloo, + 2Tsinghua University, + 3StarDust.AI, + 4University of Toronto, + 5AI2 + +
+
+
+ + *Equal Contribution + +
+ + + + + +
+ +
+
+ +
+
+
+
+
+ + + +
+
+

+ Demo - MantisScore

+
+
+ + + + + +
+
+

+ Abstract

+
+
+ +
+
+ +
+
+ +
+

+ The recent years have witnessed great advances + in text-to-video generation. However, the video + evaluation metrics have lagged significantly behind, which fails to produce an accurate and + holistic measure of the generated videos' quality. The main barrier is the lack of high-quality + human rating data.
+

    +
  1. + + VideoEval Dataset. In this paper, we release + VideoEval, the first large-scale multi-aspect + video evaluation dataset. VideoEval consists + of high-quality human-provided ratings for 5 + video evaluation aspects on the 37.6K videos + generated from 11 existing popular video generative models. +
  2. +
  3. + + MantisScore. + We train MantisScore based on VideoEval to enable automatic video quality assessment. + Experiments show that the Spearman correlation between MantisScore + and humans can reach 77.1 on VideoEval test, beating the prior best metrics by about + 50 points. Further result on the held-out Eval-Crafter, GenAI-Bench, and VBench, show that + MantisScore is highly generalizable and + still beating the prior best metrics by a remarkable margin. +
  4. +
  5. + Model Variants. We observe that using Mantis as + the based model consistently beats that using + Idefics2 and VideoLLaVA, and the regression based model can achieve better results than the + generative ones. +
  6. + +
+ Due to its high reliability, we + believe MantisScore can serve as a valuable + tool for accelerate video generation research. + +

+ +
+
+
+
+
+ + + + + +
+
+

+ + VideoEval Dataset
Multi-Aspect Human-Annotated Video Evaluation Data

+
+
+ +
+
+
+
+
+

+ VideoEval contains a total of 37.6K text-to-video pairs from 11 popular video generative models, + with some real-world videos as data augmentation. + The videos are annotated by raters for five evaluation dimensions: Visual Quality, Temporal Consistency, Dynamic Degree, + Text-to-Video Alignment and Factual Consistency, + in 1-4 scoring scale. Below we show the detailed description of our VideoEval dataset. + Please check out + 🤗 VideoEval + on hugging face datasets for usage. + +

+ + +
+

Statistics

+ +
+

+
+
+
+
+
+ +
+
+

Annotation Examples

+ 1-Bad, 2-Average, 3-Good, 4-Real/Perfect
+
+
+ +
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: completely base your choice of which one to visit today on the dish that most entices your taste buds, 1080P, high quality, comic +
+

+ + + + + +
VQTCDDTVAFC
33133
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: an African American female video editor editing videos

+

+ + + + + +
VQTCDDTVAFC
11331
+ +
+
+ +
+
+
+ GIF 1 +
+

+ prompt: Cinematic, A light rain is falling. Tea pickers are picking tea in a tea garden, 4K, anime style

+

+ + + + + +
VQTCDDTVAFC
32331
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: crypto new year Christmas santa money dollars pack
+ +

+ + + + + +
VQTCDDTVAFC
12331
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: Woman receiving a rose and blushing with a smile
+ +

+ + + + + +
VQTCDDTVAFC
22332
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: panorama gold coast city in future as a dystopian prison
+ +

+ + + + + +
VQTCDDTVAFC
23323
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: little bear looks surprised as the moon gets smaller
+ +

+ + + + + +
VQTCDDTVAFC
12312
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: alexandra daddario, upperbody focus, slow motion, cinematic
+ +

+ + + + + +
VQTCDDTVAFC
22331
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: cinematic portrait of two dogs running away from a medieval man
+ +

+ + + + + +
VQTCDDTVAFC
12321
+
+
+ + +
+
+
+ GIF 1 +
+

+ prompt: a skateboard on the bottom of a surfboard, front view
+ +

+ + + + + +
VQTCDDTVAFC
33332
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: yellow van with trailer starts to back up

+

+ + + + + +
VQTCDDTVAFC
44444
+
+
+ +
+
+
+ GIF 1 +
+

+ prompt: five gray wolf pups frolicking and chasing each other around a remote gravel road, surrounded by grass. The pups run and leap, chasing each other, and nipping at each other, playing
+

+ + + + + +
VQTCDDTVAFC
42424
+
+
+ +
+
+
+ + +
+ +
+ + +
+
+

+ + MantisScore +

+
+
+ +
+
+
+
+
+

+ MantisScore is finetuned on the VideoEval dataset's 37K training set, taking + Mantis-8B-Idefics2 as the base model. + We try a generation scoring method and a regression scoring method: + the former means the model's answer follows a template + predefined for video quality evaluation, + while the latter outputs 5 logits as evaluation scores in 5 dimensions. + + Besides, we also run an ablation on the base model, using + Mantis-8B-Idefics2, + Idefics2-8B + and + VideoLLaVA-7B + as base models for finetuning. Mantis-8B-Idefics2 turns out to + have the best performance on video quality evaluation. +

+
+
+
+
+ +
+

+ Video Evaluation Benchmarks +

+
+
+
+
+

+ + VideoEval-test

+

+ We test our video evaluator MantisScore on the VideoEval-test set. + Here are the results of some feature-based metrics like PIQE, CLIP-sim, X-CLIP-Score, etc., + MLLM-prompting methods like GPT-4o, Gemini-1.5-Pro, etc., and our MantisScore. + As seen in the table below, MantisScore surpasses the best baseline by 54.1 on average across 5 aspects. +

+
+

+ + EvalCrafter Benchmark

+

+ We select 3 dimensions from EvalCrafter that match our evaluation aspects + and collect 2500+ videos for testing. MantisScore surpasses all the baselines in 3 aspects + and EvalCrafter(GPT-4V) in Text-to-Video Alignment. +

+
+

⚔️GenAI-Bench and VBench

+

+ GenAI-Bench is a multimodal benchmark for MLLMs' capability on preference comparison + for tasks like text-to-video generation, image-editing and others, while + VBench is a comprehensive multi-aspect benchmark suite for + video generative models. For GenAI-Bench we collect 2100+ videos, and + for VBench we select a subset from 5 aspects of VBench, like technical + quality, subject consistency, and so on, then subsample 100 unique prompts (2000 videos in total) for testing. + We use the averaged scores of the five aspects for MLLM prompting baselines and our models to + give the preference and calculate the pairwise accuracy as a performance indicator. +

+ +
+
+
+ +
+

+ Results & Leaderboard +

+
+ +
+
+
+ +
+ + MantisScore series + + MLLM Open Source + Feature-Based Metric +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MetricFinal Sum Score ↓VideoEval-testEvalCrafterGenAI-BenchVBench
MantisScore (reg)278.375.751.178.573.0
MantisScore-(gen)222.477.127.659.058.7
∆ over Best Baseline119.5 54.014.911.417.9
Gemini-1.5-Pro158.822.122.960.952.9
Gemini-1.5-Flash157.520.817.367.152.3
GPT-4o155.423.128.752.051.7
CLIP-sim126.88.936.234.247.4
DINO-sim121.37.532.138.543.3
SSIM-sim118.013.426.934.143.5
CLIP-Score114.4-7.221.745.054.9
LLaVA-1.5-7B108.38.510.549.939.4
LLaVA-1.6-7B93.3-3.113.244.538.7
X-CLIP-Score92.9-1.913.341.440.1
PIQE78.3-10.1-1.234.555.1
BRISQUE75.9-20.33.938.553.7
Idefics173.06.50.334.631.7
MSE-dyn42.5-5.5-17.028.436.5
SSIM-dyn36.7-12.9-26.431.444.5
Fuyu-----
Kosmos-2-----
CogVLM-----
OpenFlamingo-----
+

+ The best MantisScore is in bold and the best among the baselines is underlined. + "-" means the answer of the MLLM is meaningless or in a wrong format. +

+
+
+
+ +
+ +
+
+

+ Case Studies +

+
+
+ +
+
+

+ + + VideoEval-test

+ +
+
+
+
+

+ The scale of all the scores is [1, 2, 3, 4], except for MantisScore (reg), + which outputs five float logits ranging from 0.50 to 4.50.
+ For scale [1, 2, 3, 4], 1-Bad, 2-Avg, 3-Good, 4-Perfect/Real.
+ + +

+
+
+ +
+
+
+
+ GIF +
+

+ prompt: A robot that throws a stack of paper from a desk +

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MethodVQTCDDTVAFCMethodVQTCDDTVAFC
Human score31331
MantisScore (reg)2.670.813.092.500.80MantisScore (gen)31331
GPT-4o34234Gemini-1.5-Pro31133
Gemini-1.5-Flash31133LLaVA-1.6-7B33333
LLaVA-1.5-7B33332Idefics144312
PIQE1----DINO-sim-1---
SSIM-dyn--3--CLIP-Score---2-
+
+ +
+
+
+ GIF +
+

+ prompt: Every day must be Sunday Amusement park inside the school +

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MethodVQTCDDTVAFCMethodVQTCDDTVAFC
Human score11321
MantisScore (reg)1.041.422.951.971.09MantisScore (gen)11321
GPT-4o34233Gemini-1.5-Pro21221
Gemini-1.5-Flash21121LLaVA-1.6-7B33322
LLaVA-1.5-7B33322Idefics144312
PIQE1----DINO-sim-3---
SSIM-dyn--4--CLIP-Score---2-
+
+ + +
+
+ +
+

+ + ⚔️GenAI-Bench +

+
+ +
+
+
+

+ In GenAI-Bench, each entry has two videos with the same prompt and a human preference annotation. + For MantisScore and MLLM prompting methods, we use the average score of all 5 dimensions to predict the preference, + while for feature-based metrics, we use their discretized output to predict the preference directly. +

+
+
+ +
+
+
+
+ GIF +
+

+ Left Video
+ prompt: a cute dog is playing a ball +

+
+
+
+
+
+ GIF +
+

+ Right Video
+ prompt: a cute dog is playing a ball +

+
+
+ +
+ GIF +
+
+ +
+
+
+
+ GIF +
+

+ Left Video
+ prompt: An astronaut flying in space, oil painting +

+
+
+
+
+
+ GIF +
+

+ Right Video
+ prompt: An astronaut flying in space, oil painting +

+
+
+ +
+ GIF +
+
+ +
+ +
+ + + +
+
+ +

BibTeX

+ +
+
+ + + + + diff --git a/training/download.py b/training/download.py new file mode 100644 index 0000000..19ec8bb --- /dev/null +++ b/training/download.py @@ -0,0 +1,4 @@ +from huggingface_hub import hf_hub_download + +hf_hub_download(repo_id="TIGER-Lab/VideoFeedback", filename="annotated/frames_annotated_train.zip", repo_type="dataset", local_dir="./data") +hf_hub_download(repo_id="TIGER-Lab/VideoFeedback", filename="annotated/frames_real_train.zip", repo_type="dataset", local_dir="./data") diff --git a/training/download.sh b/training/download.sh new file mode 100644 index 0000000..44de3f2 --- /dev/null +++ b/training/download.sh @@ -0,0 +1,11 @@ + +mkdir -p ./data + +if [ -d "${bench_name}/${split}/frames_${postfix}" ]; then + echo "frames exists" +else + echo "frames not exist, downloading..." + wget "https://huggingface.co/datasets/TIGER-Lab/VideoFeedback/resolve/main/real/frames_real_train.zip" -O "./data/frames_real_train.zip" + unzip "./data/frames_real_train.zip" -d "./data/images/" + rm "./data/frames_real_train.zip" +fi \ No newline at end of file