Spaces: Running on Zero

aknapitsch committed · Commit 9507532 · 1 Parent(s): f7100fd

initial commit of map anything demo

Browse files. This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +7 -0
- .gitignore +276 -0
- .gradio/certificate.pem +31 -0
- LICENSE +201 -0
- README.md +4 -4
- README_grad.md +12 -0
- app.py +1752 -0
- app_interactive.py +9 -0
- configs/calibration_benchmark.yaml +23 -0
- configs/dataset/ase_wai/default.yaml +3 -0
- configs/dataset/ase_wai/train/default.yaml +26 -0
- configs/dataset/ase_wai/val/default.yaml +26 -0
- configs/dataset/bedlam_wai/default.yaml +3 -0
- configs/dataset/bedlam_wai/train/default.yaml +26 -0
- configs/dataset/bedlam_wai/val/default.yaml +26 -0
- configs/dataset/benchmark_512_eth3d_snpp_tav2.yaml +20 -0
- configs/dataset/benchmark_512_snpp_tav2.yaml +17 -0
- configs/dataset/benchmark_518_eth3d_snpp_tav2.yaml +20 -0
- configs/dataset/benchmark_518_snpp_tav2.yaml +17 -0
- configs/dataset/benchmark_sv_calib_518_many_ar_eth3d_snpp_tav2.yaml +20 -0
- configs/dataset/blendedmvs_wai/default.yaml +3 -0
- configs/dataset/blendedmvs_wai/train/default.yaml +26 -0
- configs/dataset/blendedmvs_wai/val/default.yaml +26 -0
- configs/dataset/default.yaml +45 -0
- configs/dataset/dl3dv_wai/default.yaml +3 -0
- configs/dataset/dl3dv_wai/train/default.yaml +28 -0
- configs/dataset/dl3dv_wai/val/default.yaml +28 -0
- configs/dataset/dtu_wai/default.yaml +2 -0
- configs/dataset/dtu_wai/test/default.yaml +22 -0
- configs/dataset/dynamicreplica_wai/default.yaml +3 -0
- configs/dataset/dynamicreplica_wai/train/default.yaml +26 -0
- configs/dataset/dynamicreplica_wai/val/default.yaml +26 -0
- configs/dataset/eth3d_wai/default.yaml +2 -0
- configs/dataset/eth3d_wai/test/default.yaml +22 -0
- configs/dataset/gta_sfm_wai/default.yaml +3 -0
- configs/dataset/gta_sfm_wai/train/default.yaml +26 -0
- configs/dataset/gta_sfm_wai/val/default.yaml +26 -0
- configs/dataset/matrixcity_wai/default.yaml +3 -0
- configs/dataset/matrixcity_wai/train/default.yaml +26 -0
- configs/dataset/matrixcity_wai/val/default.yaml +26 -0
- configs/dataset/megadepth_wai/default.yaml +3 -0
- configs/dataset/megadepth_wai/train/default.yaml +26 -0
- configs/dataset/megadepth_wai/val/default.yaml +26 -0
- configs/dataset/megatrain_11d_se_518_many_ar_48ipg_64g.yaml +53 -0
- configs/dataset/megatrain_12d_518_many_ar_24ipg_16g.yaml +56 -0
- configs/dataset/megatrain_13d_512_many_ar_24ipg_16g.yaml +59 -0
- configs/dataset/megatrain_13d_518_many_ar_24ipg_16g.yaml +59 -0
- configs/dataset/megatrain_13d_518_many_ar_48ipg_64g.yaml +59 -0
- configs/dataset/megatrain_6d_518_many_ar_48ipg_64g.yaml +38 -0
- configs/dataset/megatrain_6d_518_many_ar_48ipg_8g.yaml +38 -0
.gitattributes
CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+examples/**/*.jpg filter=lfs diff=lfs merge=lfs -text
+examples/**/*.jpeg filter=lfs diff=lfs merge=lfs -text
+examples/**/*.png filter=lfs diff=lfs merge=lfs -text
+examples/**/*.bmp filter=lfs diff=lfs merge=lfs -text
+examples/**/*.tiff filter=lfs diff=lfs merge=lfs -text
+examples/**/*.tif filter=lfs diff=lfs merge=lfs -text
+examples/* filter=lfs diff=lfs merge=lfs -text
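The seven added rules route the image assets under examples/ through Git LFS. As a quick offline sanity check, gitattributes wildcards use the same gitwildmatch syntax as .gitignore and can be evaluated approximately in Python; a minimal sketch, assuming the third-party pathspec package and sample paths taken from elsewhere in this commit (neither is part of the diff above):

    # Sketch: check which paths the new LFS patterns from the diff above would
    # capture. Assumes the third-party `pathspec` package (pip install pathspec).
    import pathspec

    lfs_patterns = [
        "examples/**/*.jpg",
        "examples/**/*.jpeg",
        "examples/**/*.png",
        "examples/**/*.bmp",
        "examples/**/*.tiff",
        "examples/**/*.tif",
        "examples/*",
    ]
    spec = pathspec.PathSpec.from_lines("gitwildmatch", lfs_patterns)

    for path in ["examples/wai_logo/wai_logo.png", "app.py", "configs/train.yaml"]:
        # Only the examples/ asset should match (and thus be stored via LFS).
        print(path, "->", spec.match_file(path))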
.gitignore
ADDED
@@ -0,0 +1,276 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be added to the global gitignore or merged into this project gitignore. For a PyCharm
+# project, it is recommended to ignore the entire .idea directory, or at least the following:
+# .idea/workspace.xml
+# .idea/tasks.xml
+# .idea/usage.statistics.xml
+# .idea/dictionaries
+# .idea/shelf
+
+# VS Code
+.vscode/
+*.code-workspace
+
+# Local History for Visual Studio Code
+.history/
+
+# Built Visual Studio Code Extensions
+*.vsix
+
+# Hugging Face specific
+# Model files (usually large binary files)
+*.bin
+*.safetensors
+*.h5
+*.onnx
+*.pkl
+*.pth
+*.pt
+*.ckpt
+*.pb
+*.tflite
+*.mlmodel
+
+# Hugging Face cache and tokens
+.cache/
+cache/
+**/cache/
+hf_token*
+.huggingface/
+transformers_cache/
+datasets_cache/
+input_images_*
+
+# Gradio temporary files
+gradio_cached_examples/
+flagged/
+
+# Data directories
+data/
+checkpoints/
+outputs/
+results/
+logs/
+tmp/
+temp/
+# examples/*/
+# /examples*.jpg
+# *.png
+# *.jpeg
+# examples/
+
+# OS generated files
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+desktop.ini
+
+# Backup files
+*.bak
+*.swp
+*.swo
+*~
+
+# Compressed files
+*.7z
+*.dmg
+*.gz
+*.iso
+*.jar
+*.rar
+*.tar
+*.zip
+
+# IDE and editor files
+.idea/
+*.sublime-project
+*.sublime-workspace
+.vscode/settings.json
+.vscode/tasks.json
+.vscode/launch.json
+.vscode/extensions.json
+
+# Node modules (if any frontend components)
+node_modules/
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# Docker
+Dockerfile*
+docker-compose*
+.dockerignore
+
+# MLOps and experiment tracking
+wandb/
+.neptune/
+mlruns/
+.mlflow/
+tensorboard_logs/
+
+# Secrets and configuration
+*.secret
+*.key
+config.ini
+.env.local
+.env.*.local
+secrets.json
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
LICENSE
ADDED
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2025 Meta, Nikhil Keetha
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
README.md
CHANGED
@@ -1,8 +1,8 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Mapanything Gradio
+emoji: 🐠
+colorFrom: purple
+colorTo: green
 sdk: gradio
 sdk_version: 5.44.1
 app_file: app.py
README_grad.md
ADDED
@@ -0,0 +1,12 @@
+---
+title: Mapanything Gradio
+emoji: 🐠
+colorFrom: purple
+colorTo: green
+sdk: gradio
+sdk_version: 5.44.1
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
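The YAML front matter added in both README files is what the Hub reads to configure the Space (SDK, version, entry file, card appearance). A minimal sketch of extracting it locally, assuming PyYAML is installed; parse_front_matter is a hypothetical helper written for illustration, not part of this commit:

    # Sketch: parse the Spaces YAML front matter from a README file.
    # Assumes PyYAML (pip install pyyaml); `parse_front_matter` is a
    # hypothetical helper, not part of this repository.
    import yaml

    def parse_front_matter(path):
        with open(path, "r", encoding="utf-8") as f:
            text = f.read()
        if not text.startswith("---"):
            return {}
        # The front matter sits between the first two "---" delimiters.
        _, block, _body = text.split("---", 2)
        return yaml.safe_load(block) or {}

    config = parse_front_matter("README_grad.md")
    print(config.get("sdk"), config.get("sdk_version"), config.get("app_file"))
    # Expected output: gradio 5.44.1 app.py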
app.py
ADDED
|
@@ -0,0 +1,1752 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
# conda activate hf3.10
|
| 7 |
+
|
| 8 |
+
import base64
|
| 9 |
+
import gc
|
| 10 |
+
import os
|
| 11 |
+
import shutil
|
| 12 |
+
import sys
|
| 13 |
+
import time
|
| 14 |
+
from datetime import datetime
|
| 15 |
+
|
| 16 |
+
import cv2
|
| 17 |
+
import gradio as gr
|
| 18 |
+
import numpy as np
|
| 19 |
+
import spaces
|
| 20 |
+
import torch
|
| 21 |
+
from huggingface_hub import hf_hub_download
|
| 22 |
+
|
| 23 |
+
sys.path.append("mapanything/")
|
| 24 |
+
|
| 25 |
+
from hf_utils.css_and_html import (
|
| 26 |
+
get_acknowledgements_html,
|
| 27 |
+
get_description_html,
|
| 28 |
+
get_gradio_theme,
|
| 29 |
+
get_header_html,
|
| 30 |
+
GRADIO_CSS,
|
| 31 |
+
MEASURE_INSTRUCTIONS_HTML,
|
| 32 |
+
)
|
| 33 |
+
from hf_utils.vgg_geometry import unproject_depth_map_to_point_map
|
| 34 |
+
from hf_utils.visual_util import predictions_to_glb
|
| 35 |
+
from mapanything.models import init_model
|
| 36 |
+
from mapanything.utils.geometry import depth_edge, normals_edge, points_to_normals
|
| 37 |
+
from mapanything.utils.image import load_images, rgb
|
| 38 |
+
from mapanything.utils.inference import loss_of_one_batch_multi_view
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def get_logo_base64():
|
| 42 |
+
"""Convert WAI logo to base64 for embedding in HTML"""
|
| 43 |
+
logo_path = "examples/wai_logo/wai_logo.png"
|
| 44 |
+
try:
|
| 45 |
+
with open(logo_path, "rb") as img_file:
|
| 46 |
+
img_data = img_file.read()
|
| 47 |
+
base64_str = base64.b64encode(img_data).decode()
|
| 48 |
+
return f"data:image/png;base64,{base64_str}"
|
| 49 |
+
except FileNotFoundError:
|
| 50 |
+
return None
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
print("Initializing and loading MapAnything model...")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def load_hf_token():
|
| 57 |
+
"""Load HuggingFace access token from local file"""
|
| 58 |
+
token_file_paths = [
|
| 59 |
+
"~/hf_token.txt",
|
| 60 |
+
]
|
| 61 |
+
|
| 62 |
+
for token_path in token_file_paths:
|
| 63 |
+
if os.path.exists(token_path):
|
| 64 |
+
try:
|
| 65 |
+
with open(token_path, "r") as f:
|
| 66 |
+
token = f.read().strip()
|
| 67 |
+
print(f"Loaded HuggingFace token from: {token_path}")
|
| 68 |
+
return token
|
| 69 |
+
except Exception as e:
|
| 70 |
+
print(f"Error reading token from {token_path}: {e}")
|
| 71 |
+
continue
|
| 72 |
+
|
| 73 |
+
# Also try environment variable
|
| 74 |
+
# see https://huggingface.co/docs/hub/spaces-overview#managing-secrets on options
|
| 75 |
+
token = (
|
| 76 |
+
os.getenv("HF_TOKEN")
|
| 77 |
+
or os.getenv("HUGGING_FACE_HUB_TOKEN")
|
| 78 |
+
or os.getenv("HUGGING_FACE_MODEL_TOKEN")
|
| 79 |
+
)
|
| 80 |
+
if token:
|
| 81 |
+
print("Loaded HuggingFace token from environment variable")
|
| 82 |
+
return token
|
| 83 |
+
|
| 84 |
+
print(
|
| 85 |
+
"Warning: No HuggingFace token found. Model loading may fail for private repositories."
|
| 86 |
+
)
|
| 87 |
+
return None
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def init_hydra_config(config_path, overrides=None):
|
| 91 |
+
"Initialize Hydra config"
|
| 92 |
+
import hydra
|
| 93 |
+
|
| 94 |
+
config_dir = os.path.dirname(config_path)
|
| 95 |
+
config_name = os.path.basename(config_path).split(".")[0]
|
| 96 |
+
relative_path = os.path.relpath(config_dir, os.path.dirname(__file__))
|
| 97 |
+
hydra.core.global_hydra.GlobalHydra.instance().clear()
|
| 98 |
+
hydra.initialize(version_base=None, config_path=relative_path)
|
| 99 |
+
if overrides is not None:
|
| 100 |
+
cfg = hydra.compose(config_name=config_name, overrides=overrides)
|
| 101 |
+
else:
|
| 102 |
+
cfg = hydra.compose(config_name=config_name)
|
| 103 |
+
return cfg
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def init_inference_model(config, ckpt_path, device):
|
| 107 |
+
"Initialize the model for inference"
|
| 108 |
+
if isinstance(config, dict):
|
| 109 |
+
config_path = config["path"]
|
| 110 |
+
overrrides = config["config_overrides"]
|
| 111 |
+
model_args = init_hydra_config(config_path, overrides=overrrides)
|
| 112 |
+
model = init_model(model_args.model.model_str, model_args.model.model_config)
|
| 113 |
+
else:
|
| 114 |
+
config_path = config
|
| 115 |
+
model_args = init_hydra_config(config_path)
|
| 116 |
+
model = init_model(model_args.model_str, model_args.model_config)
|
| 117 |
+
model.to(device)
|
| 118 |
+
if ckpt_path is not None:
|
| 119 |
+
print("Loading model from: ", ckpt_path)
|
| 120 |
+
|
| 121 |
+
# Load HuggingFace token for private repositories
|
| 122 |
+
hf_token = load_hf_token()
|
| 123 |
+
|
| 124 |
+
# Try to download from HuggingFace Hub first if it's a HF URL
|
| 125 |
+
if "huggingface.co" in ckpt_path:
|
| 126 |
+
try:
|
| 127 |
+
# Extract repo_id and filename from URL
|
| 128 |
+
# URL format: https://huggingface.co/facebook/MapAnything/resolve/main/mapa_curri_24v_13d_48ipg_64g.pth
|
| 129 |
+
parts = ckpt_path.replace("https://huggingface.co/", "").split("/")
|
| 130 |
+
repo_id = f"{parts[0]}/{parts[1]}" # e.g., "facebook/MapAnything"
|
| 131 |
+
filename = "/".join(
|
| 132 |
+
parts[4:]
|
| 133 |
+
) # e.g., "mapa_curri_24v_13d_48ipg_64g.pth"
|
| 134 |
+
|
| 135 |
+
print(f"Downloading from HuggingFace Hub: {repo_id}/{filename}")
|
| 136 |
+
local_file = hf_hub_download(
|
| 137 |
+
repo_id=repo_id,
|
| 138 |
+
filename=filename,
|
| 139 |
+
token=hf_token,
|
| 140 |
+
cache_dir=None, # Use default cache
|
| 141 |
+
)
|
| 142 |
+
ckpt = torch.load(local_file, map_location=device, weights_only=False)
|
| 143 |
+
except Exception as e:
|
| 144 |
+
print(f"HuggingFace Hub download failed: {e}")
|
| 145 |
+
print("Falling back to torch.hub.load_state_dict_from_url...")
|
| 146 |
+
# Fallback to original method
|
| 147 |
+
ckpt = torch.hub.load_state_dict_from_url(
|
| 148 |
+
ckpt_path, map_location=device
|
| 149 |
+
)
|
| 150 |
+
else:
|
| 151 |
+
# Use original method for non-HF URLs
|
| 152 |
+
ckpt = torch.hub.load_state_dict_from_url(ckpt_path, map_location=device)
|
| 153 |
+
|
| 154 |
+
print(model.load_state_dict(ckpt["model"], strict=False))
|
| 155 |
+
model.eval()
|
| 156 |
+
return model
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
# MapAnything Configuration
|
| 160 |
+
high_level_config = {
|
| 161 |
+
"path": "configs/train.yaml",
|
| 162 |
+
"config_overrides": [
|
| 163 |
+
"machine=aws",
|
| 164 |
+
"model=mapanything",
|
| 165 |
+
"model/task=images_only",
|
| 166 |
+
"model.encoder.uses_torch_hub=false",
|
| 167 |
+
],
|
| 168 |
+
"checkpoint_path": "https://huggingface.co/facebook/MapAnything/resolve/main/mapa_curri_24v_13d_48ipg_64g.pth",
|
| 169 |
+
"trained_with_amp": True,
|
| 170 |
+
"trained_with_amp_dtype": "fp16",
|
| 171 |
+
"data_norm_type": "dinov2",
|
| 172 |
+
"patch_size": 14,
|
| 173 |
+
"resolution": 518,
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
# Initialize model - this will be done on GPU when needed
|
| 177 |
+
model = None
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
# -------------------------------------------------------------------------
|
| 181 |
+
# 1) Core model inference
|
| 182 |
+
# -------------------------------------------------------------------------
|
| 183 |
+
@spaces.GPU(duration=120)
|
| 184 |
+
def run_model(target_dir, model_placeholder):
|
| 185 |
+
"""
|
| 186 |
+
Run the MapAnything model on images in the 'target_dir/images' folder and return predictions.
|
| 187 |
+
"""
|
| 188 |
+
global model
|
| 189 |
+
print(f"Processing images from {target_dir}")
|
| 190 |
+
|
| 191 |
+
# Device check
|
| 192 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 193 |
+
device = torch.device(device)
|
| 194 |
+
# if not torch.cuda.is_available():
|
| 195 |
+
# raise ValueError("CUDA is not available. Check your environment.")
|
| 196 |
+
|
| 197 |
+
# Initialize model if not already done
|
| 198 |
+
if model is None:
|
| 199 |
+
print("Initializing MapAnything model...")
|
| 200 |
+
model = init_inference_model(
|
| 201 |
+
high_level_config, high_level_config["checkpoint_path"], device
|
| 202 |
+
)
|
| 203 |
+
else:
|
| 204 |
+
model = model.to(device)
|
| 205 |
+
|
| 206 |
+
model.eval()
|
| 207 |
+
|
| 208 |
+
# Load images using MapAnything's load_images function
|
| 209 |
+
print("Loading images...")
|
| 210 |
+
image_folder_path = os.path.join(target_dir, "images")
|
| 211 |
+
views = load_images(
|
| 212 |
+
image_folder_path,
|
| 213 |
+
resolution_set=high_level_config["resolution"],
|
| 214 |
+
verbose=False,
|
| 215 |
+
norm_type=high_level_config["data_norm_type"],
|
| 216 |
+
patch_size=high_level_config["patch_size"],
|
| 217 |
+
stride=1,
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
print(f"Loaded {len(views)} images")
|
| 221 |
+
if len(views) == 0:
|
| 222 |
+
raise ValueError("No images found. Check your upload.")
|
| 223 |
+
|
| 224 |
+
# Run inference using MapAnything's inference function
|
| 225 |
+
print("Running MapAnything inference...")
|
| 226 |
+
with torch.no_grad():
|
| 227 |
+
pred_result = loss_of_one_batch_multi_view(
|
| 228 |
+
views,
|
| 229 |
+
model,
|
| 230 |
+
None,
|
| 231 |
+
device,
|
| 232 |
+
use_amp=high_level_config["trained_with_amp"],
|
| 233 |
+
amp_dtype=high_level_config["trained_with_amp_dtype"],
|
| 234 |
+
)
|
| 235 |
+
|
| 236 |
+
# Convert predictions to format expected by visualization
|
| 237 |
+
predictions = {}
|
| 238 |
+
|
| 239 |
+
# Initialize lists for the required keys
|
| 240 |
+
extrinsic_list = []
|
| 241 |
+
intrinsic_list = []
|
| 242 |
+
world_points_list = []
|
| 243 |
+
depth_maps_list = []
|
| 244 |
+
images_list = []
|
| 245 |
+
confidence_list = []
|
| 246 |
+
final_mask_list = []
|
| 247 |
+
|
| 248 |
+
# Check if confidence data is available
|
| 249 |
+
has_confidence = False
|
| 250 |
+
for view_idx, view in enumerate(views):
|
| 251 |
+
view_key = f"pred{view_idx + 1}"
|
| 252 |
+
if view_key in pred_result and "conf" in pred_result[view_key]:
|
| 253 |
+
has_confidence = True
|
| 254 |
+
break
|
| 255 |
+
|
| 256 |
+
# Extract predictions for each view
|
| 257 |
+
for view_idx, view in enumerate(views):
|
| 258 |
+
# Get image for colors
|
| 259 |
+
image = rgb(view["img"], norm_type=high_level_config["data_norm_type"])
|
| 260 |
+
|
| 261 |
+
view_key = f"pred{view_idx + 1}"
|
| 262 |
+
if view_key in pred_result:
|
| 263 |
+
pred_pts3d = pred_result[view_key]["pts3d"][0].cpu().numpy()
|
| 264 |
+
|
| 265 |
+
# Get confidence data if available
|
| 266 |
+
confidence_map = None
|
| 267 |
+
if "conf" in pred_result[view_key]:
|
| 268 |
+
confidence_map = pred_result[view_key]["conf"][0].cpu().numpy()
|
| 269 |
+
|
| 270 |
+
# Compute final_mask just like in visualize_raw_inference_output function
|
| 271 |
+
# Create the prediction mask based on parameters
|
| 272 |
+
pred_mask = None
|
| 273 |
+
use_gt_mask_on_pred = False # Set based on your requirements
|
| 274 |
+
use_pred_mask = True # Set based on your requirements
|
| 275 |
+
use_non_ambi_mask = True # Set based on your requirements
|
| 276 |
+
use_conf_mask = False # Set based on your requirements
|
| 277 |
+
conf_percentile = 10 # Set based on your requirements
|
| 278 |
+
use_edge_mask = True # Set based on your requirements
|
| 279 |
+
pts_edge_tol = 5 # Set based on your requirements
|
| 280 |
+
depth_edge_rtol = 0.03 # Set based on your requirements
|
| 281 |
+
|
| 282 |
+
if use_pred_mask:
|
| 283 |
+
# Get non ambiguous mask if available and requested
|
| 284 |
+
has_non_ambiguous_mask = (
|
| 285 |
+
"non_ambiguous_mask" in pred_result[view_key] and use_non_ambi_mask
|
| 286 |
+
)
|
| 287 |
+
if has_non_ambiguous_mask:
|
| 288 |
+
non_ambiguous_mask = (
|
| 289 |
+
pred_result[view_key]["non_ambiguous_mask"][0].cpu().numpy()
|
| 290 |
+
)
|
| 291 |
+
pred_mask = non_ambiguous_mask
|
| 292 |
+
|
| 293 |
+
# Get confidence mask if available and requested
|
| 294 |
+
has_conf = "conf" in pred_result[view_key] and use_conf_mask
|
| 295 |
+
if has_conf:
|
| 296 |
+
confidences = pred_result[view_key]["conf"][0].cpu()
|
| 297 |
+
percentile_threshold = torch.quantile(
|
| 298 |
+
confidences, conf_percentile / 100.0
|
| 299 |
+
)
|
| 300 |
+
conf_mask = confidences > percentile_threshold
|
| 301 |
+
conf_mask = conf_mask.numpy()
|
| 302 |
+
if pred_mask is not None:
|
| 303 |
+
pred_mask = pred_mask & conf_mask
|
| 304 |
+
else:
|
| 305 |
+
pred_mask = conf_mask
|
| 306 |
+
|
| 307 |
+
# Apply edge mask if requested
|
| 308 |
+
if use_edge_mask and pred_mask is not None:
|
| 309 |
+
if "cam_quats" not in pred_result[view_key]:
|
| 310 |
+
# For direct point prediction
|
| 311 |
+
# Compute normals and edge mask
|
| 312 |
+
            normals, normals_mask = points_to_normals(
                pred_pts3d, mask=pred_mask
            )
            edge_mask = ~(
                normals_edge(normals, tol=pts_edge_tol, mask=normals_mask)
            )
        else:
            # For ray-based prediction
            ray_depth = pred_result[view_key]["depth_along_ray"][0].cpu()
            local_pts3d = (
                pred_result[view_key]["ray_directions"][0].cpu() * ray_depth
            )
            depth_z = local_pts3d[..., 2].numpy()

            # Compute normals and edge mask
            normals, normals_mask = points_to_normals(
                pred_pts3d, mask=pred_mask
            )
            edge_mask = ~(
                depth_edge(depth_z, rtol=depth_edge_rtol, mask=pred_mask)
                & normals_edge(normals, tol=pts_edge_tol, mask=normals_mask)
            )
        if pred_mask is not None:
            pred_mask = pred_mask & edge_mask

        # Determine final mask to use (like in visualize_raw_inference_output)
        final_mask = None
        valid_mask = np.ones_like(
            pred_pts3d[..., 0], dtype=bool
        )  # Create dummy valid_mask for app.py context

        if use_gt_mask_on_pred:
            final_mask = valid_mask
            if use_pred_mask and pred_mask is not None:
                final_mask = final_mask & pred_mask
        elif use_pred_mask and pred_mask is not None:
            final_mask = pred_mask
        else:
            final_mask = np.ones_like(valid_mask, dtype=bool)

        # Check if we have camera pose and intrinsics data
        if "cam_quats" in pred_result[view_key]:
            # Get decoupled quantities (like in visualize_raw_custom_data_inference_output)
            cam_quats = pred_result[view_key]["cam_quats"][0].cpu()
            cam_trans = pred_result[view_key]["cam_trans"][0].cpu()
            ray_directions = pred_result[view_key]["ray_directions"][0].cpu()
            ray_depth = pred_result[view_key]["depth_along_ray"][0].cpu()

            # Convert the quantities
            from mapanything.utils.geometry import (
                quaternion_to_rotation_matrix,
                recover_pinhole_intrinsics_from_ray_directions,
            )

            cam_rot = quaternion_to_rotation_matrix(cam_quats)
            cam_pose = torch.eye(4)
            cam_pose[:3, :3] = cam_rot
            cam_pose[:3, 3] = cam_trans
            # Invert in torch so the tensor keeps its .numpy() accessor below
            cam_pose = torch.linalg.inv(cam_pose)
            cam_intrinsics = recover_pinhole_intrinsics_from_ray_directions(
                ray_directions, use_geometric_calculation=True
            )

            # Compute depth as in app_map.py
            local_pts3d = ray_directions * ray_depth
            depth_z = local_pts3d[..., 2]

            # Convert to numpy and extract 3x4 extrinsic (remove bottom row)
            extrinsic = cam_pose[:3, :4].numpy()  # Shape: (3, 4)
            intrinsic = cam_intrinsics.numpy()  # Shape: (3, 3)
            depth_z = depth_z.numpy()  # Shape: (H, W)
        else:
            # Use dummy values if camera info not available
            # extrinsic: (3, 4) - [R|t] matrix
            extrinsic = np.eye(3, 4)  # Identity rotation, zero translation
            # intrinsic: (3, 3) - camera intrinsic matrix
            intrinsic = np.eye(3)
            # depth_z: (H, W) - dummy depth values
            depth_z = np.zeros_like(pred_pts3d[..., 0])

        # Append to lists
        extrinsic_list.append(extrinsic)
        intrinsic_list.append(intrinsic)
        world_points_list.append(pred_pts3d)
        depth_maps_list.append(depth_z)
        images_list.append(image[0])  # Add image to list
        final_mask_list.append(final_mask)  # Add final_mask to list

        # Add confidence data (or None if not available)
        if confidence_map is not None:
            confidence_list.append(confidence_map)
        elif has_confidence:
            # If some views have confidence but this one doesn't, add dummy confidence
            confidence_list.append(np.ones_like(depth_z))

    # Convert lists to numpy arrays with required shapes
    # extrinsic: (S, 3, 4) - batch of camera extrinsic matrices
    predictions["extrinsic"] = np.stack(extrinsic_list, axis=0)

    # intrinsic: (S, 3, 3) - batch of camera intrinsic matrices
    predictions["intrinsic"] = np.stack(intrinsic_list, axis=0)

    # world_points: (S, H, W, 3) - batch of 3D world points
    predictions["world_points"] = np.stack(world_points_list, axis=0)

    # depth: (S, H, W, 1) or (S, H, W) - batch of depth maps
    depth_maps = np.stack(depth_maps_list, axis=0)
    # Add channel dimension if needed to match (S, H, W, 1) format
    if len(depth_maps.shape) == 3:
        depth_maps = depth_maps[..., np.newaxis]
    predictions["depth"] = depth_maps

    # images: (S, H, W, 3) - batch of input images
    predictions["images"] = np.stack(images_list, axis=0)

    # confidence: (S, H, W) - batch of confidence maps (only if available)
    if confidence_list:
        predictions["confidence"] = np.stack(confidence_list, axis=0)

    # final_mask: (S, H, W) - batch of final masks for filtering
    predictions["final_mask"] = np.stack(final_mask_list, axis=0)

    world_points = unproject_depth_map_to_point_map(
        depth_maps, predictions["extrinsic"], predictions["intrinsic"]
    )
    predictions["world_points_from_depth"] = world_points

    # Process data for visualization tabs (depth, normal, measure)
    processed_data = process_predictions_for_visualization(
        pred_result, views, high_level_config
    )

    # Clean up
    torch.cuda.empty_cache()

    return predictions, processed_data
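

# Illustrative sketch (not part of the app): what unproject_depth_map_to_point_map
# computes for a single view, assuming OpenCV-style pinhole intrinsics K and a
# world-to-camera [R|t] extrinsic like the `extrinsic` built above. The repo's
# helper is batched and may differ in conventions; this is a minimal reference.
import numpy as np


def unproject_depth_sketch(depth, extrinsic, intrinsic):
    """depth: (H, W) z-depth, extrinsic: (3, 4) world-to-camera, intrinsic: (3, 3)."""
    H, W = depth.shape
    u, v = np.meshgrid(np.arange(W), np.arange(H))
    pix = np.stack([u, v, np.ones_like(u)], axis=-1).reshape(-1, 3).T  # (3, H*W)
    # Back-project pixels to camera space: X_c = z * K^-1 [u, v, 1]^T
    cam_pts = (np.linalg.inv(intrinsic) @ pix) * depth.reshape(1, -1)
    # Camera space to world space: X_w = R^T (X_c - t)
    R, t = extrinsic[:, :3], extrinsic[:, 3:4]
    return (R.T @ (cam_pts - t)).T.reshape(H, W, 3)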


def update_view_selectors(processed_data):
    """Update view selector dropdowns based on available views"""
    if processed_data is None or len(processed_data) == 0:
        choices = ["View 1"]
    else:
        num_views = len(processed_data)
        choices = [f"View {i + 1}" for i in range(num_views)]

    return (
        gr.Dropdown(choices=choices, value=choices[0]),  # depth_view_selector
        gr.Dropdown(choices=choices, value=choices[0]),  # normal_view_selector
        gr.Dropdown(choices=choices, value=choices[0]),  # measure_view_selector
    )


def get_view_data_by_index(processed_data, view_index):
    """Get view data by index, handling bounds"""
    if processed_data is None or len(processed_data) == 0:
        return None

    view_keys = list(processed_data.keys())
    if view_index < 0 or view_index >= len(view_keys):
        view_index = 0

    return processed_data[view_keys[view_index]]


def update_depth_view(processed_data, view_index, conf_thres=None):
    """Update depth view for a specific view index with optional confidence filtering"""
    view_data = get_view_data_by_index(processed_data, view_index)
    if view_data is None or view_data["depth"] is None:
        return None

    # Use confidence filtering if available
    confidence = view_data.get("confidence")
    return colorize_depth(
        view_data["depth"], confidence=confidence, conf_thres=conf_thres
    )


def update_normal_view(processed_data, view_index, conf_thres=None):
    """Update normal view for a specific view index with optional confidence filtering"""
    view_data = get_view_data_by_index(processed_data, view_index)
    if view_data is None or view_data["normal"] is None:
        return None

    # Use confidence filtering if available
    confidence = view_data.get("confidence")
    return colorize_normal(
        view_data["normal"], confidence=confidence, conf_thres=conf_thres
    )


def update_measure_view(processed_data, view_index):
    """Update measure view for a specific view index"""
    view_data = get_view_data_by_index(processed_data, view_index)
    if view_data is None:
        return None, []  # image, measure_points
    return view_data["image"], []


def navigate_depth_view(
    processed_data, current_selector_value, direction, conf_thres=None
):
    """Navigate depth view (direction: -1 for previous, +1 for next)"""
    if processed_data is None or len(processed_data) == 0:
        return "View 1", None

    # Parse current view number
    try:
        current_view = int(current_selector_value.split()[1]) - 1
    except (AttributeError, IndexError, ValueError):
        current_view = 0

    num_views = len(processed_data)
    new_view = (current_view + direction) % num_views

    new_selector_value = f"View {new_view + 1}"
    depth_vis = update_depth_view(processed_data, new_view, conf_thres=conf_thres)

    return new_selector_value, depth_vis


def navigate_normal_view(
    processed_data, current_selector_value, direction, conf_thres=None
):
    """Navigate normal view (direction: -1 for previous, +1 for next)"""
    if processed_data is None or len(processed_data) == 0:
        return "View 1", None

    # Parse current view number
    try:
        current_view = int(current_selector_value.split()[1]) - 1
    except (AttributeError, IndexError, ValueError):
        current_view = 0

    num_views = len(processed_data)
    new_view = (current_view + direction) % num_views

    new_selector_value = f"View {new_view + 1}"
    normal_vis = update_normal_view(processed_data, new_view, conf_thres=conf_thres)

    return new_selector_value, normal_vis


def navigate_measure_view(processed_data, current_selector_value, direction):
    """Navigate measure view (direction: -1 for previous, +1 for next)"""
    if processed_data is None or len(processed_data) == 0:
        return "View 1", None, []

    # Parse current view number
    try:
        current_view = int(current_selector_value.split()[1]) - 1
    except (AttributeError, IndexError, ValueError):
        current_view = 0

    num_views = len(processed_data)
    new_view = (current_view + direction) % num_views

    new_selector_value = f"View {new_view + 1}"
    measure_image, measure_points = update_measure_view(processed_data, new_view)

    return new_selector_value, measure_image, measure_points
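

# The three navigate_* helpers above share the same selector parsing and
# modular wraparound; a possible consolidation (a sketch, not wired into the
# UI) for the depth and normal tabs, where update_fn is update_depth_view or
# update_normal_view:
def navigate_view_sketch(
    processed_data, selector_value, direction, update_fn, conf_thres=None
):
    if processed_data is None or len(processed_data) == 0:
        return "View 1", None
    try:
        current = int(selector_value.split()[1]) - 1
    except (AttributeError, IndexError, ValueError):
        current = 0
    new_view = (current + direction) % len(processed_data)  # wraps at both ends
    return f"View {new_view + 1}", update_fn(
        processed_data, new_view, conf_thres=conf_thres
    )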


def populate_visualization_tabs(processed_data, conf_thres=None):
    """Populate the depth, normal, and measure tabs with processed data"""
    if processed_data is None or len(processed_data) == 0:
        return None, None, None, []

    # Use update functions to ensure confidence filtering is applied from the start
    depth_vis = update_depth_view(processed_data, 0, conf_thres=conf_thres)
    normal_vis = update_normal_view(processed_data, 0, conf_thres=conf_thres)
    measure_img, _ = update_measure_view(processed_data, 0)

    return depth_vis, normal_vis, measure_img, []


# -------------------------------------------------------------------------
# 2) Handle uploaded video/images --> produce target_dir + images
# -------------------------------------------------------------------------
def handle_uploads(input_video, input_images, s_time_interval=1.0):
    """
    Create a new 'target_dir' + 'images' subfolder, and place user-uploaded
    images or extracted frames from video into it. Return (target_dir, image_paths).
    """
    start_time = time.time()
    gc.collect()
    torch.cuda.empty_cache()

    # Create a unique folder name
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    target_dir = f"input_images_{timestamp}"
    target_dir_images = os.path.join(target_dir, "images")

    # Clean up if somehow that folder already exists
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
    os.makedirs(target_dir)
    os.makedirs(target_dir_images)

    image_paths = []

    # --- Handle images ---
    if input_images is not None:
        for file_data in input_images:
            if isinstance(file_data, dict) and "name" in file_data:
                file_path = file_data["name"]
            else:
                file_path = file_data
            dst_path = os.path.join(target_dir_images, os.path.basename(file_path))
            shutil.copy(file_path, dst_path)
            image_paths.append(dst_path)

    # --- Handle video ---
    if input_video is not None:
        if isinstance(input_video, dict) and "name" in input_video:
            video_path = input_video["name"]
        else:
            video_path = input_video

        vs = cv2.VideoCapture(video_path)
        fps = vs.get(cv2.CAP_PROP_FPS)
        # Sample one frame every s_time_interval seconds (at least every frame)
        frame_interval = max(1, int(fps * s_time_interval))

        count = 0
        video_frame_num = 0
        while True:
            gotit, frame = vs.read()
            if not gotit:
                break
            count += 1
            if count % frame_interval == 0:
                image_path = os.path.join(
                    target_dir_images, f"{video_frame_num:06}.png"
                )
                cv2.imwrite(image_path, frame)
                image_paths.append(image_path)
                video_frame_num += 1
        vs.release()

    # Sort final images for gallery
    image_paths = sorted(image_paths)

    end_time = time.time()
    print(
        f"Files copied to {target_dir_images}; took {end_time - start_time:.3f} seconds"
    )
    return target_dir, image_paths
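

# Worked example of the frame-sampling arithmetic in handle_uploads
# (illustrative numbers): a 30 fps video with s_time_interval=0.5 keeps every
# 15th frame, i.e. two frames per second of video.
_fps, _interval = 30.0, 0.5
_frame_interval = max(1, int(_fps * _interval))
assert _frame_interval == 15
assert 300 // _frame_interval == 20  # a 10 s clip (300 frames) yields 20 images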


# -------------------------------------------------------------------------
# 3) Update gallery on upload
# -------------------------------------------------------------------------
def update_gallery_on_upload(input_video, input_images, s_time_interval=1.0):
    """
    Whenever the user uploads or changes files, immediately handle them and
    show them in the gallery. Returns (viewer, target_dir, image_paths,
    log message); all four are None if nothing is uploaded.
    """
    if not input_video and not input_images:
        return None, None, None, None
    target_dir, image_paths = handle_uploads(input_video, input_images, s_time_interval)
    return (
        None,
        target_dir,
        image_paths,
        "Upload complete. Click 'Reconstruct' to begin 3D processing.",
    )


# -------------------------------------------------------------------------
# 4) Reconstruction: uses the target_dir plus any viz parameters
# -------------------------------------------------------------------------
@spaces.GPU(duration=120)
def gradio_demo(
    target_dir,
    conf_thres=3.0,
    frame_filter="All",
    show_cam=True,
    filter_sky=False,
    filter_black_bg=False,
    filter_white_bg=False,
    mask_ambiguous=False,
):
    """
    Perform reconstruction using the already-created target_dir/images.
    """
    if not os.path.isdir(target_dir) or target_dir == "None":
        # Match the arity of the success path (eleven outputs)
        return (
            None,
            "No valid target directory found. Please upload first.",
            None, None, None, None, None, "", None, None, None,
        )

    start_time = time.time()
    gc.collect()
    torch.cuda.empty_cache()

    # Always use Pointmap Branch for MapAnything
    prediction_mode = "Pointmap Branch"

    # Prepare frame_filter dropdown
    target_dir_images = os.path.join(target_dir, "images")
    all_files = (
        sorted(os.listdir(target_dir_images))
        if os.path.isdir(target_dir_images)
        else []
    )
    all_files = [f"{i}: {filename}" for i, filename in enumerate(all_files)]
    frame_filter_choices = ["All"] + all_files

    print("Running MapAnything model...")
    with torch.no_grad():
        predictions, processed_data = run_model(target_dir, None)

    # Save predictions
    prediction_save_path = os.path.join(target_dir, "predictions.npz")
    np.savez(prediction_save_path, **predictions)

    # Handle None frame_filter
    if frame_filter is None:
        frame_filter = "All"

    # Build a GLB file name
    glbfile = os.path.join(
        target_dir,
        f"glbscene_{conf_thres}_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_cam{show_cam}_sky{filter_sky}_black{filter_black_bg}_white{filter_white_bg}_mask{mask_ambiguous}_pred{prediction_mode.replace(' ', '_')}.glb",
    )

    # Convert predictions to GLB
    glbscene = predictions_to_glb(
        predictions,
        conf_thres=conf_thres,
        filter_by_frames=frame_filter,
        show_cam=show_cam,
        target_dir=target_dir,
        prediction_mode=prediction_mode,
        mask_sky=filter_sky,
        mask_black_bg=filter_black_bg,
        mask_white_bg=filter_white_bg,
        mask_ambiguous=mask_ambiguous,
    )
    glbscene.export(file_obj=glbfile)

    # Cleanup
    del predictions
    gc.collect()
    torch.cuda.empty_cache()

    end_time = time.time()
    print(f"Total time: {end_time - start_time:.2f} seconds")
    log_msg = (
        f"Reconstruction Success ({len(all_files)} frames). Waiting for visualization."
    )

    # Populate visualization tabs with processed data
    depth_vis, normal_vis, measure_img, measure_pts = populate_visualization_tabs(
        processed_data, conf_thres=conf_thres
    )

    # Update view selectors based on available views
    depth_selector, normal_selector, measure_selector = update_view_selectors(
        processed_data
    )

    return (
        glbfile,
        log_msg,
        gr.Dropdown(choices=frame_filter_choices, value=frame_filter, interactive=True),
        processed_data,
        depth_vis,
        normal_vis,
        measure_img,
        "",  # measure_text (empty initially)
        depth_selector,
        normal_selector,
        measure_selector,
    )
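

# Minimal sketch of the predictions.npz round-trip used above: np.savez stores
# each dict entry as a named array, and np.load returns a lazy NpzFile mapping
# that update_visualization re-materializes into a plain dict. Shapes and the
# /tmp path are illustrative.
import numpy as np

_preds = {
    "extrinsic": np.zeros((2, 3, 4)),
    "intrinsic": np.eye(3)[None].repeat(2, axis=0),
    "depth": np.zeros((2, 4, 4, 1)),
}
np.savez("/tmp/predictions.npz", **_preds)
_loaded = np.load("/tmp/predictions.npz", allow_pickle=True)
_restored = {key: _loaded[key] for key in _loaded.keys()}
assert _restored["extrinsic"].shape == (2, 3, 4)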


# -------------------------------------------------------------------------
# 5) Helper functions for UI resets + re-visualization
# -------------------------------------------------------------------------
def apply_confidence_filtering(data, confidence, conf_thres):
    """Apply confidence filtering to data arrays"""
    if confidence is None or data is None:
        return data

    # Convert confidence threshold from percentage to confidence value
    conf_threshold = np.percentile(confidence, conf_thres)
    conf_mask = (confidence >= conf_threshold) & (confidence > 1e-5)

    # conf_mask = confidence >= (conf_thres)

    # Apply mask to data
    if len(data.shape) == 3:  # 3D data (H, W, C)
        filtered_data = data.copy()
        for c in range(data.shape[2]):
            filtered_data[:, :, c] = np.where(conf_mask, data[:, :, c], 0)
    elif len(data.shape) == 2:  # 2D data (H, W)
        filtered_data = np.where(conf_mask, data, 0)
    else:
        filtered_data = data

    return filtered_data
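

# Toy example of the percentile-based filtering above: conf_thres is a
# percentile, so conf_thres=50 zeroes out roughly the lower half of the
# pixels (values illustrative).
_conf = np.array([[0.1, 0.2], [0.8, 0.9]])
_depth = np.array([[1.0, 2.0], [3.0, 4.0]])
_filtered = apply_confidence_filtering(_depth, _conf, conf_thres=50)
# np.percentile(_conf, 50) == 0.5, so only the bottom row survives
assert (_filtered == np.array([[0.0, 0.0], [3.0, 4.0]])).all()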


def colorize_depth(depth_map, confidence=None, conf_thres=None):
    """Convert depth map to colorized visualization with optional confidence filtering"""
    if depth_map is None:
        return None

    # Apply confidence filtering if available
    if confidence is not None and conf_thres is not None:
        depth_map = apply_confidence_filtering(depth_map, confidence, conf_thres)

    # Normalize depth to 0-1 range
    depth_normalized = depth_map.copy()
    valid_mask = depth_normalized > 0
    if valid_mask.sum() > 0:
        valid_depths = depth_normalized[valid_mask]
        p5 = np.percentile(valid_depths, 5)
        p95 = np.percentile(valid_depths, 95)

        # Guard against a degenerate range (e.g., constant depth)
        depth_normalized[valid_mask] = (depth_normalized[valid_mask] - p5) / max(
            p95 - p5, 1e-8
        )

    # Apply colormap
    import matplotlib.pyplot as plt

    colormap = plt.cm.turbo_r
    # colormap = plt.cm.plasma
    # colormap = plt.cm.viridis
    colored = colormap(depth_normalized)
    colored = (colored[:, :, :3] * 255).astype(np.uint8)

    # Set invalid pixels to white
    colored[~valid_mask] = [255, 255, 255]

    return colored
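

# Quick usage check for colorize_depth on a synthetic depth ramp: valid pixels
# get turbo_r colors, invalid (zero-depth) pixels come back white.
_depth_ramp = np.linspace(0.0, 5.0, 16).reshape(4, 4)  # first pixel is 0 -> invalid
_vis = colorize_depth(_depth_ramp)
assert _vis.shape == (4, 4, 3) and _vis.dtype == np.uint8
assert (_vis[0, 0] == [255, 255, 255]).all()  # zero depth rendered as white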


def colorize_normal(normal_map, confidence=None, conf_thres=None):
    """Convert normal map to colorized visualization with optional confidence filtering"""
    if normal_map is None:
        return None

    # Apply confidence filtering if available
    if confidence is not None and conf_thres is not None:
        normal_map = apply_confidence_filtering(normal_map, confidence, conf_thres)

    # Normalize normals to [0, 1] range for visualization
    normal_vis = (normal_map + 1.0) / 2.0
    normal_vis = (normal_vis * 255).astype(np.uint8)

    return normal_vis


def process_predictions_for_visualization(pred_result, views, high_level_config):
    """Extract depth, normal, and 3D points from predictions for visualization"""
    processed_data = {}

    # Check if confidence data is available in any view
    has_confidence_data = False
    for view_idx, view in enumerate(views):
        view_key = f"pred{view_idx + 1}"
        if view_key in pred_result and "conf" in pred_result[view_key]:
            has_confidence_data = True
            break

    # Process each view
    for view_idx, view in enumerate(views):
        view_key = f"pred{view_idx + 1}"
        if view_key not in pred_result:
            continue

        # Get image
        image = rgb(view["img"], norm_type=high_level_config["data_norm_type"])

        # Get predicted points
        pred_pts3d = pred_result[view_key]["pts3d"][0].cpu().numpy()

        # Initialize data for this view
        view_data = {
            "image": image[0],
            "points3d": pred_pts3d,
            "depth": None,
            "normal": None,
            "mask": None,
            "confidence": None,
            "has_confidence": has_confidence_data,
        }

        # Get confidence data if available
        if "conf" in pred_result[view_key]:
            confidence = pred_result[view_key]["conf"][0].cpu().numpy()
            view_data["confidence"] = confidence

        # Get masks if available
        has_non_ambiguous_mask = "non_ambiguous_mask" in pred_result[view_key]
        if has_non_ambiguous_mask:
            view_data["mask"] = (
                pred_result[view_key]["non_ambiguous_mask"][0].cpu().numpy()
            )

        # Extract depth and camera info if available
        if "cam_quats" in pred_result[view_key]:
            ray_directions = pred_result[view_key]["ray_directions"][0].cpu()
            ray_depth = pred_result[view_key]["depth_along_ray"][0].cpu()

            # Compute depth
            local_pts3d = ray_directions * ray_depth
            depth_z = local_pts3d[..., 2].numpy()
            view_data["depth"] = depth_z

        # Compute normals if we have valid points
        if has_non_ambiguous_mask:
            try:
                normals, _ = points_to_normals(pred_pts3d, mask=view_data["mask"])
                view_data["normal"] = normals
            except Exception:
                # If normal computation fails, skip it
                pass

        processed_data[view_idx] = view_data

    return processed_data


def reset_measure(processed_data):
    """Reset measure points"""
    if processed_data is None or len(processed_data) == 0:
        return None, [], ""

    # Return the first view image
    first_view = list(processed_data.values())[0]
    return first_view["image"], [], ""


def measure(
    processed_data, measure_points, current_view_selector, event: gr.SelectData
):
    """Handle measurement on images"""
    try:
        print(f"Measure function called with selector: {current_view_selector}")

        if processed_data is None or len(processed_data) == 0:
            return None, [], "No data available"

        # Use the currently selected view instead of always using the first view
        try:
            current_view_index = int(current_view_selector.split()[1]) - 1
        except (AttributeError, IndexError, ValueError):
            current_view_index = 0

        print(f"Using view index: {current_view_index}")

        # Get view data safely
        if current_view_index < 0 or current_view_index >= len(processed_data):
            current_view_index = 0

        view_keys = list(processed_data.keys())
        current_view = processed_data[view_keys[current_view_index]]

        if current_view is None:
            return None, [], "No view data available"

        point2d = event.index[0], event.index[1]
        print(f"Clicked point: {point2d}")

        measure_points.append(point2d)

        # Get image and ensure it's valid
        image = current_view["image"]
        if image is None:
            return None, [], "No image available"

        image = image.copy()
        points3d = current_view["points3d"]

        # Ensure image is in uint8 format for proper cv2 operations
        try:
            if image.dtype != np.uint8:
                if image.max() <= 1.0:
                    # Image is in [0, 1] range, convert to [0, 255]
                    image = (image * 255).astype(np.uint8)
                else:
                    # Image is already in [0, 255] range
                    image = image.astype(np.uint8)
        except Exception as e:
            print(f"Image conversion error: {e}")
            return None, [], f"Image conversion error: {e}"

        # Draw circles for points
        try:
            for p in measure_points:
                if 0 <= p[0] < image.shape[1] and 0 <= p[1] < image.shape[0]:
                    image = cv2.circle(
                        image, p, radius=5, color=(255, 0, 0), thickness=2
                    )
        except Exception as e:
            print(f"Drawing error: {e}")
            return None, [], f"Drawing error: {e}"

        depth_text = ""
        try:
            for i, p in enumerate(measure_points):
                if (
                    current_view["depth"] is not None
                    and 0 <= p[1] < current_view["depth"].shape[0]
                    and 0 <= p[0] < current_view["depth"].shape[1]
                ):
                    d = current_view["depth"][p[1], p[0]]
                    depth_text += f"- **P{i + 1} depth: {d:.2f}m.**\n"
                else:
                    # Use Z coordinate of 3D points if depth not available
                    if (
                        points3d is not None
                        and 0 <= p[1] < points3d.shape[0]
                        and 0 <= p[0] < points3d.shape[1]
                    ):
                        z = points3d[p[1], p[0], 2]
                        depth_text += f"- **P{i + 1} Z-coord: {z:.2f}m.**\n"
        except Exception as e:
            print(f"Depth text error: {e}")
            depth_text = f"Error computing depth: {e}\n"

        if len(measure_points) == 2:
            try:
                point1, point2 = measure_points
                # Draw line
                if (
                    0 <= point1[0] < image.shape[1]
                    and 0 <= point1[1] < image.shape[0]
                    and 0 <= point2[0] < image.shape[1]
                    and 0 <= point2[1] < image.shape[0]
                ):
                    image = cv2.line(
                        image, point1, point2, color=(255, 0, 0), thickness=2
                    )

                # Compute 3D distance
                distance_text = "- **Distance: Unable to compute**"
                if (
                    points3d is not None
                    and 0 <= point1[1] < points3d.shape[0]
                    and 0 <= point1[0] < points3d.shape[1]
                    and 0 <= point2[1] < points3d.shape[0]
                    and 0 <= point2[0] < points3d.shape[1]
                ):
                    try:
                        p1_3d = points3d[point1[1], point1[0]]
                        p2_3d = points3d[point2[1], point2[0]]
                        distance = np.linalg.norm(p1_3d - p2_3d)
                        distance_text = f"- **Distance: {distance:.2f}m**"
                    except Exception as e:
                        print(f"Distance computation error: {e}")
                        distance_text = f"- **Distance computation error: {e}**"

                measure_points = []
                text = depth_text + distance_text
                print(f"Measurement complete: {text}")
                return [image, measure_points, text]
            except Exception as e:
                print(f"Final measurement error: {e}")
                return None, [], f"Measurement error: {e}"
        else:
            print(f"Single point measurement: {depth_text}")
            return [image, measure_points, depth_text]

    except Exception as e:
        print(f"Overall measure function error: {e}")
        return None, [], f"Measure function error: {e}"
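

# The two-click measurement above reduces to a Euclidean distance between the
# 3D points sampled at the clicked pixels; a toy check with made-up points
# (clicks are (x, y), while arrays index as [y, x]):
_pts3d = np.zeros((4, 4, 3))
_pts3d[1, 2] = [0.0, 0.0, 1.0]  # 3D point under a click at x=2, y=1
_pts3d[3, 0] = [3.0, 4.0, 1.0]  # 3D point under a click at x=0, y=3
_p1, _p2 = (2, 1), (0, 3)
_dist = np.linalg.norm(_pts3d[_p1[1], _p1[0]] - _pts3d[_p2[1], _p2[0]])
assert _dist == 5.0  # 3-4-5 triangle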


def clear_fields():
    """
    Clears the 3D viewer, the stored target_dir, and empties the gallery.
    """
    return None


def update_log():
    """
    Display a quick log message while waiting.
    """
    return "Loading and Reconstructing..."


def update_visualization(
    target_dir,
    conf_thres,
    frame_filter,
    show_cam,
    is_example,
    filter_sky=False,
    filter_black_bg=False,
    filter_white_bg=False,
    mask_ambiguous=False,
):
    """
    Reload saved predictions from npz, create (or reuse) the GLB for new parameters,
    and return it for the 3D viewer. If is_example == "True", skip.
    """

    # If it's an example click, skip as requested
    if is_example == "True":
        return (
            gr.update(),
            "No reconstruction available. Please click the Reconstruct button first.",
        )

    if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
        return (
            gr.update(),
            "No reconstruction available. Please click the Reconstruct button first.",
        )

    predictions_path = os.path.join(target_dir, "predictions.npz")
    if not os.path.exists(predictions_path):
        return (
            gr.update(),
            f"No reconstruction available at {predictions_path}. Please run 'Reconstruct' first.",
        )

    loaded = np.load(predictions_path, allow_pickle=True)
    predictions = {key: loaded[key] for key in loaded.keys()}

    # Always use Pointmap Branch for MapAnything
    prediction_mode = "Pointmap Branch"

    # Include mask_ambiguous in the cache key so toggling it does not reuse a stale GLB
    glbfile = os.path.join(
        target_dir,
        f"glbscene_{conf_thres}_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_cam{show_cam}_sky{filter_sky}_black{filter_black_bg}_white{filter_white_bg}_mask{mask_ambiguous}_pred{prediction_mode.replace(' ', '_')}.glb",
    )

    if not os.path.exists(glbfile):
        glbscene = predictions_to_glb(
            predictions,
            conf_thres=conf_thres,
            filter_by_frames=frame_filter,
            show_cam=show_cam,
            target_dir=target_dir,
            prediction_mode=prediction_mode,
            mask_sky=filter_sky,
            mask_black_bg=filter_black_bg,
            mask_white_bg=filter_white_bg,
            mask_ambiguous=mask_ambiguous,
        )
        glbscene.export(file_obj=glbfile)

    return (
        glbfile,
        "Visualization updated.",
    )
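

# The GLB caching above keys on a filename that encodes every rendering
# parameter, so each unique combination is rendered once and reused afterwards.
# A simplified version of the naming scheme (the real one includes more flags):
def _glb_cache_name(conf_thres, frame_filter, show_cam):
    safe = frame_filter.replace(".", "_").replace(":", "").replace(" ", "_")
    return f"glbscene_{conf_thres}_{safe}_cam{show_cam}.glb"


assert _glb_cache_name(3.0, "All", True) != _glb_cache_name(3.0, "All", False)
assert _glb_cache_name(3.0, "0: 000000.png", True) == "glbscene_3.0_0_000000_png_camTrue.glb"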


# -------------------------------------------------------------------------
# Example scene functions
# -------------------------------------------------------------------------
def get_scene_info(examples_dir):
    """Get information about scenes in the examples directory"""
    import glob

    scenes = []
    if not os.path.exists(examples_dir):
        return scenes

    for scene_folder in sorted(os.listdir(examples_dir)):
        scene_path = os.path.join(examples_dir, scene_folder)
        if os.path.isdir(scene_path):
            # Find all image files in the scene folder
            image_extensions = ["*.jpg", "*.jpeg", "*.png", "*.bmp", "*.tiff", "*.tif"]
            image_files = []
            for ext in image_extensions:
                image_files.extend(glob.glob(os.path.join(scene_path, ext)))
                image_files.extend(glob.glob(os.path.join(scene_path, ext.upper())))

            if image_files:
                # Sort images and get the first one for thumbnail
                image_files = sorted(image_files)
                first_image = image_files[0]
                num_images = len(image_files)

                scenes.append(
                    {
                        "name": scene_folder,
                        "path": scene_path,
                        "thumbnail": first_image,
                        "num_images": num_images,
                        "image_files": image_files,
                    }
                )

    return scenes
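

# Example of what get_scene_info returns for an examples/ tree such as
# examples/kitchen/0001.jpg, examples/kitchen/0002.jpg (paths illustrative):
for _scene in get_scene_info("examples"):
    print(_scene["name"], _scene["num_images"], _scene["thumbnail"])
# e.g. -> kitchen 2 examples/kitchen/0001.jpg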


def load_example_scene(scene_name, examples_dir="examples"):
    """Load a scene from examples directory"""
    scenes = get_scene_info(examples_dir)

    # Find the selected scene
    selected_scene = None
    for scene in scenes:
        if scene["name"] == scene_name:
            selected_scene = scene
            break

    if selected_scene is None:
        return None, None, None, "Scene not found"

    # Create target directory and copy images
    target_dir, image_paths = handle_uploads(None, selected_scene["image_files"])

    return (
        None,  # Clear reconstruction output
        target_dir,  # Set target directory
        image_paths,  # Set gallery
        f"Loaded scene '{scene_name}' with {selected_scene['num_images']} images. Click 'Reconstruct' to begin 3D processing.",
    )


# -------------------------------------------------------------------------
# 6) Build Gradio UI
# -------------------------------------------------------------------------
theme = get_gradio_theme()

with gr.Blocks(theme=theme, css=GRADIO_CSS) as demo:
    # State variables for the tabbed interface
    is_example = gr.Textbox(label="is_example", visible=False, value="None")
    num_images = gr.Textbox(label="num_images", visible=False, value="None")
    processed_data_state = gr.State(value=None)
    measure_points_state = gr.State(value=[])
    current_view_index = gr.State(value=0)  # Track current view index for navigation

    gr.HTML(get_header_html(get_logo_base64()))
    gr.HTML(get_description_html())

    target_dir_output = gr.Textbox(label="Target Dir", visible=False, value="None")

    with gr.Row():
        with gr.Column(scale=2):
            input_video = gr.Video(label="Upload Video", interactive=True)
            s_time_interval = gr.Slider(
                minimum=0.1,
                maximum=5.0,
                value=1.0,
                step=0.1,
                label="Sample time interval (take a sample every x sec.)",
                interactive=True,
                visible=True,
            )
            input_images = gr.File(
                file_count="multiple", label="Upload Images", interactive=True
            )

            image_gallery = gr.Gallery(
                label="Preview",
                columns=4,
                height="300px",
                show_download_button=True,
                object_fit="contain",
                preview=True,
            )

        with gr.Column(scale=4):
            with gr.Column():
                gr.Markdown("**3D Reconstruction (Point Cloud and Camera Poses)**")
                log_output = gr.Markdown(
                    "Please upload a video or images, then click Reconstruct.",
                    elem_classes=["custom-log"],
                )

            # Add tabbed interface similar to MoGe
            with gr.Tabs():
                with gr.Tab("3D View"):
                    reconstruction_output = gr.Model3D(
                        height=520,
                        zoom_speed=0.5,
                        pan_speed=0.5,
                        clear_color=[0.0, 0.0, 0.0, 0.0],
                        key="persistent_3d_viewer",
                        elem_id="reconstruction_3d_viewer",
                    )
                with gr.Tab("Depth"):
                    with gr.Row(elem_classes=["navigation-row"]):
                        prev_depth_btn = gr.Button("◀ Previous", size="sm", scale=1)
                        depth_view_selector = gr.Dropdown(
                            choices=["View 1"],
                            value="View 1",
                            label="Select View",
                            scale=2,
                            interactive=True,
                            allow_custom_value=True,
                        )
                        next_depth_btn = gr.Button("Next ▶", size="sm", scale=1)
                    depth_map = gr.Image(
                        type="numpy",
                        label="Colorized Depth Map",
                        format="png",
                        interactive=False,
                    )
                with gr.Tab("Normal"):
                    with gr.Row(elem_classes=["navigation-row"]):
                        prev_normal_btn = gr.Button("◀ Previous", size="sm", scale=1)
                        normal_view_selector = gr.Dropdown(
                            choices=["View 1"],
                            value="View 1",
                            label="Select View",
                            scale=2,
                            interactive=True,
                            allow_custom_value=True,
                        )
                        next_normal_btn = gr.Button("Next ▶", size="sm", scale=1)
                    normal_map = gr.Image(
                        type="numpy",
                        label="Normal Map",
                        format="png",
                        interactive=False,
                    )
                with gr.Tab("Measure"):
                    gr.Markdown(MEASURE_INSTRUCTIONS_HTML)
                    with gr.Row(elem_classes=["navigation-row"]):
                        prev_measure_btn = gr.Button("◀ Previous", size="sm", scale=1)
                        measure_view_selector = gr.Dropdown(
                            choices=["View 1"],
                            value="View 1",
                            label="Select View",
                            scale=2,
                            interactive=True,
                            allow_custom_value=True,
                        )
                        next_measure_btn = gr.Button("Next ▶", size="sm", scale=1)
                    measure_image = gr.Image(
                        type="numpy",
                        show_label=False,
                        format="webp",
                        interactive=False,
                        sources=[],
                    )
                    measure_text = gr.Markdown("")

            with gr.Row():
                submit_btn = gr.Button("Reconstruct", scale=1, variant="primary")
                clear_btn = gr.ClearButton(
                    [
                        input_video,
                        input_images,
                        reconstruction_output,
                        log_output,
                        target_dir_output,
                        image_gallery,
                    ],
                    scale=1,
                )

            with gr.Row():
                conf_thres = gr.Slider(
                    minimum=0,
                    maximum=100,
                    value=0,
                    step=0.1,
                    label="Confidence Threshold (%), only shown in depth and normals",
                )
                frame_filter = gr.Dropdown(
                    choices=["All"], value="All", label="Show Points from Frame"
                )
                with gr.Column():
                    show_cam = gr.Checkbox(label="Show Camera", value=True)
                    filter_sky = gr.Checkbox(
                        label="Filter Sky (using skyseg.onnx)", value=False
                    )
                    filter_black_bg = gr.Checkbox(
                        label="Filter Black Background", value=False
                    )
                    filter_white_bg = gr.Checkbox(
                        label="Filter White Background", value=False
                    )
                    mask_ambiguous = gr.Checkbox(label="Mask Ambiguous", value=True)

    # ---------------------- Example Scenes Section ----------------------
    gr.Markdown("## Example Scenes")
    gr.Markdown("Click any thumbnail to load the scene for reconstruction.")

    # Get scene information
    scenes = get_scene_info("examples")

    # Create thumbnail grid (4 columns, N rows)
    if scenes:
        for i in range(0, len(scenes), 4):  # Process 4 scenes per row
            with gr.Row():
                for j in range(4):
                    scene_idx = i + j
                    if scene_idx < len(scenes):
                        scene = scenes[scene_idx]
                        with gr.Column(scale=1, elem_classes=["clickable-thumbnail"]):
                            # Clickable thumbnail
                            scene_img = gr.Image(
                                value=scene["thumbnail"],
                                height=150,
                                interactive=False,
                                show_label=False,
                                elem_id=f"scene_thumb_{scene['name']}",
                                sources=[],
                            )

                            # Scene name and image count as text below thumbnail
                            gr.Markdown(
                                f"**{scene['name']}** \n {scene['num_images']} images",
                                elem_classes=["scene-info"],
                            )

                            # Connect thumbnail click to load scene
                            scene_img.select(
                                fn=lambda name=scene["name"]: load_example_scene(name),
                                outputs=[
                                    reconstruction_output,
                                    target_dir_output,
                                    image_gallery,
                                    log_output,
                                ],
                            )
                    else:
                        # Empty column to maintain grid structure
                        with gr.Column(scale=1):
                            pass

    # -------------------------------------------------------------------------
    # "Reconstruct" button logic:
    # - Clear fields
    # - Update log
    # - gradio_demo(...) with the existing target_dir
    # - Then set is_example = "False"
    # -------------------------------------------------------------------------
    submit_btn.click(fn=clear_fields, inputs=[], outputs=[reconstruction_output]).then(
        fn=update_log, inputs=[], outputs=[log_output]
    ).then(
        fn=gradio_demo,
        inputs=[
            target_dir_output,
            conf_thres,
            frame_filter,
            show_cam,
            filter_sky,
            filter_black_bg,
            filter_white_bg,
            mask_ambiguous,
        ],
        outputs=[
            reconstruction_output,
            log_output,
            frame_filter,
            processed_data_state,
            depth_map,
            normal_map,
            measure_image,
            measure_text,
            depth_view_selector,
            normal_view_selector,
            measure_view_selector,
        ],
    ).then(
        fn=lambda: "False",
        inputs=[],
        outputs=[is_example],  # set is_example to "False"
    )

    # -------------------------------------------------------------------------
    # Real-time Visualization Updates
    # -------------------------------------------------------------------------
    def update_all_visualizations_on_conf_change(
        processed_data,
        depth_selector,
        normal_selector,
        conf_thres_val,
        target_dir,
        frame_filter,
        show_cam,
        is_example,
    ):
        """Update 3D view and all tabs when confidence threshold changes"""

        # Update 3D pointcloud visualization
        glb_file, log_msg = update_visualization(
            target_dir,
            conf_thres_val,
            frame_filter,
            show_cam,
            is_example,
        )

        # Update depth and normal tabs with new confidence threshold
        depth_vis = None
        normal_vis = None

        if processed_data is not None:
            # Get current view indices from selectors
            try:
                depth_view_idx = (
                    int(depth_selector.split()[1]) - 1 if depth_selector else 0
                )
            except (AttributeError, IndexError, ValueError):
                depth_view_idx = 0

            try:
                normal_view_idx = (
                    int(normal_selector.split()[1]) - 1 if normal_selector else 0
                )
            except (AttributeError, IndexError, ValueError):
                normal_view_idx = 0

            # Update visualizations with new confidence threshold
            depth_vis = update_depth_view(
                processed_data, depth_view_idx, conf_thres=conf_thres_val
            )
            normal_vis = update_normal_view(
                processed_data, normal_view_idx, conf_thres=conf_thres_val
            )

        return glb_file, log_msg, depth_vis, normal_vis

    conf_thres.change(
        fn=update_all_visualizations_on_conf_change,
        inputs=[
            processed_data_state,
            depth_view_selector,
            normal_view_selector,
            conf_thres,
            target_dir_output,
            frame_filter,
            show_cam,
            is_example,
        ],
        outputs=[reconstruction_output, log_output, depth_map, normal_map],
    )
    frame_filter.change(
        update_visualization,
        [
            target_dir_output,
            conf_thres,
            frame_filter,
            show_cam,
            is_example,
        ],
        [reconstruction_output, log_output],
    )
    show_cam.change(
        update_visualization,
        [
            target_dir_output,
            conf_thres,
            frame_filter,
            show_cam,
            is_example,
        ],
        [reconstruction_output, log_output],
    )
    filter_sky.change(
        update_visualization,
        [
            target_dir_output,
            conf_thres,
            frame_filter,
            show_cam,
            is_example,
            filter_sky,
            filter_black_bg,
            filter_white_bg,
            mask_ambiguous,
        ],
        [reconstruction_output, log_output],
    )
    filter_black_bg.change(
        update_visualization,
        [
            target_dir_output,
            conf_thres,
            frame_filter,
            show_cam,
            is_example,
            filter_sky,
            filter_black_bg,
            filter_white_bg,
            mask_ambiguous,
        ],
        [reconstruction_output, log_output],
    )
    filter_white_bg.change(
        update_visualization,
        [
            target_dir_output,
            conf_thres,
            frame_filter,
            show_cam,
            is_example,
            filter_sky,
            filter_black_bg,
            filter_white_bg,
            mask_ambiguous,
        ],
        [reconstruction_output, log_output],
    )
    mask_ambiguous.change(
        update_visualization,
        [
            target_dir_output,
            conf_thres,
            frame_filter,
            show_cam,
            is_example,
            filter_sky,
            filter_black_bg,
            filter_white_bg,
            mask_ambiguous,
        ],
        [reconstruction_output, log_output],
    )

    # -------------------------------------------------------------------------
    # Auto-update gallery whenever user uploads or changes their files
    # -------------------------------------------------------------------------
    input_video.change(
        fn=update_gallery_on_upload,
        inputs=[input_video, input_images, s_time_interval],
        outputs=[reconstruction_output, target_dir_output, image_gallery, log_output],
    )
    input_images.change(
        fn=update_gallery_on_upload,
        inputs=[input_video, input_images, s_time_interval],
        outputs=[reconstruction_output, target_dir_output, image_gallery, log_output],
    )

    # -------------------------------------------------------------------------
    # Measure tab functionality
    # -------------------------------------------------------------------------
    measure_image.select(
        fn=measure,
        inputs=[processed_data_state, measure_points_state, measure_view_selector],
        outputs=[measure_image, measure_points_state, measure_text],
    )

    # -------------------------------------------------------------------------
    # Navigation functionality for Depth, Normal, and Measure tabs
    # -------------------------------------------------------------------------

    # Depth tab navigation
    prev_depth_btn.click(
        fn=lambda processed_data, current_selector, conf_thres_val: navigate_depth_view(
            processed_data, current_selector, -1, conf_thres=conf_thres_val
        ),
        inputs=[processed_data_state, depth_view_selector, conf_thres],
        outputs=[depth_view_selector, depth_map],
    )

    next_depth_btn.click(
        fn=lambda processed_data, current_selector, conf_thres_val: navigate_depth_view(
            processed_data, current_selector, 1, conf_thres=conf_thres_val
        ),
        inputs=[processed_data_state, depth_view_selector, conf_thres],
        outputs=[depth_view_selector, depth_map],
    )

    depth_view_selector.change(
        fn=lambda processed_data, selector_value, conf_thres_val: (
            update_depth_view(
                processed_data,
                int(selector_value.split()[1]) - 1,
                conf_thres=conf_thres_val,
            )
            if selector_value
            else None
        ),
        inputs=[processed_data_state, depth_view_selector, conf_thres],
        outputs=[depth_map],
    )

    # Normal tab navigation
    prev_normal_btn.click(
        fn=lambda processed_data, current_selector, conf_thres_val: navigate_normal_view(
            processed_data, current_selector, -1, conf_thres=conf_thres_val
        ),
        inputs=[processed_data_state, normal_view_selector, conf_thres],
        outputs=[normal_view_selector, normal_map],
    )

    next_normal_btn.click(
        fn=lambda processed_data, current_selector, conf_thres_val: navigate_normal_view(
            processed_data, current_selector, 1, conf_thres=conf_thres_val
        ),
        inputs=[processed_data_state, normal_view_selector, conf_thres],
        outputs=[normal_view_selector, normal_map],
    )

    normal_view_selector.change(
        fn=lambda processed_data, selector_value, conf_thres_val: (
            update_normal_view(
                processed_data,
                int(selector_value.split()[1]) - 1,
                conf_thres=conf_thres_val,
            )
            if selector_value
            else None
        ),
        inputs=[processed_data_state, normal_view_selector, conf_thres],
        outputs=[normal_map],
    )

    # Measure tab navigation
    prev_measure_btn.click(
        fn=lambda processed_data, current_selector: navigate_measure_view(
            processed_data, current_selector, -1
        ),
        inputs=[processed_data_state, measure_view_selector],
        outputs=[measure_view_selector, measure_image, measure_points_state],
    )

    next_measure_btn.click(
        fn=lambda processed_data, current_selector: navigate_measure_view(
            processed_data, current_selector, 1
        ),
        inputs=[processed_data_state, measure_view_selector],
        outputs=[measure_view_selector, measure_image, measure_points_state],
    )

    measure_view_selector.change(
        fn=lambda processed_data, selector_value: (
            update_measure_view(processed_data, int(selector_value.split()[1]) - 1)
            if selector_value
            else (None, [])
        ),
        inputs=[processed_data_state, measure_view_selector],
        outputs=[measure_image, measure_points_state],
    )

    # -------------------------------------------------------------------------
    # Acknowledgement section
    # -------------------------------------------------------------------------
    gr.HTML(get_acknowledgements_html())

demo.queue(max_size=20).launch(show_error=True, share=True, ssr_mode=False)
app_interactive.py
ADDED
@@ -0,0 +1,9 @@
import gradio as gr


def greet(name):
    return "Hello " + name + "!!"


demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
configs/calibration_benchmark.yaml
ADDED
@@ -0,0 +1,23 @@
defaults:
  - machine: aws
  - model: default
  - dataset: default
  - _self_

output_dir: ${hydra:run.dir}
root_data_dir: ${machine.root_data_dir}
mapanything_dataset_metadata_dir: ${machine.mapanything_dataset_metadata_dir}
root_pretrained_checkpoints_dir: ${machine.root_pretrained_checkpoints_dir}
root_experiments_dir: ${machine.root_experiments_dir}
root_uniception_pretrained_checkpoints_dir: ${machine.root_uniception_pretrained_checkpoints_dir}

### Benchmarking args
seed: 0
# Disable CUDNN Benchmark (Disable for variable resolution & number of view training)
disable_cudnn_benchmark: true
# Batch size for inference (Metrics are computed per multi-view set and averaged, not per batch of multi-view sets)
batch_size: 20
# Use mixed precision for inference
amp: 1
# Floating point type to use for mixed precision
amp_dtype: "bf16"
configs/dataset/ase_wai/default.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- train: default
|
| 3 |
+
- val: default
|
configs/dataset/ase_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "ASEWAI(
+  split='${dataset.ase_wai.train.split}',
+  resolution=${dataset.ase_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.ase_wai.train.principal_point_centered},
+  aug_crop=${dataset.ase_wai.train.aug_crop},
+  transform='${dataset.ase_wai.train.transform}',
+  data_norm_type='${dataset.ase_wai.train.data_norm_type}',
+  ROOT='${dataset.ase_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.ase_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.ase_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.ase_wai.train.variable_num_views},
+  num_views=${dataset.ase_wai.train.num_views},
+  covisibility_thres=${dataset.ase_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/ase
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
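Each per-dataset config below follows this same pattern: `dataset_str` is a constructor-call string whose `${...}` interpolations point back at the keys defined beneath it, so overriding e.g. `dataset.ase_wai.train.aug_crop` also rewrites the string. A minimal toy sketch of how OmegaConf resolves such interpolations (flat toy keys for brevity; the real resolution happens inside the composed Hydra tree, and the training code is assumed, DUSt3R-style, to build each dataset by evaluating the resolved string -- that construction step is not shown in this commit):

from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "dataset_str": "ASEWAI(split='${split}', aug_crop=${aug_crop})",
        "split": "train",
        "aug_crop": 16,
    }
)
# Resolving substitutes the interpolations, yielding an eval-able call string.
print(OmegaConf.to_container(cfg, resolve=True)["dataset_str"])
# -> ASEWAI(split='train', aug_crop=16)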
configs/dataset/ase_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "ASEWAI(
+  split='${dataset.ase_wai.val.split}',
+  resolution=${dataset.ase_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.ase_wai.val.principal_point_centered},
+  seed=${dataset.ase_wai.val.seed},
+  transform='${dataset.ase_wai.val.transform}',
+  data_norm_type='${dataset.ase_wai.val.data_norm_type}',
+  ROOT='${dataset.ase_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.ase_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.ase_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.ase_wai.val.variable_num_views},
+  num_views=${dataset.ase_wai.val.num_views},
+  covisibility_thres=${dataset.ase_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_ase}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/ase
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/bedlam_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/bedlam_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "BedlamWAI(
+  split='${dataset.bedlam_wai.train.split}',
+  resolution=${dataset.bedlam_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.bedlam_wai.train.principal_point_centered},
+  aug_crop=${dataset.bedlam_wai.train.aug_crop},
+  transform='${dataset.bedlam_wai.train.transform}',
+  data_norm_type='${dataset.bedlam_wai.train.data_norm_type}',
+  ROOT='${dataset.bedlam_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.bedlam_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.bedlam_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.bedlam_wai.train.variable_num_views},
+  num_views=${dataset.bedlam_wai.train.num_views},
+  covisibility_thres=${dataset.bedlam_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/bedlam
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/bedlam_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "BedlamWAI(
+  split='${dataset.bedlam_wai.val.split}',
+  resolution=${dataset.bedlam_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.bedlam_wai.val.principal_point_centered},
+  seed=${dataset.bedlam_wai.val.seed},
+  transform='${dataset.bedlam_wai.val.transform}',
+  data_norm_type='${dataset.bedlam_wai.val.data_norm_type}',
+  ROOT='${dataset.bedlam_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.bedlam_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.bedlam_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.bedlam_wai.val.variable_num_views},
+  num_views=${dataset.bedlam_wai.val.num_views},
+  covisibility_thres=${dataset.bedlam_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_bedlam}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/bedlam
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/benchmark_512_eth3d_snpp_tav2.yaml
ADDED
@@ -0,0 +1,20 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 2
+
+# Test Resolution
+resolution_test_eth3d: ${dataset.resolution_options.512_1_52_ar}
+resolution_test_scannetpp: ${dataset.resolution_options.512_1_52_ar}
+resolution_test_tav2_wb: ${dataset.resolution_options.512_1_00_ar}
+
+# Test Set
+# Sample 10 multi-view sets from each scene
+# ETH3D: 13 scenes
+# ScanNet++V2: 30 scenes
+# TartanAirV2-WB: 5 scenes
+test_dataset:
+  "+ 130 @ ${dataset.eth3d_wai.test.dataset_str}
+  + 300 @ ${dataset.scannetpp_wai.test.dataset_str}
+  + 50 @ ${dataset.tav2_wb_wai.test.dataset_str}"
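The "+ N @ Dataset(...)" mixture syntax used by `test_dataset` (and by `train_dataset` in the megatrain configs further down) weights each dataset by a sample count: here 130 = 13 ETH3D scenes x 10 multi-view sets, and underscores in larger counts are digit separators. A minimal sketch of a parser for this syntax, assuming DUSt3R-style semantics where N is the number of multi-view sets drawn from each dataset (the actual parser lives in the training code and is not part of this commit):

import re

def parse_mixture(spec: str):
    # Entries look like "N @ DatasetExpr". Split only on '+' tokens that
    # introduce a new "count @" entry, so '+' inside resolved dataset
    # expressions (e.g. transform='colorjitter+grayscale') is untouched.
    chunks = re.split(r"\s*\+\s*(?=[\d_]+\s*@)", spec.strip())
    entries = []
    for chunk in chunks:
        if not chunk:
            continue
        count, expr = chunk.split("@", 1)
        entries.append((int(count.strip().replace("_", "")), expr.strip()))
    return entries

spec = "+ 130 @ ETH3DWAI(...) + 300 @ ScanNetPPWAI(...) + 50 @ TAV2WBWAI(...)"
print(parse_mixture(spec))
# -> [(130, 'ETH3DWAI(...)'), (300, 'ScanNetPPWAI(...)'), (50, 'TAV2WBWAI(...)')]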
configs/dataset/benchmark_512_snpp_tav2.yaml
ADDED
@@ -0,0 +1,17 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 2
+
+# Test Resolution
+resolution_test_scannetpp: ${dataset.resolution_options.512_1_52_ar}
+resolution_test_tav2_wb: ${dataset.resolution_options.512_1_00_ar}
+
+# Test Set
+# Sample 10 multi-view sets from each scene
+# ScanNet++V2: 30 scenes
+# TartanAirV2-WB: 5 scenes
+test_dataset:
+  "+ 300 @ ${dataset.scannetpp_wai.test.dataset_str}
+  + 50 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/benchmark_518_eth3d_snpp_tav2.yaml
ADDED
@@ -0,0 +1,20 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 2
+
+# Test Resolution
+resolution_test_eth3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_test_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_test_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+
+# Test Set
+# Sample 10 multi-view sets from each scene
+# ETH3D: 13 scenes
+# ScanNet++V2: 30 scenes
+# TartanAirV2-WB: 5 scenes
+test_dataset:
+  "+ 130 @ ${dataset.eth3d_wai.test.dataset_str}
+  + 300 @ ${dataset.scannetpp_wai.test.dataset_str}
+  + 50 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/benchmark_518_snpp_tav2.yaml
ADDED
@@ -0,0 +1,17 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 2
+
+# Test Resolution
+resolution_test_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_test_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+
+# Test Set
+# Sample 10 multi-view sets from each scene
+# ScanNet++V2: 30 scenes
+# TartanAirV2-WB: 5 scenes
+test_dataset:
+  "+ 300 @ ${dataset.scannetpp_wai.test.dataset_str}
+  + 50 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/benchmark_sv_calib_518_many_ar_eth3d_snpp_tav2.yaml
ADDED
@@ -0,0 +1,20 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 1
+
+# Test Resolution
+resolution_test_eth3d: ${dataset.resolution_options.518_many_ar}
+resolution_test_scannetpp: ${dataset.resolution_options.518_many_ar}
+resolution_test_tav2_wb: ${dataset.resolution_options.518_many_ar}
+
+# Test Set
+# Sample 20 frames from each scene
+# ETH3D: 13 scenes
+# ScanNet++V2: 30 scenes
+# TartanAirV2-WB: 5 scenes
+test_dataset:
+  "+ 260 @ ${dataset.eth3d_wai.test.dataset_str}
+  + 600 @ ${dataset.scannetpp_wai.test.dataset_str}
+  + 100 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/blendedmvs_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/blendedmvs_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "BlendedMVSWAI(
+  split='${dataset.blendedmvs_wai.train.split}',
+  resolution=${dataset.blendedmvs_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.blendedmvs_wai.train.principal_point_centered},
+  aug_crop=${dataset.blendedmvs_wai.train.aug_crop},
+  transform='${dataset.blendedmvs_wai.train.transform}',
+  data_norm_type='${dataset.blendedmvs_wai.train.data_norm_type}',
+  ROOT='${dataset.blendedmvs_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.blendedmvs_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.blendedmvs_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.blendedmvs_wai.train.variable_num_views},
+  num_views=${dataset.blendedmvs_wai.train.num_views},
+  covisibility_thres=${dataset.blendedmvs_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/blendedmvs
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/blendedmvs_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "BlendedMVSWAI(
+  split='${dataset.blendedmvs_wai.val.split}',
+  resolution=${dataset.blendedmvs_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.blendedmvs_wai.val.principal_point_centered},
+  seed=${dataset.blendedmvs_wai.val.seed},
+  transform='${dataset.blendedmvs_wai.val.transform}',
+  data_norm_type='${dataset.blendedmvs_wai.val.data_norm_type}',
+  ROOT='${dataset.blendedmvs_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.blendedmvs_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.blendedmvs_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.blendedmvs_wai.val.variable_num_views},
+  num_views=${dataset.blendedmvs_wai.val.num_views},
+  covisibility_thres=${dataset.blendedmvs_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_blendedmvs}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/blendedmvs
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/default.yaml
ADDED
@@ -0,0 +1,45 @@
+defaults:
+  - resolution_options: default
+  - ase_wai: default
+  - bedlam_wai: default
+  - blendedmvs_wai: default
+  - dl3dv_wai: default
+  - dtu_wai: default
+  - dynamicreplica_wai: default
+  - eth3d_wai: default
+  - gta_sfm_wai: default
+  - matrixcity_wai: default
+  - megadepth_wai: default
+  - mpsd_wai: default
+  - mvs_synth_wai: default
+  - paralleldomain4d_wai: default
+  - sailvos3d_wai: default
+  - scannetpp_wai: default
+  - spring_wai: default
+  - structured3d_wai: default
+  - tav2_wb_wai: default
+  - unrealstereo4k_wai: default
+  - xrooms_wai: default
+
+# Training Set, For example: BlendedMVS(split='train', resolution=(512, 384), transform=...)
+train_dataset: ???
+# Validation Set
+test_dataset: "[null]"
+# Number of workers for dataloader
+num_workers: 12
+# Default resolution for training
+resolution_train: ???
+# Default resolution for validation
+resolution_val: ???
+# Number of views parameter for multi-view datasets
+num_views: 2
+# Use a centered principal point for all images
+principal_point_centered: false
+# Default config for multi-view datasets
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+val:
+  variable_num_views: false
+test:
+  variable_num_views: false
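`???` is OmegaConf's marker for a mandatory value: a config can compose with `train_dataset` or `resolution_train` left at `???`, but accessing the key raises until an overriding preset (like the megatrain/benchmark configs in this commit) fills it in. A small self-contained sketch of that behavior:

from omegaconf import OmegaConf
from omegaconf.errors import MissingMandatoryValue

cfg = OmegaConf.create({"train_dataset": "???", "num_views": 2})
assert OmegaConf.is_missing(cfg, "train_dataset")
try:
    _ = cfg.train_dataset  # raises until an overriding config sets it
except MissingMandatoryValue:
    print("train_dataset must be provided by an overriding config")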
configs/dataset/dl3dv_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/dl3dv_wai/train/default.yaml
ADDED
@@ -0,0 +1,28 @@
+dataset_str:
+  "DL3DVWAI(
+  split='${dataset.dl3dv_wai.train.split}',
+  resolution=${dataset.dl3dv_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.dl3dv_wai.train.principal_point_centered},
+  aug_crop=${dataset.dl3dv_wai.train.aug_crop},
+  transform='${dataset.dl3dv_wai.train.transform}',
+  data_norm_type='${dataset.dl3dv_wai.train.data_norm_type}',
+  ROOT='${dataset.dl3dv_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.dl3dv_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.dl3dv_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.dl3dv_wai.train.variable_num_views},
+  num_views=${dataset.dl3dv_wai.train.num_views},
+  covisibility_thres=${dataset.dl3dv_wai.train.covisibility_thres},
+  mvs_confidence_filter_thres=${dataset.dl3dv_wai.train.mvs_confidence_filter_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dl3dv
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
+mvs_confidence_filter_thres: 0.25
configs/dataset/dl3dv_wai/val/default.yaml
ADDED
@@ -0,0 +1,28 @@
+dataset_str:
+  "DL3DVWAI(
+  split='${dataset.dl3dv_wai.val.split}',
+  resolution=${dataset.dl3dv_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.dl3dv_wai.val.principal_point_centered},
+  seed=${dataset.dl3dv_wai.val.seed},
+  transform='${dataset.dl3dv_wai.val.transform}',
+  data_norm_type='${dataset.dl3dv_wai.val.data_norm_type}',
+  ROOT='${dataset.dl3dv_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.dl3dv_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.dl3dv_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.dl3dv_wai.val.variable_num_views},
+  num_views=${dataset.dl3dv_wai.val.num_views},
+  covisibility_thres=${dataset.dl3dv_wai.val.covisibility_thres},
+  mvs_confidence_filter_thres=${dataset.dl3dv_wai.val.mvs_confidence_filter_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_dl3dv}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dl3dv
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
+mvs_confidence_filter_thres: 0.25
configs/dataset/dtu_wai/default.yaml
ADDED
@@ -0,0 +1,2 @@
+defaults:
+  - test: default
configs/dataset/dtu_wai/test/default.yaml
ADDED
@@ -0,0 +1,22 @@
+dataset_str:
+  "DTUWAI(
+  resolution=${dataset.dtu_wai.test.dataset_resolution},
+  principal_point_centered=${dataset.dtu_wai.test.principal_point_centered},
+  seed=${dataset.dtu_wai.test.seed},
+  transform='${dataset.dtu_wai.test.transform}',
+  data_norm_type='${dataset.dtu_wai.test.data_norm_type}',
+  ROOT='${dataset.dtu_wai.test.ROOT}',
+  dataset_metadata_dir='${dataset.dtu_wai.test.dataset_metadata_dir}',
+  variable_num_views=${dataset.dtu_wai.test.variable_num_views},
+  num_views=${dataset.dtu_wai.test.num_views},
+  covisibility_thres=${dataset.dtu_wai.test.covisibility_thres})"
+dataset_resolution: ${dataset.resolution_test_dtu}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dtu
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+variable_num_views: ${dataset.test.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/dynamicreplica_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/dynamicreplica_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "DynamicReplicaWAI(
+  split='${dataset.dynamicreplica_wai.train.split}',
+  resolution=${dataset.dynamicreplica_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.dynamicreplica_wai.train.principal_point_centered},
+  aug_crop=${dataset.dynamicreplica_wai.train.aug_crop},
+  transform='${dataset.dynamicreplica_wai.train.transform}',
+  data_norm_type='${dataset.dynamicreplica_wai.train.data_norm_type}',
+  ROOT='${dataset.dynamicreplica_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.dynamicreplica_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.dynamicreplica_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.dynamicreplica_wai.train.variable_num_views},
+  num_views=${dataset.dynamicreplica_wai.train.num_views},
+  covisibility_thres=${dataset.dynamicreplica_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dynamicreplica
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/dynamicreplica_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "DynamicReplicaWAI(
+  split='${dataset.dynamicreplica_wai.val.split}',
+  resolution=${dataset.dynamicreplica_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.dynamicreplica_wai.val.principal_point_centered},
+  seed=${dataset.dynamicreplica_wai.val.seed},
+  transform='${dataset.dynamicreplica_wai.val.transform}',
+  data_norm_type='${dataset.dynamicreplica_wai.val.data_norm_type}',
+  ROOT='${dataset.dynamicreplica_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.dynamicreplica_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.dynamicreplica_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.dynamicreplica_wai.val.variable_num_views},
+  num_views=${dataset.dynamicreplica_wai.val.num_views},
+  covisibility_thres=${dataset.dynamicreplica_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_dynamicreplica}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dynamicreplica
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/eth3d_wai/default.yaml
ADDED
@@ -0,0 +1,2 @@
+defaults:
+  - test: default
configs/dataset/eth3d_wai/test/default.yaml
ADDED
@@ -0,0 +1,22 @@
+dataset_str:
+  "ETH3DWAI(
+  resolution=${dataset.eth3d_wai.test.dataset_resolution},
+  principal_point_centered=${dataset.eth3d_wai.test.principal_point_centered},
+  seed=${dataset.eth3d_wai.test.seed},
+  transform='${dataset.eth3d_wai.test.transform}',
+  data_norm_type='${dataset.eth3d_wai.test.data_norm_type}',
+  ROOT='${dataset.eth3d_wai.test.ROOT}',
+  dataset_metadata_dir='${dataset.eth3d_wai.test.dataset_metadata_dir}',
+  variable_num_views=${dataset.eth3d_wai.test.variable_num_views},
+  num_views=${dataset.eth3d_wai.test.num_views},
+  covisibility_thres=${dataset.eth3d_wai.test.covisibility_thres})"
+dataset_resolution: ${dataset.resolution_test_eth3d}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/eth3d
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+variable_num_views: ${dataset.test.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.025
configs/dataset/gta_sfm_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/gta_sfm_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "GTASfMWAI(
+  split='${dataset.gta_sfm_wai.train.split}',
+  resolution=${dataset.gta_sfm_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.gta_sfm_wai.train.principal_point_centered},
+  aug_crop=${dataset.gta_sfm_wai.train.aug_crop},
+  transform='${dataset.gta_sfm_wai.train.transform}',
+  data_norm_type='${dataset.gta_sfm_wai.train.data_norm_type}',
+  ROOT='${dataset.gta_sfm_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.gta_sfm_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.gta_sfm_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.gta_sfm_wai.train.variable_num_views},
+  num_views=${dataset.gta_sfm_wai.train.num_views},
+  covisibility_thres=${dataset.gta_sfm_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/gta_sfm
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/gta_sfm_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "GTASfMWAI(
+  split='${dataset.gta_sfm_wai.val.split}',
+  resolution=${dataset.gta_sfm_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.gta_sfm_wai.val.principal_point_centered},
+  seed=${dataset.gta_sfm_wai.val.seed},
+  transform='${dataset.gta_sfm_wai.val.transform}',
+  data_norm_type='${dataset.gta_sfm_wai.val.data_norm_type}',
+  ROOT='${dataset.gta_sfm_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.gta_sfm_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.gta_sfm_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.gta_sfm_wai.val.variable_num_views},
+  num_views=${dataset.gta_sfm_wai.val.num_views},
+  covisibility_thres=${dataset.gta_sfm_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_gta_sfm}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/gta_sfm
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/matrixcity_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/matrixcity_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MatrixCityWAI(
+  split='${dataset.matrixcity_wai.train.split}',
+  resolution=${dataset.matrixcity_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.matrixcity_wai.train.principal_point_centered},
+  aug_crop=${dataset.matrixcity_wai.train.aug_crop},
+  transform='${dataset.matrixcity_wai.train.transform}',
+  data_norm_type='${dataset.matrixcity_wai.train.data_norm_type}',
+  ROOT='${dataset.matrixcity_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.matrixcity_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.matrixcity_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.matrixcity_wai.train.variable_num_views},
+  num_views=${dataset.matrixcity_wai.train.num_views},
+  covisibility_thres=${dataset.matrixcity_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/matrixcity
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/matrixcity_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MatrixCityWAI(
+  split='${dataset.matrixcity_wai.val.split}',
+  resolution=${dataset.matrixcity_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.matrixcity_wai.val.principal_point_centered},
+  seed=${dataset.matrixcity_wai.val.seed},
+  transform='${dataset.matrixcity_wai.val.transform}',
+  data_norm_type='${dataset.matrixcity_wai.val.data_norm_type}',
+  ROOT='${dataset.matrixcity_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.matrixcity_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.matrixcity_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.matrixcity_wai.val.variable_num_views},
+  num_views=${dataset.matrixcity_wai.val.num_views},
+  covisibility_thres=${dataset.matrixcity_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_matrixcity}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/matrixcity
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/megadepth_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/megadepth_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MegaDepthWAI(
+  split='${dataset.megadepth_wai.train.split}',
+  resolution=${dataset.megadepth_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.megadepth_wai.train.principal_point_centered},
+  aug_crop=${dataset.megadepth_wai.train.aug_crop},
+  transform='${dataset.megadepth_wai.train.transform}',
+  data_norm_type='${dataset.megadepth_wai.train.data_norm_type}',
+  ROOT='${dataset.megadepth_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.megadepth_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.megadepth_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.megadepth_wai.train.variable_num_views},
+  num_views=${dataset.megadepth_wai.train.num_views},
+  covisibility_thres=${dataset.megadepth_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/megadepth
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/megadepth_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MegaDepthWAI(
+  split='${dataset.megadepth_wai.val.split}',
+  resolution=${dataset.megadepth_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.megadepth_wai.val.principal_point_centered},
+  seed=${dataset.megadepth_wai.val.seed},
+  transform='${dataset.megadepth_wai.val.transform}',
+  data_norm_type='${dataset.megadepth_wai.val.data_norm_type}',
+  ROOT='${dataset.megadepth_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.megadepth_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.megadepth_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.megadepth_wai.val.variable_num_views},
+  num_views=${dataset.megadepth_wai.val.num_views},
+  covisibility_thres=${dataset.megadepth_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_megadepth}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/megadepth
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/megatrain_11d_se_518_many_ar_48ipg_64g.yaml
ADDED
@@ -0,0 +1,53 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_dl3dv: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 2_450_000 @ ${dataset.ase_wai.train.dataset_str}
+  + 250_000 @ ${dataset.dl3dv_wai.train.dataset_str}
+  + 12_400 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 1_675_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 3_000 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 36_000 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 22_600 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 800 @ ${dataset.spring_wai.train.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 200 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dl3dv_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_12d_518_many_ar_24ipg_16g.yaml
ADDED
@@ -0,0 +1,56 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_megadepth: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 58_000 @ ${dataset.ase_wai.train.dataset_str}
+  + 58_000 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 45_000 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 58_000 @ ${dataset.megadepth_wai.train.dataset_str}
+  + 58_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 58_000 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 58_000 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 58_000 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 58_000 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 2_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 58_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 5_500 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.megadepth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_13d_512_many_ar_24ipg_16g.yaml
ADDED
@@ -0,0 +1,59 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.512_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.512_1_00_ar}
+resolution_val_blendedmvs: ${dataset.resolution_options.512_1_33_ar}
+resolution_val_dl3dv: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_megadepth: ${dataset.resolution_options.512_1_52_ar}
+resolution_val_mpsd: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.512_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.512_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.512_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.512_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.512_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 52_500 @ ${dataset.ase_wai.train.dataset_str}
+  + 52_500 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 52_500 @ ${dataset.dl3dv_wai.train.dataset_str}
+  + 40_000 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 52_500 @ ${dataset.megadepth_wai.train.dataset_str}
+  + 52_500 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 52_500 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 52_500 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 52_500 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 52_500 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 2_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 52_500 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 5_500 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dl3dv_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.megadepth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_13d_518_many_ar_24ipg_16g.yaml
ADDED
@@ -0,0 +1,59 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_dl3dv: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_megadepth: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 52_500 @ ${dataset.ase_wai.train.dataset_str}
+  + 52_500 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 52_500 @ ${dataset.dl3dv_wai.train.dataset_str}
+  + 40_000 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 52_500 @ ${dataset.megadepth_wai.train.dataset_str}
+  + 52_500 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 52_500 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 52_500 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 52_500 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 52_500 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 2_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 52_500 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 5_500 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dl3dv_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.megadepth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_13d_518_many_ar_48ipg_64g.yaml
ADDED
@@ -0,0 +1,59 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_dl3dv: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_megadepth: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 420_000 @ ${dataset.ase_wai.train.dataset_str}
+  + 420_000 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 420_000 @ ${dataset.dl3dv_wai.train.dataset_str}
+  + 320_000 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 420_000 @ ${dataset.megadepth_wai.train.dataset_str}
+  + 420_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 420_000 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 420_000 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 420_000 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 420_000 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 16_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 420_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 44_000 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dl3dv_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.megadepth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_6d_518_many_ar_48ipg_64g.yaml
ADDED
@@ -0,0 +1,38 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 1_120_000 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 1_120_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 1_120_000 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 44_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 1_120_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 116_000 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_6d_518_many_ar_48ipg_8g.yaml
ADDED
@@ -0,0 +1,38 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 140_000 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 140_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 140_000 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 5_500 @ ${dataset.spring_wai.train.dataset_str}
+  + 140_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 14_500 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"