Add files using upload-large-folder tool
- .gitattributes +1 -0
- README.md +80 -0
- SAT_static.parquet +3 -0
- SAT_test.parquet +3 -0
- SAT_train.parquet +3 -0
- SAT_val.parquet +3 -0
- images_chunk_aa.tar.gz +3 -0
- images_chunk_ab.tar.gz +3 -0
- images_chunk_ac.tar.gz +3 -0
- images_chunk_ad.tar.gz +3 -0
- images_chunk_ae.tar.gz +3 -0
- images_chunk_af.tar.gz +3 -0
- images_chunk_ag.tar.gz +3 -0
- images_chunk_ah.tar.gz +3 -0
- images_chunk_ai.tar.gz +3 -0
- images_chunk_aj.tar.gz +3 -0
- images_chunk_ak.tar.gz +3 -0
- images_chunk_al.tar.gz +3 -0
- images_chunk_am.tar.gz +3 -0
- images_chunk_an.tar.gz +3 -0
- images_chunk_ao.tar.gz +3 -0
- images_chunk_ap.tar.gz +3 -0
- images_chunk_aq.tar.gz +3 -0
- images_chunk_ar.tar.gz +3 -0
- images_chunk_as.tar.gz +3 -0
- images_chunk_at.tar.gz +3 -0
- scripts/filter.py +170 -0
- scripts/load.py +34 -0
- scripts/process.py +58 -0
- scripts/split.py +90 -0
- scripts/to_convs.py +77 -0
- train_data_processed.json +3 -0
.gitattributes
CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+train_data_processed.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,80 @@
---
license: mit
configs:
- config_name: default
  data_files:
  - split: train
    path: "SAT_train.parquet"
  - split: static
    path: "SAT_static.parquet"
  - split: val
    path: "SAT_val.parquet"
  - split: test
    path: "SAT_test.parquet"
dataset_info:
  features:
  - name: image_bytes
    list:
      dtype: image
  - name: question
    dtype: string
  - name: answers
    list:
      dtype: string
  - name: question_type
    dtype: string
  - name: correct_answer
    dtype: string
task_categories:
- question-answering
size_categories:
- 100K<n<1M
---

# SAT: Spatial Aptitude Training for Multimodal Language Models

[Project Page](https://arijitray1993.github.io/SAT/)



To use the dataset, first make sure you have Python 3.10 and Hugging Face `datasets` version 3.0.2 (`pip install datasets==3.0.2`):

```python
from datasets import load_dataset
import io
from PIL import Image

split = "val"
dataset = load_dataset("array/SAT", batch_size=128)

example = dataset[split][10]  # e.g., the 10th item

# A list of images: some questions have one image, others have two.
images = [Image.open(io.BytesIO(im_bytes)) for im_bytes in example['image_bytes']]

question = example['question']
answer_choices = example['answers']
correct_answer = example['correct_answer']
```

The available `split` choices are:
- `train`: (175K image QA pairs) Train split of SAT that includes both static relationships and dynamic spatial QAs involving object and scene motion. Motion-based questions have two images.
- `static`: (127K image QA pairs) Train split of SAT that includes _only_ static QAs; each question has a single image.
- `val`: (4K image QA pairs) Synthetic validation split.
- `test`: (150 image QA pairs) Real-image dynamic test set.

If you find this data useful, please consider citing:

```
@misc{ray2025satdynamicspatialaptitude,
      title={SAT: Dynamic Spatial Aptitude Training for Multimodal Language Models},
      author={Arijit Ray and Jiafei Duan and Ellis Brown and Reuben Tan and Dina Bashkirova and Rose Hendrix and Kiana Ehsani and Aniruddha Kembhavi and Bryan A. Plummer and Ranjay Krishna and Kuo-Hao Zeng and Kate Saenko},
      year={2025},
      eprint={2412.07755},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2412.07755},
}
```
SAT_static.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:73df7e5859d24a718c50067941e79418fef45f37d2df08a133385852aecab0a1
size 2633068260

SAT_test.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eea844ab8c45da93254a676d0f79d2d4e5d310489fa55cf992aaa73e7f567be2
size 88172845

SAT_train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7103623171c5270a633d7ba9bc646b61bb4401e3efe3b9d1dc441762e082821a
size 4558508645

SAT_val.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3e606d36368a54106cd02314660adadf40d2587a914ce565798141e0357483e1
size 283473698

images_chunk_aa.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c4428df31da2a981d8c3305b98a08dead27eee646e80752790012a981152e86d
size 79178334

images_chunk_ab.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:512ca6e89af8abf23c39e96cfee3d9e30ebc9fc10d26e7af417a305ba2d646b8
size 81538144

images_chunk_ac.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0928fee5e5e6fbf99cf58a4ba2137ab2b73a73764a7213984e484878033896a5
size 80055103

images_chunk_ad.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:85deee23a474391a41cd642603c7e0a8746ded86e4fd6ab685634091e4ab0d6a
size 80077294

images_chunk_ae.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5031e1ea2987d134eacdde8db25d9f4c3e7b8881f60fb740dd2f291087503ea1
size 83134603

images_chunk_af.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:165c750be851ce69d4b10293a538f48e0b493da46157d2ca927c42de57497e39
size 86399902

images_chunk_ag.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:250ea530b2de18766280de8e952c833735459886f0ecae8538c6461ece37f0bf
size 65611363

images_chunk_ah.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f1aa54d6a58a858ca097ee9f9ebd834c4658efc464b3fee94cb80018e9a0982
size 82489626

images_chunk_ai.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:19e174037345282c32a8bd29c8dacd38b69211379c9032afb8eae8ef0f893966
size 62063325

images_chunk_aj.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:83eacbad05be74fe5d12ae8ef7ce8ba7885f7db8937ddc2bca55745fe82db9b5
size 71940190

images_chunk_ak.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:70beb7a8f6bfe432d8476270b0c0fd38991b32941329b503686f8d398dc0f609
size 79643502

images_chunk_al.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4971201ad4ce67c798f1d32244cd7a4990cfc95fc40b362353d51eebac04fc45
size 77992822

images_chunk_am.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82a89871e0caaa3ebaa5ea5063f53960eb0f24441d82f82170d3aca8b214e042
size 95665827

images_chunk_an.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4afbcd7050f04558fda4d73cfcd473b5bd4004c941783928d2579930633afbd8
size 147772588

images_chunk_ao.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1ee41c8196fa4e53ac25653c6f04f46638a05c345c72df5c9581b97003308859
size 142916708

images_chunk_ap.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:607f6cdf9e07c4d24a67cb1e3114395ad0dfba6e4c40247fdbf08b58970fa444
size 142697040

images_chunk_aq.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:14009fb88ce3c9b82175ebfbae892970695bbd9871884e938c4c9e5d794101d9
size 147602454

images_chunk_ar.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d2f48359f319a1609097964661ca34d30218ed9b2674bc62f2a22442ad8d0ed6
size 161153944

images_chunk_as.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16bbe025bc6891874409e3c2a9748ee6430f06273327568f885073c26d51f4c6
size 207823562

images_chunk_at.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0bcba901d98b25986d1a610077b51cd87b43996671d8bdc49d0111c366d3f2c4
size 131770184
scripts/filter.py
ADDED
@@ -0,0 +1,170 @@
import json
import os
from pathlib import Path
from tqdm import tqdm

def check_json_data(json_file_path, images_root_dir):
    """Check JSON data integrity: only flag entries whose 'image' field points to missing image files."""

    # Load the JSON file
    try:
        with open(json_file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except Exception as e:
        print(f"Failed to load JSON file: {e}")
        return []

    anomalous_data = []
    total_entries = len(data)

    print(f"Checking {total_entries} entries...")

    for i, entry in enumerate(tqdm(data, desc="Checking image files", unit="entry")):
        issues = []

        # Only check problems related to the 'image' field
        if 'image' in entry:
            image_path = entry['image']
            # if not image_path or image_path.strip() == '':
            #     issues.append("Empty image path")
            # else:
            #     # Check whether the image file exists
            #     if images_root_dir:
            #         # Build the full image file path
            #         full_image_path = Path(images_root_dir) / image_path
            #     else:
            #         full_image_path = Path(json_file_path).parent / image_path
            #     if not full_image_path.exists():
            #         issues.append(f"Image file not found: {image_path}")

            # 'image' is a list of paths; check each one as given (images_root_dir is unused here)
            if len(image_path) == 0:
                issues.append("Empty image path")
            else:
                for img in image_path:
                    if not Path(img).exists():
                        issues.append(f"Image file not found: {img}")

        # Record the entry if any issues were found
        if issues:
            anomalous_entry = {
                'index': i,
                'id': entry.get('id', 'Unknown'),
                'image': entry.get('image', 'N/A'),
                'entry': entry,
                'issues': issues
            }
            anomalous_data.append(anomalous_entry)

    return anomalous_data

def save_valid_data(json_file_path, anomalous_data, output_file='filtered_data.json'):
    """Save the filtered (valid) data."""
    # Load the original data
    with open(json_file_path, 'r', encoding='utf-8') as f:
        original_data = json.load(f)

    # Collect the indices of the anomalous entries
    anomalous_indices = {anomaly['index'] for anomaly in anomalous_data}

    # Keep only the valid entries
    valid_data = [entry for i, entry in enumerate(original_data) if i not in anomalous_indices]

    # Save the valid data
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(valid_data, f, indent=2, ensure_ascii=False)

    print(f"Filtered valid data saved to: {output_file}")
    print(f"Original entries: {len(original_data)}")
    print(f"Valid entries: {len(valid_data)}")
    print(f"Filtered out: {len(anomalous_data)}")

def save_anomalous_data(anomalous_data, output_file='anomalous_data.json'):
    """Save the anomalous entries to a file."""
    if anomalous_data:
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(anomalous_data, f, indent=2, ensure_ascii=False)
        print(f"Anomalous data saved to: {output_file}")
    else:
        print("No anomalous data to save")

def print_statistics(json_file_path, anomalous_data):
    """Print summary statistics."""
    with open(json_file_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    total_entries = len(data)
    anomalous_count = len(anomalous_data)

    print(f"\n=== Data statistics ===")
    print(f"Total entries: {total_entries}")
    print(f"Entries with image issues: {anomalous_count}")
    print(f"Valid entries: {total_entries - anomalous_count}")
    print(f"Valid rate: {((total_entries - anomalous_count) / total_entries * 100):.2f}%")

    # Field presence statistics
    fields_stats = {
        'id': sum(1 for entry in data if 'id' in entry),
        'image': sum(1 for entry in data if 'image' in entry),
        'conversations': sum(1 for entry in data if 'conversations' in entry)
    }

    print(f"\n=== Field presence ===")
    for field, count in fields_stats.items():
        print(f"Entries with '{field}': {count}/{total_entries} ({count/total_entries*100:.1f}%)")

# Main entry point
def main():
    json_file = 'train_data_processed.json'
    images_root_dir = 'images'

    # Check that the file exists
    if not os.path.exists(json_file):
        print(f"File not found: {json_file}")
        return

    # Check the data
    anomalous_data = check_json_data(json_file, images_root_dir)

    # Report the results
    if anomalous_data:
        print(f"\nFound {len(anomalous_data)} entries with image issues:")
        print("=" * 50)

        # Show only the first 10 anomalous entries to keep the output short
        for anomaly in anomalous_data[:10]:
            print(f"Index: {anomaly['index']}")
            print(f"ID: {anomaly['id']}")
            print(f"Image: {anomaly['image']}")
            print(f"Issues: {', '.join(anomaly['issues'])}")
            print("-" * 30)

        if len(anomalous_data) > 10:
            print(f"... {len(anomalous_data) - 10} more anomalous entries")

        # Save the anomalous data
        # save_anomalous_data(anomalous_data)

        # Save the filtered valid data
        # save_valid_data(json_file, anomalous_data)

        # Count issues by type
        issue_counts = {}
        for anomaly in anomalous_data:
            for issue in anomaly['issues']:
                issue_type = issue.split(':')[0] if ':' in issue else issue
                issue_counts[issue_type] = issue_counts.get(issue_type, 0) + 1

        print(f"\n=== Issue type counts ===")
        for issue_type, count in sorted(issue_counts.items()):
            print(f"{issue_type}: {count}")

    else:
        print("✅ All image files are present!")
        # Even if there is no anomalous data, optionally save a full copy
        # save_valid_data(json_file, anomalous_data, 'filtered_filtered_data.json')

    # Print statistics
    print_statistics(json_file, anomalous_data)

if __name__ == "__main__":
    main()
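The write-out calls are commented out in `main` above; if you actually want the filtered files, a minimal sketch using the functions defined in this script (the `scripts.filter` import path is an assumption about how the repo is laid out on disk):

```python
# Hypothetical driver: reuses check_json_data / save_* from scripts/filter.py.
from scripts.filter import check_json_data, save_anomalous_data, save_valid_data

anomalies = check_json_data("train_data_processed.json", "images")
save_anomalous_data(anomalies)                            # writes anomalous_data.json
save_valid_data("train_data_processed.json", anomalies)   # writes filtered_data.json
```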
scripts/load.py
ADDED
@@ -0,0 +1,34 @@
from datasets import load_dataset
import io
from PIL import Image

split = "val"
# # Option: load a single parquet file directly
# dataset = load_dataset(
#     "parquet",
#     data_files="/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT/SAT_train.parquet",
#     streaming=False
# )
dataset = load_dataset(
    "/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT",
    data_files={
        "train": "SAT_train.parquet",
        "validation": "SAT_val.parquet",
    },
    batch_size=128,
)
# Note: dataset is now a DatasetDict, so select the desired split (usually 'train')
print(dataset)
# train_dataset = dataset['train']

# example = train_dataset[10]  # e.g., the 10th item
# print(example)

# images = example['image_bytes']

# question = example['question']
# print(question)
# answer_choices = example['answers']
# print(answer_choices)
# correct_answer = example['correct_answer']
# print(correct_answer)
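A hypothetical follow-up to the loader above, showing how one example could be inspected once the local parquet files are available. Depending on how the `image_bytes` feature is decoded, entries may arrive as PIL images or as raw bytes, so the sketch handles both:

```python
import io
from PIL import Image

# Assumes `dataset` is the DatasetDict built in scripts/load.py above.
example = dataset["validation"][0]
images = [
    im if isinstance(im, Image.Image) else Image.open(io.BytesIO(im))
    for im in example["image_bytes"]
]
print(example["question"])
print(example["answers"], "->", example["correct_answer"])
```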
scripts/process.py
ADDED
@@ -0,0 +1,58 @@
from datasets import load_dataset
import io
from PIL import Image
import json
import os
from tqdm import tqdm

# Create the directory for saving images
os.makedirs("images", exist_ok=True)

dataset = load_dataset(
    "/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT",
    data_files={
        "train": "SAT_train.parquet",
        "validation": "SAT_val.parquet",
    },
    batch_size=128,
)

def process_dataset(dataset, split_name):
    processed_data = []

    for i, example in enumerate(tqdm(dataset[split_name])):
        # Save the images to disk
        image_paths = []

        # Compute the subdirectory path (one directory per 1000 examples)
        subdir_num = i // 1000
        subdir_path = os.path.join("images", split_name, f"{subdir_num:03d}")
        os.makedirs(subdir_path, exist_ok=True)

        for j, img in enumerate(example['image_bytes']):
            # Build a unique image file name
            img_filename = f"{split_name}_{i:06d}_{j}.jpg"
            img_path = os.path.join(subdir_path, img_filename)
            img.save(img_path)
            image_paths.append(img_path)

        # Build the new data sample
        processed_example = {
            'image': image_paths,
            'question': example['question'],
            'answers': example['answers'],
            'question_type': example['question_type'],
            'correct_answer': example['correct_answer']
        }
        processed_data.append(processed_example)

    # Save as a JSON file
    output_file = f"{split_name}_data.json"
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(processed_data, f, ensure_ascii=False, indent=2)

    print(f"Saved {len(processed_data)} examples to {output_file}")

# Process the train and validation splits
process_dataset(dataset, "train")
process_dataset(dataset, "validation")
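For reference, one record written to `train_data.json` by `process_dataset` above would look roughly like this; the paths follow the f-strings in the script, while the question/answer values here are made up:

```python
# Illustrative shape of a single record in train_data.json (values are invented).
record = {
    "image": ["images/train/000/train_000010_0.jpg", "images/train/000/train_000010_1.jpg"],
    "question": "How did the camera move between the two frames?",
    "answers": ["left", "right"],
    "question_type": "action_sequence",
    "correct_answer": "left",
}
```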
scripts/split.py
ADDED
@@ -0,0 +1,90 @@
import json
import os

def process_dataset(input_file, output_file, base_path="."):
    """
    Process the dataset: keep only the 'image' and 'conversations' fields, add an 'id' field,
    and convert image paths to absolute paths.

    Args:
        input_file: path of the input JSON file
        output_file: path of the output JSON file
        base_path: base path of the image files
    """
    # Read the original data
    with open(input_file, 'r', encoding='utf-8') as f:
        input_data = json.load(f)

    processed_data = []

    for item in input_data:
        # Convert every image path to an absolute path
        absolute_image_paths = []
        for image_path in item["image"]:
            absolute_path = os.path.abspath(os.path.join(base_path, image_path))
            absolute_image_paths.append(absolute_path)

        # Use the first image name (without extension) as the id
        first_image_name = os.path.basename(item["image"][0])
        image_id = os.path.splitext(first_image_name)[0]

        img_len = len(item["image"])

        # Prepend one "<image>\n" token per image to the first human turn
        item["conversations"][0]['value'] = "<image>\n" * img_len + item["conversations"][0]['value']

        # Build the new item
        new_item = {
            "id": image_id,
            "image": absolute_image_paths,  # keep the absolute paths of all images
            "conversations": item["conversations"]
        }

        processed_data.append(new_item)

    # Save the processed data
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(processed_data, f, indent=2, ensure_ascii=False)

    print(f"Done! Processed {len(processed_data)} entries")
    print(f"Output file: {output_file}")

def process_data_directly(input_data, base_path="."):
    """
    Process a data list directly (if the data is already in memory).

    Args:
        input_data: the original data list
        base_path: base path of the image files

    Returns:
        the processed data list
    """
    processed_data = []

    for item in input_data:
        # Get the image path
        image_path = item["image"][0]

        # Use the image name as the id
        image_name = os.path.basename(image_path)
        image_id = os.path.splitext(image_name)[0]

        # Convert to an absolute path
        absolute_image_path = os.path.abspath(os.path.join(base_path, image_path))

        # Build the new item
        new_item = {
            "id": image_id,
            "image": [absolute_image_path],
            "conversations": item["conversations"]
        }

        processed_data.append(new_item)

    return processed_data

# Example usage
if __name__ == "__main__":
    # Method 1: read from a file and process
    process_dataset("train_data_processed.json", "train_data_convs.json", base_path="/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT")
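The key transformation in `process_dataset` above is prepending one `<image>\n` token per image to the first human turn. A small self-contained illustration (the two-image item is made up):

```python
# Made-up two-image item, just to show the "<image>\n" prefixing from scripts/split.py.
item = {
    "image": ["a.jpg", "b.jpg"],
    "conversations": [
        {"from": "human", "value": "Did the camera move left or right?"},
        {"from": "gpt", "value": "left"},
    ],
}
item["conversations"][0]["value"] = "<image>\n" * len(item["image"]) + item["conversations"][0]["value"]
print(item["conversations"][0]["value"])
# <image>
# <image>
# Did the camera move left or right?
```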
scripts/to_convs.py
ADDED
@@ -0,0 +1,77 @@
import json
import random

def get_qa_type(question):
    question_type = "other"

    if "how did the camera" in question.lower() or "is the camera moving" in question.lower():
        question_type = "action_sequence"

    if ("need to go" in question.lower()):
        question_type = "goal_aim"

    if "any of the objects in the initial" in question.lower():
        question_type = "obj_movement"

    if "if i" in question.lower():
        question_type = "action_consequence"

    if 'if i move to the' in question.lower() or "for someone at the" in question.lower():
        question_type = "perspective"

    return question_type

def convert_to_conversation(json_data):
    for item in json_data:
        question = item["question"]
        answers = item["answers"]
        correct_answer = item["correct_answer"]

        # Update question_type
        item["question_type"] = get_qa_type(question)

        # Build the human turn
        prompt = question + " Answer the question using a single word or phrase."

        # Format the answer choices
        if len(answers) > 1:
            ans_choice_order = answers.copy()
            ans_choice_order = ['"' + ans + '"' for ans in ans_choice_order]
            random.shuffle(ans_choice_order)
            answer_choices_format = " or ".join(ans_choice_order)

            if answer_choices_format != "":
                prompt += f" Choose between the following options: {answer_choices_format}."

        # Add a conversations field to the original item
        item["conversations"] = [
            {
                "from": "human",
                "value": prompt
            },
            {
                "from": "gpt",
                "value": correct_answer
            }
        ]

    return json_data

# Read a JSON file, convert it, and save the result
def process_json_file(input_file, output_file):
    with open(input_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Convert to the conversations format
    enhanced_data = convert_to_conversation(data)

    # Save the result
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(enhanced_data, f, ensure_ascii=False, indent=2)

    print(f"Processed {len(enhanced_data)} entries; result saved to {output_file}")

# Example usage
if __name__ == "__main__":
    # Process the input file (adjust the paths as needed)
    process_json_file('train_data.json', 'train_data_convs.json')
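A sketch of what `convert_to_conversation` above produces for one row; the field values here are illustrative, and the option order is shuffled at runtime:

```python
# Illustrative input row from a {split}_data.json file:
row = {
    "question": "Is the camera moving or static?",
    "answers": ["moving", "static"],
    "correct_answer": "moving",
}
# After conversion, the same item additionally carries roughly:
# row["question_type"]  -> "action_sequence"
# row["conversations"]  -> [
#   {"from": "human", "value": 'Is the camera moving or static? Answer the question using a '
#                              'single word or phrase. Choose between the following options: '
#                              '"static" or "moving".'},
#   {"from": "gpt", "value": "moving"},
# ]
```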
train_data_processed.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c929965ecf1adbf1d70be6f14c53a2d7ec60f8135e500f8d5fa8cb8138da4830
size 130827121