# SAT/scripts/split.py
# Uploaded by lifuguan via the upload-large-folder tool (commit f706a86, verified).
import json
import os
def process_dataset(input_file, output_file, base_path="."):
    """Convert a raw dataset JSON file into the conversation format.

    For each item, keep only the ``image`` and ``conversations`` fields,
    add an ``id`` field (first image's filename without extension),
    convert every image path to an absolute path, and prepend one
    ``"<image>\\n"`` token per image to the first conversation turn.

    The input items are NOT mutated; a fresh list is written out.

    Args:
        input_file: Path of the input JSON file (a list of items).
        output_file: Path of the output JSON file.
        base_path: Base directory that relative image paths are resolved
            against.

    Raises:
        ValueError: If an item has an empty ``image`` field.
    """
    # Read the raw data.
    with open(input_file, 'r', encoding='utf-8') as f:
        input_data = json.load(f)

    processed_data = []
    for index, item in enumerate(input_data):
        images = item["image"]
        # Tolerate a single path given as a plain string instead of a list;
        # iterating a string would otherwise split it into characters.
        if isinstance(images, str):
            images = [images]
        if not images:
            raise ValueError(f"Item {index} has no images; cannot derive an id")

        # Resolve every image path to an absolute path.
        absolute_image_paths = [
            os.path.abspath(os.path.join(base_path, image_path))
            for image_path in images
        ]

        # Use the first image's filename (without extension) as the id.
        first_image_name = os.path.basename(images[0])
        image_id = os.path.splitext(first_image_name)[0]

        # Prepend one "<image>\n" token per image to the first turn.
        # Copy the conversation list and its first entry so the caller's
        # input data is not mutated in place.
        conversations = [dict(item["conversations"][0])] + list(item["conversations"][1:])
        conversations[0]['value'] = "<image>\n" * len(images) + conversations[0]['value']

        processed_data.append({
            "id": image_id,
            "image": absolute_image_paths,  # absolute paths of all images
            "conversations": conversations,
        })

    # Write out the processed data.
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(processed_data, f, indent=2, ensure_ascii=False)
    print(f"处理完成!共处理 {len(processed_data)} 条数据")
    print(f"输出文件:{output_file}")
def process_data_directly(input_data, base_path="."):
    """Process an in-memory list of dataset items.

    Each output item keeps only the first image (converted to an absolute
    path under *base_path*) plus the original ``conversations``, and gains
    an ``id`` derived from that image's filename without its extension.

    Args:
        input_data: List of raw dataset items.
        base_path: Base directory that relative image paths are resolved
            against.

    Returns:
        A new list of processed items.
    """
    results = []
    for entry in input_data:
        first_image = entry["image"][0]
        # id = filename of the first image, extension stripped.
        stem, _ = os.path.splitext(os.path.basename(first_image))
        resolved = os.path.abspath(os.path.join(base_path, first_image))
        results.append({
            "id": stem,
            "image": [resolved],
            "conversations": entry["conversations"],
        })
    return results
# Example usage
if __name__ == "__main__":
    # Method 1: read from a file and process it.
    sat_base = "/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT"
    process_dataset(
        "train_data_processed.json",
        "train_data_convs.json",
        base_path=sat_base,
    )