|
|
import json |
|
|
import os |
|
|
|
|
|
def process_dataset(input_file, output_file, base_path="."):
    """Process a dataset JSON file into the conversations format.

    For each item, keep only the ``image`` and ``conversations`` fields,
    derive an ``id`` from the first image's filename (without extension),
    prepend one ``<image>\\n`` token per image to the first conversation
    turn, and resolve every image path to an absolute path.

    Args:
        input_file: Path to the input JSON file — a list of items, each
            with ``image`` (a list of relative paths) and ``conversations``.
        output_file: Path where the processed JSON list is written.
        base_path: Base directory that image paths are resolved against.
    """
    with open(input_file, 'r', encoding='utf-8') as f:
        input_data = json.load(f)

    processed_data = []

    for item in input_data:
        # Resolve each image path against base_path to an absolute path.
        absolute_image_paths = [
            os.path.abspath(os.path.join(base_path, image_path))
            for image_path in item["image"]
        ]

        # Sample id = first image's filename with its extension stripped.
        first_image_name = os.path.basename(item["image"][0])
        image_id = os.path.splitext(first_image_name)[0]

        # Prepend one "<image>" placeholder per image so the token count
        # in the first turn matches the number of images.
        img_len = len(item["image"])
        item["conversations"][0]['value'] = (
            "<image>\n" * img_len + item["conversations"][0]['value']
        )

        processed_data.append({
            "id": image_id,
            "image": absolute_image_paths,
            "conversations": item["conversations"],
        })

    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(processed_data, f, indent=2, ensure_ascii=False)

    print(f"处理完成!共处理 {len(processed_data)} 条数据")
    print(f"输出文件:{output_file}")
|
|
|
|
|
def process_data_directly(input_data, base_path="."):
    """Process an in-memory list of dataset items.

    For each item, derive an ``id`` from the first image's filename
    (without extension) and resolve that image's path to an absolute
    path. The ``conversations`` field is passed through untouched.

    Args:
        input_data: Raw list of items, each with ``image`` (a list of
            relative paths) and ``conversations``.
        base_path: Base directory that image paths are resolved against.

    Returns:
        The processed list of items.
    """
    results = []

    for record in input_data:
        # NOTE(review): unlike process_dataset, only the first image is
        # kept and no "<image>" tokens are prepended — confirm intentional.
        first_image = record["image"][0]

        stem, _ext = os.path.splitext(os.path.basename(first_image))
        resolved = os.path.abspath(os.path.join(base_path, first_image))

        results.append({
            "id": stem,
            "image": [resolved],
            "conversations": record["conversations"],
        })

    return results
|
|
|
|
|
|
|
|
if __name__ == "__main__":

    # Convert the preprocessed training split into conversations format,
    # resolving image paths against the SAT dataset root on shared storage.
    # NOTE(review): hard-coded cluster path — adjust for other environments.
    process_dataset("train_data_processed.json", "train_data_convs.json", base_path="/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT")