Fhrozen committed
Commit 6a4cefb · 1 Parent(s): db47b20

Adding files

README.md CHANGED
@@ -1,3 +1,55 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: apache-2.0
+ task_categories:
+ - image-text-to-text
+ tags:
+ - image
+ language:
+ - en
+ size_categories:
+ - 10K<n<100K
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: data/train/*
+   - split: val
+     path: data/val/*
+   - split: test
+     path: data/test/*
+ ---
+
+ # Flickr30K Narratives
+
+ [Original Source](https://www.kaggle.com/datasets/hsankesara/flickr-image-dataset) | [Google Localized Narratives](https://google.github.io/localized-narratives/)
+
+ ## 📌 Introduction
+
+ This dataset combines the images and captions from the original Flickr30K dataset with the narrative annotations from the [localized-narratives](https://github.com/google/localized-narratives) project.
+
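+ The splits declared in the YAML header above can be loaded with the `datasets` library. A minimal sketch (the repo id below is a placeholder; substitute the actual Hub id of this dataset):
+
+ ```python
+ from datasets import load_dataset
+
+ # Loads the default config with its train/val/test splits.
+ ds = load_dataset("user/flickr30k-narratives")  # hypothetical repo id
+ sample = ds["train"][0]
+ print(sample["caption"])     # the original Flickr30K captions
+ print(sample["narratives"])  # the Localized Narratives annotations
+ ```
+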
+ ## 🙏 Acknowledgement
+
+ All credit goes to the original Flickr30K project and the localized-narratives team.
+
+ ## 📜 Cite
+
+ Please consider citing the following related papers:
+
+ ```bibtex
+ @article{young2014image,
+   title={From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions},
+   author={Young, Peter and Lai, Alice and Hodosh, Micah and Hockenmaier, Julia},
+   journal={Transactions of the Association for Computational Linguistics},
+   volume={2},
+   pages={67--78},
+   year={2014},
+   publisher={MIT Press}
+ }
+
+ @inproceedings{PontTuset_eccv2020,
+   author = {Jordi Pont-Tuset and Jasper Uijlings and Soravit Changpinyo and Radu Soricut and Vittorio Ferrari},
+   title = {Connecting Vision and Language with Localized Narratives},
+   booktitle = {ECCV},
+   year = {2020}
+ }
+ ```
data/test/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40882aa0981f191e3ba76472189b83f17a39825895f0229047e1059db40b12f6
+ size 138470560
data/train/data-00000-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2e6c283391bb9a8f7ee40a3e2dea490566b18af60cd3754793ce0c3e58aa01c
+ size 398295120
data/train/data-00001-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4630db609494505f2950acc72444b305c0d2587899475e389f3723793dd91fca
+ size 397582952
data/train/data-00002-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a18c34428f00a3d81497e85f79c09c09c3a04cdce90b84a6b30f2758478ac6f
+ size 414227512
data/train/data-00003-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:130dc26be826dd9d9cbd6fc594984b295b310bb44291a1617d951a6ac9787cf8
+ size 399969608
data/train/data-00004-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b48695042ad6b69bc45caf385c1f61dccdee0ac9dfeebb75948e9a250dd54bd
+ size 407304096
data/train/data-00005-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f46dcaa0547f50d03dc86e012f722da3189b04e8725b6a0e4ec6a2dd2d7a5446
+ size 419602272
data/train/data-00006-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31bafa79db94c84a6c54a42a56dc03c80c16e6d1964d9b5ffcbbf56eb32fddd7
+ size 437495792
data/train/data-00007-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67764d7fbd50ad3fbc27bb68652bec4444f144590058bda33eeec11c3c7468a2
+ size 454562576
data/train/data-00008-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be9c53943d31e6d05659b45160a5b6bef1d453428491e5cb78f66d76c634053b
+ size 428587392
data/train/data-00009-of-00010.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73a6ff7d113ae89f040ca8a943d7f0c57c5448de47240f9c35e6383c6c5e285b
+ size 404246168
data/val/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ee07f586ad9b14d05585726eec9e1f71fdebe8f436ecea8b071857cc8c40c1a
+ size 139224360
make_flickr_ds.py ADDED
@@ -0,0 +1,78 @@
+ import json
+ import os
+
+ import pandas as pd
+ from datasets import Dataset, Image, load_dataset
+
+
+ def load_jsonl(file_path):
+     """
+     Loads a JSONL file and returns a list of Python dictionaries.
+     Each dictionary represents a JSON object from a line in the file.
+     """
+     data = []
+     with open(file_path, "r", encoding="utf-8") as f:
+         for line in f:
+             try:
+                 # Parse each line as a JSON object.
+                 json_object = json.loads(line.strip())
+                 data.append(json_object)
+             except json.JSONDecodeError as e:
+                 print(f"Error decoding JSON on line: {line.strip()} - {e}")
+     return data
+
+
+ def main():
+     # Expected layout under workdir: results.csv, flickr30k_images/,
+     # and narratives/flickr30k_{split}_captions.jsonl for each split.
+     dsets = ["train", "val", "test"]
+     workdir = "./flickr30k"
+
+     # Load the original Flickr30K captions (pipe-delimited CSV).
+     annot_fn = os.path.join(workdir, "results.csv")
+     df = pd.read_csv(annot_fn, delimiter="|")
+
+     # Group the captions of each image under a single record.
+     datadict = {}
+     for _, row in df.iterrows():
+         idx = row["image_name"].replace(".jpg", "")
+         if idx not in datadict:
+             datadict[idx] = {
+                 "image_name": row["image_name"],
+                 "image": os.path.join(workdir, "flickr30k_images", row["image_name"]),
+                 "sentids": [],
+                 "split": None,
+                 "caption": [],
+                 "narratives": [],
+             }
+         # The CSV header carries a leading space in these column names.
+         datadict[idx]["sentids"].append(row[" comment_number"])
+         datadict[idx]["caption"].append(row[" comment"])
+
+     # Align each image to the Localized Narratives splits.
+     for split in dsets:
+         narr = load_jsonl(os.path.join(workdir, "narratives", f"flickr30k_{split}_captions.jsonl"))
+         for item in narr:
+             idx = item["image_id"]
+             datadict[idx]["split"] = split
+             datadict[idx]["narratives"].append(item["caption"])
+
+     # Build and save one dataset per split; images that never received a
+     # narratives split assignment are dropped by the filter below.
+     for split in dsets:
+         df = pd.DataFrame.from_dict(datadict, orient="index")
+         df = df[df["split"] == split]
+         ds = Dataset.from_pandas(df)
+         ds = ds.remove_columns(["__index_level_0__", "split"])
+         ds = ds.cast_column("image", Image())
+         ds.save_to_disk(os.path.join(workdir, "datasets", "data", split), max_shard_size="400MB")
+
+
+ def test_dataset():
+     ds = load_dataset("./flickr30k/datasets")
+     print(ds["train"][0])
+
+
+ if __name__ == "__main__":
+     # main()
+     test_dataset()
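A quick sanity check (a sketch, not part of the committed script): the sharded `data-XXXXX-of-XXXXX.arrow` files committed under `data/` above are exactly what `save_to_disk` produces, so a single split can be reloaded directly:

```python
from datasets import load_from_disk

# Assumes main() has already run; the path matches the save_to_disk call above.
val = load_from_disk("./flickr30k/datasets/data/val")
print(len(val), val.column_names)
```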