TomTBT committed on
Commit
1088738
·
1 Parent(s): 34dc2c6

Disabled incrementals and put iter archive into a generator

Browse files
Files changed (1) hide show
  1. pmc_open_access_figure.py +33 -29
pmc_open_access_figure.py CHANGED
@@ -158,47 +158,39 @@ class OpenAccessFigure(datasets.GeneratorBasedBuilder):
158
  baseline_file_list_urls = [f"{url}{basename}PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}.filelist.csv" for i in range(9) if (subset!="non_commercial" or i>0)]
159
  baseline_file_list_l.extend(dl_manager.download(baseline_file_list_urls))
160
 
161
- date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
162
- incremental_dates = [
163
- (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
164
- for i in range(date_delta.days)
165
- ]
166
- incremental_urls = [f"{url}{basename}incr.{date}.filelist.csv" for date in incremental_dates]
167
- for url in incremental_urls:
168
- try:
169
- incremental_file_list_l.append(dl_manager.download(url))
170
- except FileNotFoundError: # Some increment don't exist
171
- continue
172
 
173
  oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
174
  oa_package_list = oa_package_list[["File"]]
175
  figure_archives = []
176
  df_l = []
177
  set_article = set()
178
- for l, baseline_file_list in enumerate(incremental_file_list_l[::-1] + baseline_file_list_l):
179
  try:
180
  file_list = pd.read_csv(baseline_file_list, index_col="AccessionID")
181
  except FileNotFoundError: # File not found can happen here in stream mode
182
  continue
183
  file_list = file_list.join(oa_package_list).reset_index().set_index("Article File")
184
  file_list.File = file_list.File.fillna('')
185
- mask = (~file_list.File.isin(set_article)) & (file_list.File!="")
186
- file_list = file_list[mask]
187
  figure_url_l = [f"{_URL_ROOT}{figure_path}" for figure_path in file_list.File]
188
 
189
- n_file = len(file_list.File)
190
- n_step = 1000
191
- #for low_lim in range(0, n_file+n_step, n_step):
192
- # up_lim = min(low_lim + n_step, n_file)
193
- # if up_lim <= low_lim:
194
- # break
195
- # slc_ = slice(low_lim, up_lim)
196
- slc_ = slice(0, n_file)
197
  try:
198
- figure_archives.extend(dl_manager.download(figure_url_l[slc_]))
199
- if l < len(incremental_file_list_l): # Only adding the incrementals to the list, the rest don't have overlap in pmid
200
- set_article.union(file_list.File[slc_])
201
- df_l.append(file_list[slc_])
202
  except FileNotFoundError:
203
  continue
204
 
@@ -208,25 +200,37 @@ class OpenAccessFigure(datasets.GeneratorBasedBuilder):
208
  datasets.SplitGenerator(
209
  name=datasets.Split.TRAIN,
210
  gen_kwargs={
211
- "figure_archive_lists": [dl_manager.iter_archive(archive) for k, archive in enumerate(figure_archives) if k%10 < 8],
212
  "package_df": package_df[np.arange(len(package_df))%10 < 8],
213
  },
214
  ),
215
  datasets.SplitGenerator(
216
  name=datasets.Split.TEST,
217
  gen_kwargs={
218
- "figure_archive_lists": [dl_manager.iter_archive(archive) for k, archive in enumerate(figure_archives) if k%10 == 8],
219
  "package_df": package_df[np.arange(len(package_df))%10 == 8],
220
  },
221
  ),
222
  datasets.SplitGenerator(
223
  name=datasets.Split.VALIDATION,
224
  gen_kwargs={
225
- "figure_archive_lists": [dl_manager.iter_archive(archive) for k, archive in enumerate(figure_archives) if k%10 == 9],
226
  "package_df": package_df[np.arange(len(package_df))%10 == 9],
227
  },
228
  ),
229
  ]
 
 
 
 
 
 
 
 
 
 
 
 
230
 
231
  def _generate_examples(self, figure_archive_lists, package_df):
232
  #Loading the file listing folders of individual PMC Article package (with medias and graphics)
 
158
  baseline_file_list_urls = [f"{url}{basename}PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}.filelist.csv" for i in range(9) if (subset!="non_commercial" or i>0)]
159
  baseline_file_list_l.extend(dl_manager.download(baseline_file_list_urls))
160
 
161
+ #date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
162
+ #incremental_dates = [
163
+ # (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
164
+ # for i in range(date_delta.days)
165
+ # ]
166
+ #incremental_urls = [f"{url}{basename}incr.{date}.filelist.csv" for date in incremental_dates]
167
+ #for url in incremental_urls:
168
+ # try:
169
+ # incremental_file_list_l.append(dl_manager.download(url))
170
+ # except FileNotFoundError: # Some increment don't exist
171
+ # continue
172
 
173
  oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
174
  oa_package_list = oa_package_list[["File"]]
175
  figure_archives = []
176
  df_l = []
177
  set_article = set()
178
+ for l, baseline_file_list in enumerate(baseline_file_list_l): # incremental_file_list_l[::-1] +
179
  try:
180
  file_list = pd.read_csv(baseline_file_list, index_col="AccessionID")
181
  except FileNotFoundError: # File not found can happen here in stream mode
182
  continue
183
  file_list = file_list.join(oa_package_list).reset_index().set_index("Article File")
184
  file_list.File = file_list.File.fillna('')
185
+ #mask = (~file_list.File.isin(set_article)) & (file_list.File!="")
186
+ #file_list = file_list[mask]
187
  figure_url_l = [f"{_URL_ROOT}{figure_path}" for figure_path in file_list.File]
188
 
 
 
 
 
 
 
 
 
189
  try:
190
+ figure_archives.extend(dl_manager.download(figure_url_l))
191
+ #if l < len(incremental_file_list_l): # Only adding the incrementals to the list, the rest don't have overlap in pmid
192
+ # set_article.union(file_list.File[slc_])
193
+ df_l.append(file_list)
194
  except FileNotFoundError:
195
  continue
196
 
 
200
  datasets.SplitGenerator(
201
  name=datasets.Split.TRAIN,
202
  gen_kwargs={
203
+ "figure_archive_lists": self.archive_generator(dl_manager, figure_archives, "train"),
204
  "package_df": package_df[np.arange(len(package_df))%10 < 8],
205
  },
206
  ),
207
  datasets.SplitGenerator(
208
  name=datasets.Split.TEST,
209
  gen_kwargs={
210
+ "figure_archive_lists": self.archive_generator(dl_manager, figure_archives, "test"),
211
  "package_df": package_df[np.arange(len(package_df))%10 == 8],
212
  },
213
  ),
214
  datasets.SplitGenerator(
215
  name=datasets.Split.VALIDATION,
216
  gen_kwargs={
217
+ "figure_archive_lists": self.archive_generator(dl_manager, figure_archives, "validation"),
218
  "package_df": package_df[np.arange(len(package_df))%10 == 9],
219
  },
220
  ),
221
  ]
222
+
223
+ def archive_generator(self, dl_manager, figure_archives, name):
224
+ if name == "train":
225
+ for k, archive in enumerate(figure_archives):
226
+ if k%10 < 8:
227
+ yield dl_manager.iter_archive(archive)
228
+ elif name == "test":
229
+ for k, archive in enumerate(figure_archives[8::10]):
230
+ yield dl_manager.iter_archive(archive)
231
+ elif name == "validation":
232
+ for k, archive in enumerate(figure_archives[9::10]):
233
+ yield dl_manager.iter_archive(archive)
234
 
235
  def _generate_examples(self, figure_archive_lists, package_df):
236
  #Loading the file listing folders of individual PMC Article package (with medias and graphics)