ZacharyyyK committed
Commit abd5619 · verified · 1 Parent(s): c0ed5a8

Upload 5 files

Files changed (5)
  1. Code/data.py +649 -0
  2. Code/database.md +79 -0
  3. Code/lmdb_access.py +39 -0
  4. Code/main.py +56 -0
  5. Code/utils.py +77 -0
Code/data.py ADDED
@@ -0,0 +1,649 @@
import os
import random
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from pathlib import Path
from typing import Optional
import torch.nn.functional as F
import lmdb
import gzip
import pickle
import json

from itertools import product

import numpy as np
from tqdm import tqdm
import torch

from torch_geometric.data import Data, Dataset
from torch_geometric.loader import DataLoader
import periodictable
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import SubsetRandomSampler, random_split, Subset
import bisect

# Unit conversions and target normalization constants.
HARTREE_2_EV = 27.2114
BOHR_2_ANGSTROM = 1.8897
_MEAN_ENERGY = -4.269320623583757
_STD_ENERGY = 1.0
_STD_FORCE_SCALE = 1.0

# Map element symbols (in any capitalization) to atomic numbers.
atomic_number_mapping = {}
for element in periodictable.elements:
    atomic_number_mapping[element.symbol] = element.number
    atomic_number_mapping[element.symbol.upper()] = element.number
    atomic_number_mapping[element.symbol.lower()] = element.number


# Isolated-atom reference energies (Hartree), keyed by atomic number.
atom_energy = {
    1: -0.5002727762,
    4: -14.6684425428,
    5: -24.6543539532,
    6: -37.8462799513,
    7: -54.5844893657,
    8: -75.0606214015,
    9: -99.7155354215,
    14: -289.3723539998,
    15: -341.2580898032,
    16: -398.1049925382,
    17: -460.1362417086,
    21: -760.5813501324,
    22: -849.3013849537,
    23: -943.8255794204,
    24: -1044.2810289455,
    25: -1150.8680174849,
    26: -1263.5207828239406,
    27: -1382.5485719267936,
    28: -1508.0542451335,
    29: -1640.1731641564784,
    31: -1924.5926070018,
    32: -2076.6914561594,
    33: -2235.5683127287,
    34: -2401.2347730327,
    35: -2573.8397377628
}


def find_last_index_with_key(objects, key):
    """Return the index of the last dict in `objects` whose `key` exists and is not None, or -1."""
    last_index = -1
    for i in range(len(objects) - 1, -1, -1):
        if key in objects[i] and objects[i][key] is not None:
            last_index = i
            break
    return last_index


def data_to_pyg(data, key, stage='1st', filter=False):
    """Convert one LMDB record (a dict of optimization trajectories) into a list of PyG Data objects."""

    def process_data(phase):
        nonlocal data
        nonlocal key
        nonlocal stage
        datas = []
        if phase is None or len(phase) == 0:
            return datas

        # Pick the reference ("last") frame for the requested stage.
        if stage == 'mixing':
            if len(data['DFT_2nd']) != 0:
                last_index = find_last_index_with_key(data['DFT_2nd'], 'energy')
                if last_index == -1:
                    if data['DFT_1st'] is None or len(data['DFT_1st']) == 0:
                        return datas
                    last_index = find_last_index_with_key(data['DFT_1st'], 'energy')
                    last_data = data['DFT_1st'][last_index]
                else:
                    last_data = data['DFT_2nd'][last_index]
            else:
                if data['DFT_1st'] is None or len(data['DFT_1st']) == 0:
                    return datas
                last_index = find_last_index_with_key(data['DFT_1st'], 'energy')
                last_data = data['DFT_1st'][last_index]

        elif stage == '1st':
            last_index = find_last_index_with_key(data['DFT_1st'], 'energy')
            if last_index == -1:
                return datas
            last_data = phase[last_index]

        elif stage == '1st_smash':
            last_index = find_last_index_with_key(data['DFT_1st'], 'energy')
            if last_index == -1:
                return datas
            last_data = phase[last_index]

        elif stage == '2nd':
            last_index = find_last_index_with_key(data['DFT_2nd'], 'energy')
            if last_index == -1:
                return datas
            last_data = phase[last_index]

        elif stage == 'hf':
            last_index = find_last_index_with_key(data['hf'], 'energy')
            if last_index == -1:
                return datas
            last_data = phase[last_index]

        elif stage == 'pm3':
            last_index = find_last_index_with_key(data['pm3'], 'energy')
            if last_index == -1:
                return datas
            last_data = phase[last_index]
        else:
            raise Exception('Unknown stage')

        last_coordinates = last_data['coordinates']
        last_energy = last_data['energy']

        # For the '1st_smash' stage keep only SMASH records, which carry no per-atom charge.
        if stage == '1st_smash':
            if 'charge' in last_coordinates[0]:
                return datas

        for d in phase:
            coords = d['coordinates']
            energy = d['energy']
            gradient = d['gradient']
            formation_energies = []
            atomic_numbers = []
            positions = []
            last_positions = []
            forces = []

            if coords is None or len(coords) == 0:
                continue

            if stage == '1st_smash':
                if 'charge' in coords[0]:
                    continue

            if energy is None:
                continue
            if len(coords) != len(last_coordinates):
                continue
            if len(gradient) != len(coords):
                continue
            for i, atom_info in enumerate(coords):
                atom = atom_info['atom']
                atomic_number = atomic_number_mapping[atom]
                x = atom_info['x']
                y = atom_info['y']
                z = atom_info['z']

                atomic_numbers.append(atomic_number)
                formation_energies.append(atom_energy[atomic_number])
                positions.append([x, y, z])
                last_positions.append([last_coordinates[i]['x'], last_coordinates[i]['y'],
                                       last_coordinates[i]['z']])
                # Forces are negative gradients, converted from Hartree/Bohr to eV/Angstrom.
                forces.append([-gradient[i]['dx'] * HARTREE_2_EV * BOHR_2_ANGSTROM,
                               -gradient[i]['dy'] * HARTREE_2_EV * BOHR_2_ANGSTROM,
                               -gradient[i]['dz'] * HARTREE_2_EV * BOHR_2_ANGSTROM])

            x = torch.tensor(atomic_numbers, dtype=torch.long).view(-1, 1)
            pos = torch.tensor(positions, dtype=torch.float)
            last_pos = torch.tensor(last_positions, dtype=torch.float)
            # Per-atom energy in eV, relative to the isolated-atom references.
            y = torch.tensor([(energy - sum(formation_energies)) * HARTREE_2_EV / x.size(0)],
                             dtype=torch.float)
            last_y = torch.tensor([(last_energy - sum(formation_energies)) * HARTREE_2_EV / x.size(0)],
                                  dtype=torch.float)
            y_force = torch.tensor(forces, dtype=torch.float)

            if (torch.isnan(x).any() or torch.isnan(pos).any() or torch.isnan(last_pos).any()
                    or torch.isnan(y).any() or torch.isnan(last_y).any() or torch.isnan(y_force).any()):
                continue

            ds = Data(x=x, natoms=x.size(0), pos=pos, last_pos=last_pos, y=y, last_y=last_y,
                      y_force=y_force, cid=str(key))
            datas.append(ds)

        return datas

    if stage == '1st':
        return process_data(data['DFT_1st'])
    elif stage == '1st_smash':
        return process_data(data['DFT_1st'])
    elif stage == '2nd':
        return process_data(data['DFT_2nd'])
    elif stage == 'mixing':
        return process_data(data['DFT_1st']) + process_data(data['DFT_2nd'])
    elif stage == 'pm3':
        return process_data(data['pm3'])
    elif stage == 'hf':
        return process_data(data['hf'])
    else:
        raise Exception('Unknown stage')

def process_key(key, db_path, stage, filtering):
    # Worker: return the key if the record yields at least one valid sample for this stage.
    env = lmdb.open(str(db_path), subdir=False, readonly=True, lock=False)
    with env.begin(write=False) as txn:
        datapoint_pickled = txn.get(key)
    data_objects = data_to_pyg(pickle.loads(gzip.decompress(datapoint_pickled)), key,
                               stage=stage, filter=filtering)

    if len(data_objects) > 0:
        return key
    else:
        return None


def process_num(key, db_path, stage, filtering):
    # Worker: return the number of valid samples the record yields for this stage.
    env = lmdb.open(str(db_path), subdir=False, readonly=True, lock=False)
    with env.begin(write=False) as txn:
        datapoint_pickled = txn.get(key)
    data_objects = data_to_pyg(pickle.loads(gzip.decompress(datapoint_pickled)), key,
                               stage=stage, filter=filtering)

    if len(data_objects) > 0:
        return len(data_objects)
    else:
        return None


def get_valid_nums(db_path, keys, stage, filtering):

    valid_nums = []

    worker_func = partial(process_num, db_path=db_path, stage=stage, filtering=filtering)

    with ProcessPoolExecutor(max_workers=32) as executor:
        results = executor.map(worker_func, keys)
        for maybe_len in tqdm(results, total=len(keys), desc="Get valid numbers"):
            if maybe_len is not None:
                valid_nums.append(maybe_len)

    return valid_nums


def filter_valid_keys(db_path, keys, stage, filtering):

    valid_keys = []

    worker_func = partial(process_key, db_path=db_path, stage=stage, filtering=filtering)

    with ProcessPoolExecutor(max_workers=32) as executor:
        results = executor.map(worker_func, keys)
        for maybe_key in tqdm(results, total=len(keys), desc="Filtering valid keys"):
            if maybe_key is not None:
                valid_keys.append(maybe_key)

    return valid_keys


class LMDBDataset(Dataset):

    def __init__(self, path, transform=None, keys_file='valid_keys', stage='1st', total_traj=True,
                 SubsetOnly=False, getTest=False, stochastic_frame=False) -> None:

        super(LMDBDataset, self).__init__()

        self.path = Path(path)

        self.keys_file = keys_file

        self.stage = stage

        self.total_traj = total_traj
        self.stochastic_frame = stochastic_frame

        assert self.path.is_dir(), "Path is not a directory"

        db_paths = sorted(self.path.glob("*.lmdb"))

        assert len(db_paths) > 0, f"No LMDBs found in '{self.path}'"

        self._keys = []

        if total_traj:
            self._nums = []

        self.envs = []

        self.SubsetOnly = SubsetOnly

        self.postfix = ""
        if SubsetOnly:
            self.postfix = "_Subset"

        for i, db_path in enumerate(db_paths):

            if SubsetOnly:
                if 'Data06.lmdb' not in str(db_path):
                    continue

            # If we're generating the test set, skip all lmdbs that aren't the test one; otherwise skip only the test lmdb.
            if getTest:
                if 'test.lmdb' not in str(db_path):
                    continue
            else:
                if 'test.lmdb' in str(db_path):
                    continue

            cur_env = self.connect_db(db_path)
            self.envs.append(cur_env)

            lmdb_name = Path(str(db_path)).stem

            # Cache the valid keys per LMDB so the expensive filtering pass runs only once.
            if os.path.exists(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}.txt')):
                self._keys.append(self.load_keys(lmdb_name))
            else:
                with cur_env.begin() as txn:
                    all_keys = [key for key in tqdm(txn.cursor().iternext(values=False))]
                filter_keys = filter_valid_keys(db_path, all_keys, self.stage, not self.SubsetOnly)
                self._keys.append(filter_keys)
                self.save_keys(filter_keys, lmdb_name)

            if total_traj:
                if os.path.exists(
                        self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}_number.txt')):
                    self._nums.append(self.load_nums(lmdb_name))
                else:
                    numbers = get_valid_nums(db_path, self._keys[-1], self.stage, not self.SubsetOnly)
                    self._nums.append(numbers)
                    self.save_numbers(numbers, lmdb_name)

        if not total_traj:
            # One sample per molecule: index directly into the key lists.
            keylens = [len(k) for k in self._keys]
            self._keylen_cumulative = np.cumsum(keylens).tolist()
            self.num_samples = sum(keylens)

        else:
            # One sample per trajectory frame: index through cumulative frame counts.
            keylens = [sum(k) for k in self._nums]
            self._keylen_cumulative = np.cumsum(keylens).tolist()
            self._num_cumulative = [np.cumsum(k).tolist() for k in self._nums]
            self.num_samples = sum(keylens)
            nums_flat = np.concatenate([np.array(nums) for nums in self._nums])
            cumulative_nums = np.cumsum(nums_flat)
            start_indices = np.concatenate(([0], cumulative_nums[:-1]))
            self.trajectory_indices = list(zip(start_indices.tolist(), cumulative_nums.tolist()))

        self.transform = transform

        self.maximum_dist = 0

    def save_keys(self, keys, lmdb_name):
        with open(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}.txt'), 'w') as f:
            for key in keys:
                f.write(key.hex() + '\n')

    def save_numbers(self, numbers, lmdb_name):
        with open(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}_number.txt'), 'w') as f:
            for num in numbers:
                f.write(str(num) + '\n')

    def load_keys(self, lmdb_name):

        with open(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}.txt'), 'r') as f:
            keys = [bytes.fromhex(line.strip()) for line in f]

        return keys

    def load_nums(self, lmdb_name):

        with open(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}_number.txt'), 'r') as f:
            nums = [int(line.strip()) for line in f]
        return nums

    def __len__(self) -> int:
        return self.num_samples

    def __getitem__(self, idx: int):

        # Locate which LMDB shard the global index falls into.
        db_idx = bisect.bisect(self._keylen_cumulative, idx)
        el_idx = idx

        if db_idx != 0:
            el_idx = idx - self._keylen_cumulative[db_idx - 1]
        assert el_idx >= 0

        if not self.total_traj:
            datapoint_pickled = (
                self.envs[db_idx]
                .begin()
                .get(self._keys[db_idx][el_idx])
            )
            data_objects = data_to_pyg(pickle.loads(gzip.decompress(datapoint_pickled)),
                                       self._keys[db_idx][el_idx], stage=self.stage,
                                       filter=not self.SubsetOnly)
            if len(data_objects) == 0:
                return None

            if self.transform is not None:
                data_objects = [self.transform(data_object, self.stochastic_frame) for data_object in data_objects]

            # One random frame per molecule when not using the full trajectory.
            return random.choice(data_objects)

        else:
            # Map the flat frame index to (molecule, frame-within-molecule).
            num_idx = bisect.bisect(self._num_cumulative[db_idx], el_idx)
            data_idx = el_idx
            if num_idx != 0:
                data_idx = el_idx - self._num_cumulative[db_idx][num_idx - 1]
            assert data_idx >= 0
            datapoint_pickled = (
                self.envs[db_idx]
                .begin()
                .get(self._keys[db_idx][num_idx])
            )
            data_objects = data_to_pyg(pickle.loads(gzip.decompress(datapoint_pickled)),
                                       self._keys[db_idx][num_idx], stage=self.stage,
                                       filter=not self.SubsetOnly)

            data_object = data_objects[data_idx]
            if self.transform is not None:
                data_object = self.transform(data_object, self.stochastic_frame)
            return data_object

    def connect_db(self, lmdb_path: Optional[Path] = None) -> lmdb.Environment:
        env = lmdb.open(
            str(lmdb_path),
            subdir=False,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False,
            max_readers=128,
        )
        return env

    def close_db(self) -> None:
        for env in self.envs:
            env.close()

class CommonLMDBDataset(Dataset):

    def __init__(self, path, transform=None) -> None:

        super(CommonLMDBDataset, self).__init__()
        self.path = Path(path)

        assert self.path.is_file(), "Path is not a file"
        self.env = self.connect_db(self.path)

        self.transform = transform
        self.stochastic_frame = False

    def __len__(self) -> int:
        with self.env.begin() as txn:
            self.all_keys = [key for key in tqdm(txn.cursor().iternext(values=False))]
        return len(self.all_keys)

    def __getitem__(self, idx: int):
        datapoint_pickled = self.env.begin().get(self.all_keys[idx])
        data_object = pickle.loads(gzip.decompress(datapoint_pickled))
        # Pick one stored conformation at random.
        pos = random.choice([pos for pos in data_object.pos])
        data_object.pos = pos
        if self.transform is not None:
            data_object = self.transform(data_object, self.stochastic_frame)

        return data_object

    def connect_db(self, lmdb_path: Optional[Path] = None) -> lmdb.Environment:
        env = lmdb.open(
            str(lmdb_path),
            subdir=False,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False,
            max_readers=128,
        )
        return env

    def close_db(self) -> None:
        self.env.close()


def initialize_datasets(root, transform, stage, total_traj, SubsetOnly, stochastic_frame):
    lmdb_dataset = LMDBDataset(
        root,
        transform=transform,
        stage=stage,
        total_traj=total_traj,
        SubsetOnly=SubsetOnly,
        stochastic_frame=stochastic_frame
    )

    if not total_traj:

        train_size = int(0.8 * len(lmdb_dataset))
        val_size = len(lmdb_dataset) - train_size

        with open('splits/new_split.json' if SubsetOnly else 'splits/new_split_full.json', 'r') as f:
            split = json.load(f)

        mol_indices = list(range(len(lmdb_dataset)))
        mol_indices_np = np.array(mol_indices)

        train_trajectory_indices = (mol_indices_np[split['train']]).tolist()
        val_trajectory_indices = (mol_indices_np[split['val']]).tolist()

        train_dataset = Subset(lmdb_dataset, train_trajectory_indices)
        val_dataset = Subset(lmdb_dataset, val_trajectory_indices)

    else:
        num_trajectories = len(lmdb_dataset.trajectory_indices)
        trajectory_indices = list(range(num_trajectories))

        with open('splits/new_split.json' if SubsetOnly else 'splits/new_split_full.json', 'r') as f:
            split = json.load(f)

        trajectory_indices_np = np.array(trajectory_indices)

        train_trajectory_indices = (trajectory_indices_np[split['train']]).tolist()
        val_trajectory_indices = (trajectory_indices_np[split['val']]).tolist()

        train_snapshot_indices = []
        val_snapshot_indices = []

        # Expand each selected trajectory into its individual snapshot indices.
        for idx_set, snapshot_indices_set in zip(
                [train_trajectory_indices, val_trajectory_indices],
                [train_snapshot_indices, val_snapshot_indices],
        ):
            for traj_idx in idx_set:
                start_idx, end_idx = lmdb_dataset.trajectory_indices[traj_idx]
                snapshot_indices_set.extend(range(start_idx, end_idx))

        train_dataset = Subset(lmdb_dataset, train_snapshot_indices)
        val_dataset = Subset(lmdb_dataset, val_snapshot_indices)

    # The held-out test set always comes from the dedicated test.lmdb.
    lmdb_test_dataset = LMDBDataset(
        root,
        transform=transform,
        stage=stage,
        total_traj=True,
        SubsetOnly=False,
        getTest=True,
        stochastic_frame=stochastic_frame
    )

    test_snapshot_indices = []
    for start_idx, end_idx in lmdb_test_dataset.trajectory_indices:
        test_snapshot_indices.extend(range(start_idx, end_idx))
    test_dataset = Subset(lmdb_test_dataset, test_snapshot_indices)

    return {"train": train_dataset, "val": val_dataset, "test": test_dataset}


def scale_transform(data, stochastic_frame=False):
    # Normalize the energy target and recentre positions on the molecular centroid.
    y_scale = (data.y - _MEAN_ENERGY) / _STD_ENERGY
    data.y = y_scale
    data.y_force = data.y_force / _STD_FORCE_SCALE
    data.pos = data.pos - data.pos.mean(0, keepdim=True)
    data.num_atoms = data.pos.size(0)
    if stochastic_frame:
        # Build a randomly sign-flipped eigenvector frame of the position covariance.
        plus_minus_list = list(product([1, -1], repeat=3))
        index = random.randint(0, len(plus_minus_list) - 1)
        signs = plus_minus_list[index]
        Q = torch.linalg.eig(data.pos.T @ data.pos)[1] * torch.tensor(signs).unsqueeze(0)
        data.Q = Q.to(torch.float32).unsqueeze(0).expand(data.pos.size(0), 3, 3)

    return data


class LMDBDataLoader:
    def __init__(
        self,
        root,
        batch_size=32,
        num_workers=4,
        stage='1st',
        total_traj=False,
        SubsetOnly=False,
        stochastic_frame=False
    ) -> None:
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.datasets = initialize_datasets(root, scale_transform, stage, total_traj,
                                            SubsetOnly=SubsetOnly, stochastic_frame=stochastic_frame)

    def train_loader(self, distributed=False):
        if distributed:
            sampler = DistributedSampler(self.datasets["train"])
        else:
            subset_indices = torch.randperm(len(self.datasets["train"]))
            sampler = SubsetRandomSampler(subset_indices)
        return DataLoader(
            self.datasets["train"],
            batch_size=self.batch_size,
            drop_last=False,
            num_workers=self.num_workers,
            sampler=sampler,
            pin_memory=True,
        )

    def val_loader(self, distributed=False):
        if distributed:
            sampler = DistributedSampler(self.datasets["val"])
            return DataLoader(
                self.datasets["val"],
                batch_size=self.batch_size,
                drop_last=False,
                num_workers=self.num_workers,
                sampler=sampler,
                pin_memory=True,
            )
        return DataLoader(
            self.datasets["val"],
            batch_size=self.batch_size,
            drop_last=False,
            num_workers=self.num_workers,
            pin_memory=True,
        )

    def test_loader(self, distributed=False):
        if distributed:
            sampler = DistributedSampler(self.datasets["test"])
            return DataLoader(
                self.datasets["test"],
                batch_size=self.batch_size,
                drop_last=False,
                num_workers=self.num_workers,
                sampler=sampler,
                pin_memory=True,
            )

        return DataLoader(
            self.datasets["test"],
            batch_size=self.batch_size,
            drop_last=False,
            num_workers=self.num_workers,
            pin_memory=True,
        )


def serialize_and_compress(data: Data):
    """
    Serializes the Data object with pickle and compresses it with gzip.
    """
    return gzip.compress(pickle.dumps(data))
Code/database.md ADDED
@@ -0,0 +1,79 @@
## Structure of LMDB

### Notes
- The coordinates at the last step of one stage match the coordinates at the first step of the next stage
- In some cases, the Hartree-Fock results were copied to the raw DFT 1st file; in those cases the `DFT_1st` key is None (see the guard sketched below)

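Because any stage list can be missing or empty for a given molecule, a minimal guard when iterating a record is sketched here (assuming the record has already been fetched and decompressed into `val`, as in the access example further down):

```python
# Skip stages that are missing or empty for this molecule;
# e.g. DFT_1st can be None when the Hartree-Fock results were copied over.
for stage_name in ('pm3', 'hf', 'DFT_1st', 'DFT_2nd'):
    steps = val.get(stage_name)
    if steps is None or len(steps) == 0:
        continue
    print(stage_name, len(steps), 'steps')
```
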
### Key-Value Structure
- **Keys**:
  - CIDs as strings, e.g., `b'000015111'`
  - Note: These are byte-encoded using `string.encode()` or `b'string'`

- **Values**:
  - A nested dictionary containing multiple calculation methods
  - Uncompress values with:
  ```python
  pickle.loads(gzip.decompress(val))
  ```

  - Structure example:
  ```python
  b'000015111' : {
      'pm3' : [{step1}, {step2}, ..., {step_n}],
      'hf' : [{step1}, {step2}, ..., {step_m}],
      'DFT_1st' : [{step1}, {step2}, ..., {step_z}],
      'DFT_2nd' : [{step1}, {step2}, ..., {step_k}]
  }
  ```

  - Each step is a nested dictionary with the following structure:
  ```python
  {
      'coordinates': [{'atom': f'{element_letter}', 'charge': float(charge_val), 'x': float(x_val), 'y': float(y_val), 'z': float(z_val)}, ...],
      'energy': float(energy_val),
      'gradient': [{'atom': f'{element_letter}', 'charge': float(charge_val), 'dx': float(dx_val), 'dy': float(dy_val), 'dz': float(dz_val)}, ...]
  }
  ```

  Note: The DFT 1st stage for each molecule is calculated with either FireFly or SMASH. Steps computed with SMASH do not contain a `charge` value for each atom; a quick check is sketched below.

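A minimal way to tell which program produced a DFT 1st step, assuming `val` has been decompressed as above (the presence of the per-atom `charge` field is also what `Code/data.py` uses to separate the two):

```python
# FireFly steps carry a per-atom 'charge'; SMASH steps do not.
first_step = val['DFT_1st'][0]
is_smash = 'charge' not in first_step['coordinates'][0]
print('SMASH' if is_smash else 'FireFly')
```
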
### Accessing LMDB Example
```python
import lmdb
import pickle
import gzip

lmdb_file = '/data/zacharykrueger321/Full/PubChemQC-Traj-Test/hokusai2017.lmdb'

with lmdb.open(lmdb_file, readonly=True, subdir=False) as env:
    with env.begin() as txn:
        val = pickle.loads(gzip.decompress(txn.get(b'000000984')))

pm3_val = val['pm3']
hf_val = val['hf']
dft1st_val = val['DFT_1st']
dft2nd_val = val['DFT_2nd']

for step in dft1st_val:

    # coords & grad are lists of dictionaries that store the relevant information for each atom
    # energy is a scalar representing the energy of that conformer

    coords = step['coordinates']
    energy = step['energy']
    grad = step['gradient']

    for atom in coords:
        # access the atom's attributes
        element = atom['atom']
        x = atom['x']
        y = atom['y']
        z = atom['z']

    for atom in grad:
        # access the atom's attributes
        element = atom['atom']
        dx = atom['dx']
        dy = atom['dy']
        dz = atom['dz']
```
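
To turn a raw record into training samples, the conversion helper in `Code/data.py` can be reused directly. A minimal sketch, assuming `data.py` is on the Python path and `val` was fetched for key `b'000000984'` as above:

```python
from data import data_to_pyg

# Convert the decompressed record into PyTorch Geometric Data objects for the
# DFT 1st trajectory; each sample carries x (atomic numbers), pos, y (per-atom
# energy in eV relative to isolated atoms), last_pos/last_y (final frame) and
# y_force (eV/Angstrom).
samples = data_to_pyg(val, key=b'000000984', stage='1st')
print(len(samples), samples[0] if samples else None)
```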
Code/lmdb_access.py ADDED
@@ -0,0 +1,39 @@
import lmdb
import pickle
import gzip

# Pick the LMDB shard to inspect; the second assignment overrides the first.
lmdb_file = '/data/zacharykrueger321/Full/PubChemQC-Traj-Test/hokusai2017.lmdb'
lmdb_file = '/data/zacharykrueger321/Full/PubChemQC-Traj-Test/test/test.lmdb'

with lmdb.open(lmdb_file, readonly=True, subdir=False) as env:
    with env.begin() as txn:
        val = pickle.loads(gzip.decompress(txn.get(b'000000984')))

pm3_val = val['pm3']
hf_val = val['hf']
dft1st_val = val['DFT_1st']
dft2nd_val = val['DFT_2nd']

for step in dft1st_val:

    # coords & grad are lists of dictionaries that store the relevant information for each atom
    # energy is a scalar representing the energy of that conformer

    coords = step['coordinates']
    energy = step['energy']
    grad = step['gradient']

    for atom in coords:
        # access the atom's attributes
        element = atom['atom']
        x = atom['x']
        y = atom['y']
        z = atom['z']

    for atom in grad:
        # access the atom's attributes
        element = atom['atom']
        dx = atom['dx']
        dy = atom['dy']
        dz = atom['dz']
Code/main.py ADDED
@@ -0,0 +1,56 @@
import torch
import torch.nn as nn
from torch.optim import Adam
from models.schnet import SchNet
from utils import train, evaluate, ForceRMSELoss
from data import LMDBDataLoader, _STD_ENERGY, _STD_FORCE_SCALE

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Data settings
root = '/data/zacharykrueger321/Full/PubChemQC-Traj-Test'
batch_size = 128
num_workers = 16
stage = '1st'
total_traj = True
SubsetOnly = True

loader = LMDBDataLoader(root=root, batch_size=batch_size, num_workers=num_workers, stage=stage, total_traj=total_traj, SubsetOnly=SubsetOnly)

train_set = loader.train_loader()
val_set = loader.val_loader()
test_set = loader.test_loader()

# SchNet hyperparameters
hidden_channels = 128
num_gaussians = 128
num_filters = 128

batch_size = 128
num_interactions = 4
cutoff = 4.5

model = SchNet(num_gaussians=num_gaussians, num_filters=num_filters, hidden_channels=hidden_channels, num_interactions=num_interactions, cutoff=cutoff)
model = model.to(device)

max_epochs = 100

params = [param for _, param in model.named_parameters() if param.requires_grad]
lr = 5e-4
weight_decay = 0.0

optimizer = Adam([{'params': params}], lr=lr, weight_decay=weight_decay)
criterion_energy = nn.L1Loss()

criterion_force = ForceRMSELoss()

for epoch in range(max_epochs):

    train_energy_loss, train_force_loss = train(model, device, train_set, optimizer, criterion_energy, criterion_force)

    val_energy_loss, val_force_loss = evaluate(model, device, val_set, criterion_energy, criterion_force)

    print(f"#IN#Epoch {epoch + 1}, Train Energy Loss: {train_energy_loss * _STD_ENERGY:.5f}, Val Energy Loss: {val_energy_loss * _STD_ENERGY:.5f}, Train Force Loss: {train_force_loss * _STD_FORCE_SCALE:.5f}, Val Force Loss: {val_force_loss * _STD_FORCE_SCALE:.5f}")

test_energy_loss, test_force_loss = evaluate(model, device, test_set, criterion_energy, criterion_force)

print(f'Test Energy Loss: {test_energy_loss * _STD_ENERGY:.5f}, Test Force Loss: {test_force_loss * _STD_FORCE_SCALE:.5f}')
Code/utils.py ADDED
@@ -0,0 +1,77 @@
import torch
import torch.nn as nn
from data import _STD_ENERGY, _STD_FORCE_SCALE
from torch_scatter import scatter

from tqdm import tqdm


class ForceRMSELoss(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, pred, target, batch):
        # Per-molecule RMSE over atomic force components, averaged across the batch.
        return scatter((pred - target).pow(2).sum(dim=-1), batch, reduce="mean", dim=0,
                       dim_size=batch.max().item() + 1).sqrt().mean()


def train(model, device, train_loader, optimizer, criterion_energy, criterion_force, energy_weight=1.0, force_weight=1.0, clip_gradients=False, grad_clip_norm=1.0):
    model.train()

    total_energy_loss = 0.
    total_force_loss = 0.

    progress_bar = tqdm(train_loader, desc='Training', bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')

    for batch in progress_bar:
        optimizer.zero_grad()
        data = batch.to(device, non_blocking=True)

        energies, forces, mask = model(data)

        energy_loss = criterion_energy(energies, data.y)

        force_loss = criterion_force(forces, data.y_force[mask], data.batch[mask])

        loss = energy_weight * energy_loss + force_weight * force_loss

        total_energy_loss += energy_loss.item()
        total_force_loss += force_loss.item()

        loss.backward()

        if clip_gradients:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=grad_clip_norm)

        optimizer.step()

        progress_bar.set_description(
            f"Training - Energy Loss: {energy_loss * _STD_ENERGY:.5f}, "
            f"Force Loss: {force_loss * _STD_FORCE_SCALE:.5f}")

    average_energy_loss = total_energy_loss / len(train_loader)
    average_force_loss = total_force_loss / len(train_loader)
    return average_energy_loss, average_force_loss


def evaluate(model, device, loader, criterion_energy, criterion_force):
    model.eval()

    total_energy_loss = 0.
    total_force_loss = 0.

    progress_bar = tqdm(loader, desc='Evaluating', bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')

    for batch in progress_bar:
        data = batch.to(device, non_blocking=True)

        energies, forces, mask = model(data)

        energy_loss = criterion_energy(energies, data.y)
        force_loss = criterion_force(forces, data.y_force[mask], data.batch[mask])

        total_energy_loss += energy_loss.item()
        total_force_loss += force_loss.item()

        progress_bar.set_description(
            f"Evaluation - Energy Loss: {energy_loss * _STD_ENERGY:.5f}, Force Loss: {force_loss * _STD_FORCE_SCALE:.5f}")

    average_energy_loss = total_energy_loss / len(loader)
    average_force_loss = total_force_loss / len(loader)
    return average_energy_loss, average_force_loss