# Global variables
name: DimeNet++
dataset_name: dataset_train_tiny
max_steps: 1000000
job_type: train
pretrained: null # name of pretrained split or 'null'
ckpt_path: null # path to checkpoint for training resume or test run

# Datamodule parameters
root: ./datasets/nablaDFT/${.job_type}
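# ${.job_type} is an OmegaConf relative interpolation of the job_type key above,
# so with job_type: train the root resolves to ./datasets/nablaDFT/train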
batch_size: 32
num_workers: 8

# Devices
devices: [0]

# Trainer parameters
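# gradient_clip_val / gradient_clip_algorithm mirror the PyTorch Lightning Trainer
# arguments of the same name; presumably interpolated into trainer: train.yaml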
gradient_clip_algorithm: null
gradient_clip_val: null

# configs
defaults:
  - _self_
  - datamodule: nablaDFT_pyg.yaml # dataset config
  - model: dimenetplusplus.yaml # model config
  - callbacks: default.yaml # pl callbacks config
  - loggers: wandb.yaml # pl loggers config
  - trainer: train.yaml # trainer config
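# Hydra merges _self_ first, then each group config above into its own package
# (datamodule, model, callbacks, loggers, trainer); later entries win on collisions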

# need this to set working dir as current dir
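# output_subdir: null disables the .hydra/ config dump; run.dir: . keeps outputs in
# the launch directory instead of Hydra's default timestamped outputs/ folder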
hydra:
  output_subdir: null
  run:
    dir: .

original_work_dir: ${hydra:runtime.cwd}

seed: 23
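
# Example launch (a sketch, assuming the repo's Hydra entrypoint is run.py and that
# this file is used as the primary config, e.g. --config-name dimenetplusplus.yaml):
#   python run.py --config-name dimenetplusplus.yaml
# To resume from a checkpoint, override ckpt_path from the command line:
#   python run.py --config-name dimenetplusplus.yaml ckpt_path=path/to/last.ckpt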