# @package _global_
# specify the default configuration here
# the order of defaults determines the order in which configs override each other
defaults:
  - _self_
  - fit: default.yaml
  - lr_tuner: default.yaml
  - eval: default.yaml
  - predict: default.yaml
  - callbacks: default.yaml
  - trainer: auto.yaml
  - logger: mlflow.yaml
  # debugging config (enable through command line, e.g. `python train.py debug=default`)
  - debug: null
  - datamodule: veas_pilot.yaml
  - model: rnn.yaml
  # override to set the ensemble config, e.g. to use the same eval.kwargs.start as the ensemble model will use
  - ensemble: null
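  # as an illustration, the ensemble config can also be selected on the command line
  # (the config name `default` is hypothetical and depends on the files under configs/ensemble/):
  # `python train.py ensemble=default`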
  #- logger: mlflow # Torch models will use tensorboard and mlflow, while non-torch models use mlflow by default. Override here or on the command line
  - paths: default.yaml
  - extras: default.yaml
  - hydra: default.yaml
  # experiment configs allow for version control of specific hyperparameters,
  # e.g. the best hyperparameters for a given model and datamodule
  - experiment: null
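  # as an illustration (the experiment name is hypothetical and depends on the files under configs/experiment/):
  # `python train.py experiment=rnn_veas_pilot`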
  # config for hyperparameter optimization
  - hparams_search: null
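  # as an illustration (the config name is hypothetical), hyperparameter search runs via Hydra multirun:
  # `python train.py -m hparams_search=rnn_optuna`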
  # optional local config for machine/user specific settings
  # it's optional since it doesn't need to exist and is excluded from version control
  - optional local: default.yaml
# task name, determines output directory path
task_name: "train"
# tags to help you identify your experiments
# you can overwrite this in experiment configs
# overwrite from command line with `python train.py tags="[first_tag, second_tag]"`
# appending lists from command line is currently not supported :(
# https://github.com/facebookresearch/hydra/issues/1547
tags: ["dev"]
# set False to skip model training
train: True
# if validate is True, the model is evaluated on the validation set, except for a TorchForecastingModel, which already validates as part of training
validate: True
# if test is True, the model is evaluated on the test set, using the best model weights achieved during training and the arguments provided in the eval block
test: False
# simply provide a checkpoint path to resume training
ckpt_path: null
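# as an illustration (the path is hypothetical), resume from a checkpoint on the command line:
# `python train.py ckpt_path=logs/train/runs/<run_dir>/checkpoints/last.ckpt`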
# seed for random number generators in pytorch, numpy and python.random
seed: null
# plot datasets to loggers. Remove or set to false or None to disable.
plot_datasets:
  separate_components: True # if True, plot each variable in targets, covariates, etc. separately; if False, plot all in one figure
measure_execution_time: True
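# as an illustration, dataset plotting can be disabled with a standard Hydra override:
# `python train.py plot_datasets=false`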