diff --git a/nbs/models.autoformer.ipynb b/nbs/models.autoformer.ipynb index ef74fb2e..872a7b28 100644 --- a/nbs/models.autoformer.ipynb +++ b/nbs/models.autoformer.ipynb @@ -458,7 +458,10 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", - " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", + " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", "\t*References*
\n", "\t- [Wu, Haixu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. \"Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting\"](https://proceedings.neurips.cc/paper/2021/hash/bcc0d400288793e8bdcd7c19a8ac0c2b-Abstract.html)
\n", @@ -503,6 +506,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(Autoformer, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -527,6 +531,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.bitcn.ipynb b/nbs/models.bitcn.ipynb index 84bb73b0..5723648a 100644 --- a/nbs/models.bitcn.ipynb +++ b/nbs/models.bitcn.ipynb @@ -178,6 +178,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References**
\n", @@ -216,6 +219,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(BiTCN, self).__init__(\n", " h=h,\n", @@ -241,6 +245,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.deepar.ipynb b/nbs/models.deepar.ipynb index feb9b272..350a21ca 100644 --- a/nbs/models.deepar.ipynb +++ b/nbs/models.deepar.ipynb @@ -183,6 +183,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References**
\n", @@ -226,6 +229,7 @@ " random_seed: int = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", "\n", " if exclude_insample_y:\n", @@ -264,6 +268,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " self.horizon_backup = self.h # Used because h=0 during training\n", diff --git a/nbs/models.deepnpts.ipynb b/nbs/models.deepnpts.ipynb index f5398288..1a0704c0 100644 --- a/nbs/models.deepnpts.ipynb +++ b/nbs/models.deepnpts.ipynb @@ -121,6 +121,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References**
\n", @@ -161,6 +164,7 @@ " random_seed: int = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " if exclude_insample_y:\n", @@ -196,6 +200,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " self.h = h\n", diff --git a/nbs/models.dilated_rnn.ipynb b/nbs/models.dilated_rnn.ipynb index e2960e4b..b8f5ea08 100644 --- a/nbs/models.dilated_rnn.ipynb +++ b/nbs/models.dilated_rnn.ipynb @@ -390,6 +390,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", @@ -425,6 +428,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(DilatedRNN, self).__init__(\n", " h=h,\n", @@ -446,6 +450,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.dlinear.ipynb b/nbs/models.dlinear.ipynb index 0cccb748..efe0878e 100644 --- a/nbs/models.dlinear.ipynb +++ b/nbs/models.dlinear.ipynb @@ -162,6 +162,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", "\t*References*
\n", @@ -198,6 +201,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(DLinear, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -222,6 +226,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " # Architecture\n", diff --git a/nbs/models.fedformer.ipynb b/nbs/models.fedformer.ipynb index 6b58bcaa..7558530e 100644 --- a/nbs/models.fedformer.ipynb +++ b/nbs/models.fedformer.ipynb @@ -451,6 +451,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " \"\"\"\n", @@ -495,6 +498,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(FEDformer, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -517,7 +521,8 @@ " scaler_type=scaler_type,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " dataloader_kwargs=dataloader_kwargs, \n", + " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " # Architecture\n", " self.label_len = int(np.ceil(input_size * decoder_input_size_multiplier))\n", diff --git a/nbs/models.gru.ipynb b/nbs/models.gru.ipynb index 5d979bc0..eebe2362 100644 --- a/nbs/models.gru.ipynb +++ b/nbs/models.gru.ipynb @@ -134,6 +134,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", @@ -170,6 +173,7 @@ " random_seed=1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(GRU, self).__init__(\n", " h=h,\n", @@ -191,6 +195,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.informer.ipynb b/nbs/models.informer.ipynb index 51a765bc..bde9492f 100644 --- a/nbs/models.informer.ipynb +++ b/nbs/models.informer.ipynb @@ -306,6 +306,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", "\t*References*
\n", @@ -351,6 +354,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(Informer, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -375,6 +379,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.itransformer.ipynb b/nbs/models.itransformer.ipynb index 7ee4e7ea..5ea9736d 100644 --- a/nbs/models.itransformer.ipynb +++ b/nbs/models.itransformer.ipynb @@ -228,6 +228,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \n", " **References**
\n", @@ -267,7 +270,8 @@ " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers=None, \n", " **trainer_kwargs):\n", " \n", " super(iTransformer, self).__init__(h=h,\n", @@ -289,6 +293,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " self.enc_in = n_series\n", diff --git a/nbs/models.kan.ipynb b/nbs/models.kan.ipynb index 50a75ddf..cdf08b7a 100644 --- a/nbs/models.kan.ipynb +++ b/nbs/models.kan.ipynb @@ -362,6 +362,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References**
\n", @@ -408,6 +411,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " \n", " # Inherit BaseWindows class\n", @@ -434,6 +438,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs = dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " # Architecture\n", diff --git a/nbs/models.lstm.ipynb b/nbs/models.lstm.ipynb index 0ac6314e..af9f77f9 100644 --- a/nbs/models.lstm.ipynb +++ b/nbs/models.lstm.ipynb @@ -121,6 +121,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", @@ -156,6 +159,7 @@ " random_seed = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(LSTM, self).__init__(\n", " h=h,\n", @@ -177,6 +181,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.mlp.ipynb b/nbs/models.mlp.ipynb index 67063d69..ae24586c 100644 --- a/nbs/models.mlp.ipynb +++ b/nbs/models.mlp.ipynb @@ -114,6 +114,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", @@ -148,6 +151,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseWindows class\n", @@ -174,6 +178,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.mlpmultivariate.ipynb b/nbs/models.mlpmultivariate.ipynb index b998e4b1..ed2cfefd 100644 --- a/nbs/models.mlpmultivariate.ipynb +++ b/nbs/models.mlpmultivariate.ipynb @@ -108,6 +108,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", @@ -138,6 +141,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseMultivariate class\n", @@ -160,6 +164,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.nbeats.ipynb b/nbs/models.nbeats.ipynb index 64541fb0..090df8fb 100644 --- a/nbs/models.nbeats.ipynb +++ b/nbs/models.nbeats.ipynb @@ -270,6 +270,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -310,6 +313,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " \n", " # Protect horizon collapsed seasonality and trend NBEATSx-i basis\n", @@ -338,6 +342,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.nbeatsx.ipynb b/nbs/models.nbeatsx.ipynb index b0befa8d..121fb3d7 100644 --- a/nbs/models.nbeatsx.ipynb +++ b/nbs/models.nbeatsx.ipynb @@ -414,6 +414,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -460,6 +463,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs,\n", " ):\n", " # Protect horizon collapsed seasonality and trend NBEATSx-i basis\n", @@ -492,6 +496,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.nhits.ipynb b/nbs/models.nhits.ipynb index 79b05b98..d9a6fadf 100644 --- a/nbs/models.nhits.ipynb +++ b/nbs/models.nhits.ipynb @@ -303,6 +303,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -349,6 +352,7 @@ " random_seed: int = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseWindows class\n", @@ -375,6 +379,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.nlinear.ipynb b/nbs/models.nlinear.ipynb index 30152e9e..5d1999ec 100644 --- a/nbs/models.nlinear.ipynb +++ b/nbs/models.nlinear.ipynb @@ -102,6 +102,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", "\t*References*
\n", @@ -137,6 +140,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(NLinear, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -161,6 +165,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.patchtst.ipynb b/nbs/models.patchtst.ipynb index 99d5a5bc..1c48ac0e 100644 --- a/nbs/models.patchtst.ipynb +++ b/nbs/models.patchtst.ipynb @@ -662,6 +662,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -714,6 +717,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers= None,\n", " **trainer_kwargs):\n", " super(PatchTST, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -738,6 +742,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs) \n", "\n", " # Enforce correct patch_len, regardless of user input\n", diff --git a/nbs/models.rmok.ipynb b/nbs/models.rmok.ipynb index 86bc7012..b666cf83 100644 --- a/nbs/models.rmok.ipynb +++ b/nbs/models.rmok.ipynb @@ -359,6 +359,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " Reference
\n", @@ -395,7 +398,8 @@ " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None, \n", " **trainer_kwargs):\n", " \n", " super(RMoK, self).__init__(h=h,\n", @@ -417,6 +421,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " self.input_size = input_size\n", diff --git a/nbs/models.rnn.ipynb b/nbs/models.rnn.ipynb index 9fefcd58..cfff45df 100644 --- a/nbs/models.rnn.ipynb +++ b/nbs/models.rnn.ipynb @@ -126,7 +126,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `alias`: str, optional, Custom name of the model.
\n", - "\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", @@ -162,7 +164,8 @@ " scaler_type: str='robust',\n", " random_seed=1,\n", " drop_last_loader=False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(RNN, self).__init__(\n", " h=h,\n", @@ -184,6 +187,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.softs.ipynb b/nbs/models.softs.ipynb index 4cf421c8..2f73995b 100644 --- a/nbs/models.softs.ipynb +++ b/nbs/models.softs.ipynb @@ -200,6 +200,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \n", " **References**
\n", @@ -237,7 +240,8 @@ " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " \n", " super(SOFTS, self).__init__(h=h,\n", @@ -259,6 +263,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " self.h = h\n", diff --git a/nbs/models.stemgnn.ipynb b/nbs/models.stemgnn.ipynb index e9f07795..1a1edc78 100644 --- a/nbs/models.stemgnn.ipynb +++ b/nbs/models.stemgnn.ipynb @@ -204,6 +204,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", @@ -236,6 +239,7 @@ " random_seed: int = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers= None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseMultivariate class\n", @@ -258,6 +262,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " # Quick fix for now, fix the model later.\n", " if n_stacks != 2:\n", diff --git a/nbs/models.tcn.ipynb b/nbs/models.tcn.ipynb index c2c2f3cc..06cc8c9d 100644 --- a/nbs/models.tcn.ipynb +++ b/nbs/models.tcn.ipynb @@ -126,6 +126,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", " # Class attributes\n", @@ -160,7 +163,8 @@ " scaler_type: str ='robust',\n", " random_seed: int = 1,\n", " drop_last_loader = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(TCN, self).__init__(\n", " h=h,\n", @@ -182,6 +186,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs = dataloader_kwargs,\n", + " config_optimizers = config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.tft.ipynb b/nbs/models.tft.ipynb index faaab552..1a5bd339 100644 --- a/nbs/models.tft.ipynb +++ b/nbs/models.tft.ipynb @@ -696,6 +696,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -739,6 +742,7 @@ " drop_last_loader=False,\n", " random_seed: int = 1,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs,\n", " ):\n", "\n", @@ -766,6 +770,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs,\n", " )\n", " self.example_length = input_size + h\n", diff --git a/nbs/models.tide.ipynb b/nbs/models.tide.ipynb index 3a586cc1..49a0643b 100644 --- a/nbs/models.tide.ipynb +++ b/nbs/models.tide.ipynb @@ -167,6 +167,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -211,6 +214,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseWindows class\n", @@ -238,6 +242,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " ) \n", " self.h = h\n", diff --git a/nbs/models.timellm.ipynb b/nbs/models.timellm.ipynb index 1ba9472e..b8a17b19 100755 --- a/nbs/models.timellm.ipynb +++ b/nbs/models.timellm.ipynb @@ -291,6 +291,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -343,6 +346,7 @@ " drop_last_loader: bool = False,\n", " random_seed: int = 1,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(TimeLLM, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -366,6 +370,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " # Architecture\n", diff --git a/nbs/models.timemixer.ipynb b/nbs/models.timemixer.ipynb index 129a9d09..5ee98d4f 100644 --- a/nbs/models.timemixer.ipynb +++ b/nbs/models.timemixer.ipynb @@ -360,6 +360,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References**
\n", @@ -404,7 +407,8 @@ " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " \n", " super(TimeMixer, self).__init__(h=h,\n", @@ -426,6 +430,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " self.label_len = int(np.ceil(input_size * decoder_input_size_multiplier))\n", diff --git a/nbs/models.timesnet.ipynb b/nbs/models.timesnet.ipynb index 1e1a3d37..fadfb1c3 100644 --- a/nbs/models.timesnet.ipynb +++ b/nbs/models.timesnet.ipynb @@ -263,6 +263,9 @@ " If True `TimeSeriesDataLoader` drops last non-full batch.\n", " `dataloader_kwargs`: dict, optional (default=None)\n", " List of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " **trainer_kwargs\n", " Keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer)\n", "\n", @@ -305,7 +308,8 @@ " scaler_type: str = 'standard',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(TimesNet, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -330,6 +334,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.tsmixer.ipynb b/nbs/models.tsmixer.ipynb index 2b05bd51..bef03803 100644 --- a/nbs/models.tsmixer.ipynb +++ b/nbs/models.tsmixer.ipynb @@ -250,6 +250,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -286,6 +289,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseMultivariate class\n", @@ -308,6 +312,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Reversible InstanceNormalization layer\n", diff --git a/nbs/models.tsmixerx.ipynb b/nbs/models.tsmixerx.ipynb index 7d340cde..4916129a 100644 --- a/nbs/models.tsmixerx.ipynb +++ b/nbs/models.tsmixerx.ipynb @@ -274,6 +274,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -310,6 +313,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseMultvariate class\n", @@ -332,6 +336,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " # Reversible InstanceNormalization layer\n", " self.revin = revin\n", diff --git a/nbs/models.vanillatransformer.ipynb b/nbs/models.vanillatransformer.ipynb index f1d60f36..78ef923b 100644 --- a/nbs/models.vanillatransformer.ipynb +++ b/nbs/models.vanillatransformer.ipynb @@ -198,6 +198,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument which is the subclass of Neuralforecast's `BaseModel` to speficy the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", "\t*References*
\n", @@ -240,6 +243,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(VanillaTransformer, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -263,6 +267,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/neuralforecast/models/autoformer.py b/neuralforecast/models/autoformer.py index 5e97561d..6deeb800 100644 --- a/neuralforecast/models/autoformer.py +++ b/neuralforecast/models/autoformer.py @@ -442,7 +442,10 @@ class Autoformer(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
- `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
+    `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in<br>
+    https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br>
+    Note that the function must accept an argument (an instance of a subclass of NeuralForecast's `BaseModel`) so it can pass the model's `parameters()` to the optimizer.<br>
+    `**trainer_kwargs`: keyword trainer arguments inherited from [PyTorch Lightning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).<br>
*References*
- [Wu, Haixu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. "Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting"](https://proceedings.neurips.cc/paper/2021/hash/bcc0d400288793e8bdcd7c19a8ac0c2b-Abstract.html)
@@ -489,6 +492,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super(Autoformer, self).__init__( @@ -515,6 +519,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/models/bitcn.py b/neuralforecast/models/bitcn.py index 856727c0..a631164c 100644 --- a/neuralforecast/models/bitcn.py +++ b/neuralforecast/models/bitcn.py @@ -116,6 +116,9 @@ class BiTCN(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+    `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in<br>
+    https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br>
+    Note that the function must accept an argument (an instance of a subclass of NeuralForecast's `BaseModel`) so it can pass the model's `parameters()` to the optimizer.<br>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -156,6 +159,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(BiTCN, self).__init__( @@ -182,6 +186,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/deepar.py b/neuralforecast/models/deepar.py index 047c5349..eedcc482 100644 --- a/neuralforecast/models/deepar.py +++ b/neuralforecast/models/deepar.py @@ -87,6 +87,9 @@ class DeepAR(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -134,6 +137,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader=False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -178,6 +182,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/deepnpts.py b/neuralforecast/models/deepnpts.py
index 2ccde349..77fd2cc2 100644
--- a/neuralforecast/models/deepnpts.py
+++ b/neuralforecast/models/deepnpts.py
@@ -49,6 +49,9 @@ class DeepNPTS(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -91,6 +94,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader=False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -132,6 +136,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/dilated_rnn.py b/neuralforecast/models/dilated_rnn.py
index 296e4de7..c2c5a299 100644
--- a/neuralforecast/models/dilated_rnn.py
+++ b/neuralforecast/models/dilated_rnn.py
@@ -317,6 +317,9 @@ class DilatedRNN(BaseRecurrent):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -354,6 +357,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(DilatedRNN, self).__init__( @@ -376,6 +380,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/dlinear.py b/neuralforecast/models/dlinear.py index a43f167c..8a0c5892 100644 --- a/neuralforecast/models/dlinear.py +++ b/neuralforecast/models/dlinear.py @@ -75,6 +75,9 @@ class DLinear(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
@@ -113,6 +116,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
super(DLinear, self).__init__(
@@ -139,6 +143,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/fedformer.py b/neuralforecast/models/fedformer.py
index 68990131..8620d9df 100644
--- a/neuralforecast/models/fedformer.py
+++ b/neuralforecast/models/fedformer.py
@@ -440,6 +440,9 @@ class FEDformer(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -486,6 +489,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super(FEDformer, self).__init__( @@ -511,6 +515,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) # Architecture diff --git a/neuralforecast/models/gru.py b/neuralforecast/models/gru.py index 8e428a19..f969d2dd 100644 --- a/neuralforecast/models/gru.py +++ b/neuralforecast/models/gru.py @@ -52,6 +52,9 @@ class GRU(BaseRecurrent): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -90,6 +93,7 @@ def __init__( random_seed=1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(GRU, self).__init__( @@ -112,6 +116,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/informer.py b/neuralforecast/models/informer.py index aefa8afa..2865b7a8 100644 --- a/neuralforecast/models/informer.py +++ b/neuralforecast/models/informer.py @@ -226,6 +226,9 @@ class Informer(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
@@ -273,6 +276,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs,
):
super(Informer, self).__init__(
@@ -299,6 +303,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs,
)
diff --git a/neuralforecast/models/itransformer.py b/neuralforecast/models/itransformer.py
index 35a56f77..30e33795 100644
--- a/neuralforecast/models/itransformer.py
+++ b/neuralforecast/models/itransformer.py
@@ -134,6 +134,9 @@ class iTransformer(BaseMultivariate):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -175,6 +178,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -198,6 +202,7 @@ def __init__(
random_seed=random_seed,
drop_last_loader=drop_last_loader,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/kan.py b/neuralforecast/models/kan.py
index db6c0ae0..a9e95012 100644
--- a/neuralforecast/models/kan.py
+++ b/neuralforecast/models/kan.py
@@ -284,6 +284,9 @@ class KAN(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -331,6 +334,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -359,6 +363,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/lstm.py b/neuralforecast/models/lstm.py
index ff901ca5..eaa67115 100644
--- a/neuralforecast/models/lstm.py
+++ b/neuralforecast/models/lstm.py
@@ -50,6 +50,9 @@ class LSTM(BaseRecurrent):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -87,6 +90,7 @@ def __init__( random_seed=1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(LSTM, self).__init__( @@ -109,6 +113,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/mlp.py b/neuralforecast/models/mlp.py index 48b780b5..c3f5f452 100644 --- a/neuralforecast/models/mlp.py +++ b/neuralforecast/models/mlp.py @@ -49,6 +49,9 @@ class MLP(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -85,6 +88,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -113,6 +117,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/mlpmultivariate.py b/neuralforecast/models/mlpmultivariate.py index 31225c64..dcb06505 100644 --- a/neuralforecast/models/mlpmultivariate.py +++ b/neuralforecast/models/mlpmultivariate.py @@ -43,6 +43,9 @@ class MLPMultivariate(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -75,6 +78,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -99,6 +103,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/nbeats.py b/neuralforecast/models/nbeats.py index a2a36822..cd55307a 100644 --- a/neuralforecast/models/nbeats.py +++ b/neuralforecast/models/nbeats.py @@ -228,6 +228,9 @@ class NBEATS(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -270,6 +273,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs,
):
@@ -300,6 +304,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs,
)
diff --git a/neuralforecast/models/nbeatsx.py b/neuralforecast/models/nbeatsx.py
index 0baa0c6c..b08bc06d 100644
--- a/neuralforecast/models/nbeatsx.py
+++ b/neuralforecast/models/nbeatsx.py
@@ -315,6 +315,9 @@ class NBEATSx(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -361,6 +364,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs,
):
# Protect horizon collapsed seasonality and trend NBEATSx-i basis
@@ -394,6 +398,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs,
)
diff --git a/neuralforecast/models/nhits.py b/neuralforecast/models/nhits.py
index c1e56f5b..d28936d5 100644
--- a/neuralforecast/models/nhits.py
+++ b/neuralforecast/models/nhits.py
@@ -226,6 +226,9 @@ class NHITS(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -274,6 +277,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader=False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs,
):
@@ -302,6 +306,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs,
)
diff --git a/neuralforecast/models/nlinear.py b/neuralforecast/models/nlinear.py
index 19e17646..bf633fe0 100644
--- a/neuralforecast/models/nlinear.py
+++ b/neuralforecast/models/nlinear.py
@@ -39,6 +39,9 @@ class NLinear(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
@@ -76,6 +79,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
super(NLinear, self).__init__(
@@ -102,6 +106,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/patchtst.py b/neuralforecast/models/patchtst.py
index 314d5620..95a5d7e7 100644
--- a/neuralforecast/models/patchtst.py
+++ b/neuralforecast/models/patchtst.py
@@ -836,6 +836,9 @@ class PatchTST(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -890,6 +893,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
super(PatchTST, self).__init__(
@@ -916,6 +920,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/rmok.py b/neuralforecast/models/rmok.py
index 2542b775..97e179d0 100644
--- a/neuralforecast/models/rmok.py
+++ b/neuralforecast/models/rmok.py
@@ -284,6 +284,9 @@ class RMoK(BaseMultivariate):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
Reference
@@ -322,6 +325,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -345,6 +349,7 @@ def __init__(
random_seed=random_seed,
drop_last_loader=drop_last_loader,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/rnn.py b/neuralforecast/models/rnn.py
index cb346ed4..8b486512 100644
--- a/neuralforecast/models/rnn.py
+++ b/neuralforecast/models/rnn.py
@@ -51,7 +51,9 @@ class RNN(BaseRecurrent):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`alias`: str, optional, Custom name of the model.
-
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -90,6 +92,7 @@ def __init__( random_seed=1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(RNN, self).__init__( @@ -112,6 +115,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/softs.py b/neuralforecast/models/softs.py index 3b1aadd8..1a521974 100644 --- a/neuralforecast/models/softs.py +++ b/neuralforecast/models/softs.py @@ -109,6 +109,9 @@ class SOFTS(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -148,6 +151,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -171,6 +175,7 @@ def __init__(
random_seed=random_seed,
drop_last_loader=drop_last_loader,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/stemgnn.py b/neuralforecast/models/stemgnn.py
index 3d575acc..f3513e8f 100644
--- a/neuralforecast/models/stemgnn.py
+++ b/neuralforecast/models/stemgnn.py
@@ -169,6 +169,9 @@ class StemGNN(BaseMultivariate):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -203,6 +206,7 @@ def __init__( random_seed: int = 1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -227,6 +231,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) # Quick fix for now, fix the model later. diff --git a/neuralforecast/models/tcn.py b/neuralforecast/models/tcn.py index 79434c32..a63f1d38 100644 --- a/neuralforecast/models/tcn.py +++ b/neuralforecast/models/tcn.py @@ -47,6 +47,9 @@ class TCN(BaseRecurrent): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -84,6 +87,7 @@ def __init__( random_seed: int = 1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(TCN, self).__init__( @@ -106,6 +110,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/tft.py b/neuralforecast/models/tft.py index eaef3802..ba27b59e 100644 --- a/neuralforecast/models/tft.py +++ b/neuralforecast/models/tft.py @@ -457,6 +457,9 @@ class TFT(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -500,6 +503,7 @@ def __init__(
drop_last_loader=False,
random_seed: int = 1,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs,
):
@@ -527,6 +531,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs,
)
self.example_length = input_size + h
diff --git a/neuralforecast/models/tide.py b/neuralforecast/models/tide.py
index d331d108..7a8aadaa 100644
--- a/neuralforecast/models/tide.py
+++ b/neuralforecast/models/tide.py
@@ -81,6 +81,9 @@ class TiDE(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -127,6 +130,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -155,6 +159,7 @@ def __init__(
random_seed=random_seed,
drop_last_loader=drop_last_loader,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
self.h = h
diff --git a/neuralforecast/models/timellm.py b/neuralforecast/models/timellm.py
index aa8d7f07..dbf0869b 100644
--- a/neuralforecast/models/timellm.py
+++ b/neuralforecast/models/timellm.py
@@ -214,6 +214,9 @@ class TimeLLM(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -267,6 +270,7 @@ def __init__(
drop_last_loader: bool = False,
random_seed: int = 1,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs,
):
super(TimeLLM, self).__init__(
@@ -292,6 +296,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs,
)
diff --git a/neuralforecast/models/timemixer.py b/neuralforecast/models/timemixer.py
index 0d5ea5a3..da8af5ab 100644
--- a/neuralforecast/models/timemixer.py
+++ b/neuralforecast/models/timemixer.py
@@ -285,6 +285,9 @@ class TimeMixer(BaseMultivariate):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -331,6 +334,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs,
):
@@ -354,6 +358,7 @@ def __init__(
random_seed=random_seed,
drop_last_loader=drop_last_loader,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs,
)
diff --git a/neuralforecast/models/timesnet.py b/neuralforecast/models/timesnet.py
index d854ae52..031fd699 100644
--- a/neuralforecast/models/timesnet.py
+++ b/neuralforecast/models/timesnet.py
@@ -182,6 +182,9 @@ class TimesNet(BaseWindows):
`drop_last_loader`: bool=False,
If True `TimeSeriesDataLoader` drops last non-full batch.
`dataloader_kwargs`: dict, optional (default=None)
List of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.<br/>
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
**trainer_kwargs
Keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer)
@@ -227,6 +230,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
super(TimesNet, self).__init__(
@@ -253,6 +257,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/tsmixer.py b/neuralforecast/models/tsmixer.py
index b41b4181..35542615 100644
--- a/neuralforecast/models/tsmixer.py
+++ b/neuralforecast/models/tsmixer.py
@@ -160,6 +160,9 @@ class TSMixer(BaseMultivariate):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -198,6 +201,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -222,6 +226,7 @@ def __init__(
random_seed=random_seed,
drop_last_loader=drop_last_loader,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
diff --git a/neuralforecast/models/tsmixerx.py b/neuralforecast/models/tsmixerx.py
index 8f5101a7..ef025fd6 100644
--- a/neuralforecast/models/tsmixerx.py
+++ b/neuralforecast/models/tsmixerx.py
@@ -188,6 +188,9 @@ class TSMixerx(BaseMultivariate):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -226,6 +229,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs
):
@@ -250,6 +254,7 @@ def __init__(
random_seed=random_seed,
drop_last_loader=drop_last_loader,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs
)
# Reversible InstanceNormalization layer
diff --git a/neuralforecast/models/vanillatransformer.py b/neuralforecast/models/vanillatransformer.py
index 1c4645c2..86177a4f 100644
--- a/neuralforecast/models/vanillatransformer.py
+++ b/neuralforecast/models/vanillatransformer.py
@@ -117,6 +117,9 @@ class VanillaTransformer(BaseWindows):
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br/>
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, A callable that implements the optimization behavior as detailed in<br/>
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers<br/>
+ Note that the callable must accept an argument that is a subclass of NeuralForecast's `BaseModel`, so that it can specify the model's parameters() for the optimizer.<br/>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
@@ -161,6 +164,7 @@ def __init__(
random_seed: int = 1,
drop_last_loader: bool = False,
dataloader_kwargs=None,
+ config_optimizers=None,
**trainer_kwargs,
):
super(VanillaTransformer, self).__init__(
@@ -186,6 +190,7 @@ def __init__(
drop_last_loader=drop_last_loader,
random_seed=random_seed,
dataloader_kwargs=dataloader_kwargs,
+ config_optimizers=config_optimizers,
**trainer_kwargs,
)
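For reviewers, a minimal sketch of how the new `config_optimizers` argument can be used once this diff is applied. The choice of `NHITS` and the Adam/StepLR setup below are illustrative assumptions, not part of the diff; the callable follows Lightning's `configure_optimizers` contract and receives the model so it can reach its parameters:

    import torch
    from neuralforecast import NeuralForecast
    from neuralforecast.models import NHITS

    # The callable receives the model (a subclass of NeuralForecast's `BaseModel`)
    # so it can reference model.parameters(), and returns the optimizer and
    # scheduler in a form accepted by Lightning's `configure_optimizers`.
    def config_optimizers(model):
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    model = NHITS(h=12, input_size=24, max_steps=100, config_optimizers=config_optimizers)
    nf = NeuralForecast(models=[model], freq="M")

When `config_optimizers` is None (the default), the models keep their existing optimization behavior, so the change is backward compatible.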