change to verbose, and add support for tf1
Signed-off-by: GiulioZizzo <[email protected]>
GiulioZizzo committed Dec 14, 2023
1 parent 6c1bc43 commit 39ab9cd
Showing 3 changed files with 39 additions and 37 deletions.
30 changes: 16 additions & 14 deletions art/estimators/classification/pytorch.py
@@ -375,6 +375,7 @@ def fit( # pylint: disable=W0221
training_mode: bool = True,
drop_last: bool = False,
scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
verbose: bool = False,
**kwargs,
) -> None:
"""
@@ -390,14 +391,13 @@ def fit( # pylint: disable=W0221
the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
the last batch will be smaller. (default: ``False``)
:param scheduler: Learning rate scheduler to run at the start of every epoch.
:param kwargs: Dictionary of framework-specific arguments. Currently supports "display_progress_bar" to
display training progress.
:param verbose: Whether to display progress bar information.
:param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
and providing it has no effect.
"""
import torch
from torch.utils.data import TensorDataset, DataLoader

display_progress_bar = kwargs.get("display_progress_bar", False)

# Set model mode
self._model.train(mode=training_mode)

@@ -419,8 +419,8 @@ def fit( # pylint: disable=W0221
dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)

# Start training
for _ in tqdm(range(nb_epochs), disable=not display_progress_bar, desc="Epochs"):
for x_batch, y_batch in tqdm(dataloader, disable=not display_progress_bar, desc="Batches"):
for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
for x_batch, y_batch in tqdm(dataloader, disable=not verbose, desc="Batches"):
# Move inputs to device
x_batch = x_batch.to(self._device)
y_batch = y_batch.to(self._device)
@@ -456,20 +456,19 @@ def fit( # pylint: disable=W0221
if scheduler is not None:
scheduler.step()

def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None:
"""
Fit the classifier using the generator that yields batches as specified.
:param generator: Batch generator providing `(x, y)` for each epoch.
:param nb_epochs: Number of epochs to use for training.
:param kwargs: Dictionary of framework-specific arguments. Currently supports "display_progress_bar" to
display training progress.
:param verbose: Whether to display progress bar information.
:param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
and providing it has no effect.
"""
import torch
from art.data_generators import PyTorchDataGenerator

display_progress_bar = kwargs.get("display_progress_bar", False)

# Put the model in the training mode
self._model.train()

@@ -490,8 +489,8 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
== (0, 1)
)
):
for _ in tqdm(range(nb_epochs), disable=not display_progress_bar, desc="Epochs"):
for i_batch, o_batch in tqdm(generator.iterator, disable=not display_progress_bar, desc="Batches"):
for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
for i_batch, o_batch in tqdm(generator.iterator, disable=not verbose, desc="Batches"):
if isinstance(i_batch, np.ndarray):
i_batch = torch.from_numpy(i_batch).to(self._device)
else:
@@ -500,7 +499,10 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
if isinstance(o_batch, np.ndarray):
o_batch = torch.argmax(torch.from_numpy(o_batch).to(self._device), dim=1)
else:
o_batch = torch.argmax(o_batch.to(self._device), dim=1)
if o_batch.dim() > 1:
o_batch = torch.argmax(o_batch.to(self._device), dim=1)
else:
o_batch = o_batch.to(self._device)

# Zero the parameter gradients
self._optimizer.zero_grad()
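For orientation, here is a minimal usage sketch of the new `verbose` flag on the PyTorch side. The toy model, data, and hyperparameters below are illustrative assumptions, not part of this commit.

```python
# Sketch only: assumes an ART build that includes this commit, where fit() accepts `verbose`.
import numpy as np
import torch.nn as nn
import torch.optim as optim

from art.estimators.classification import PyTorchClassifier

# Toy model and data, purely for illustration.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
classifier = PyTorchClassifier(
    model=model,
    loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=1e-3),
    input_shape=(1, 28, 28),
    nb_classes=10,
)

x = np.random.rand(128, 1, 28, 28).astype(np.float32)
y = np.eye(10)[np.random.randint(0, 10, size=128)].astype(np.float32)

# Progress bars are now opt-in through `verbose` rather than the old
# `display_progress_bar` kwarg.
classifier.fit(x, y, batch_size=32, nb_epochs=2, verbose=True)
```

The same flag is available on `fit_generator`, which after this commit also tolerates generators that yield 1-D (index) labels rather than one-hot labels.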
40 changes: 22 additions & 18 deletions art/estimators/classification/tensorflow.py
@@ -266,7 +266,9 @@ def predict( # pylint: disable=W0221

return predictions

def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None:
def fit(
self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = False, **kwargs
) -> None:
"""
Fit the classifier on the training set `(x, y)`.
@@ -275,6 +277,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
shape (nb_samples,).
:param batch_size: Size of batches.
:param nb_epochs: Number of epochs to use for training.
:param verbose: Whether to display progress bar information.
:param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
TensorFlow and providing it has no effect.
"""
@@ -298,12 +301,12 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
ind = np.arange(len(x_preprocessed)).tolist()

# Start training
for _ in range(nb_epochs):
for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
# Shuffle the examples
random.shuffle(ind)

# Train for one epoch
for m in range(num_batch):
for m in tqdm(range(num_batch), disable=not verbose, desc="Batches"):
i_batch = x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]
o_batch = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]

@@ -314,13 +317,14 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
# Run train step
self._sess.run(self.train, feed_dict=feed_dict)

def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None:
"""
Fit the classifier using the generator that yields batches as specified.
:param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native
training in TensorFlow, it will.
:param nb_epochs: Number of epochs to use for training.
:param verbose: Whether to display progress bar information.
:param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
TensorFlow and providing it has no effect.
"""
@@ -343,8 +347,8 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
== (0, 1)
)
):
for _ in range(nb_epochs):
for _ in range(int(generator.size / generator.batch_size)): # type: ignore
for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
for _ in tqdm(range(int(generator.size / generator.batch_size)), disable=not verbose, desc="Batches"): # type: ignore
i_batch, o_batch = generator.get_batch()

if self._reduce_labels:
@@ -953,7 +957,9 @@ def _predict_framework(self, x: "tf.Tensor", training_mode: bool = False) -> "tf

return self._model(x_preprocessed, training=training_mode)

def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None:
def fit(
self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = False, **kwargs
) -> None:
"""
Fit the classifier on the training set `(x, y)`.
@@ -962,14 +968,13 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
shape (nb_samples,).
:param batch_size: Size of batches.
:param nb_epochs: Number of epochs to use for training.
:param verbose: Whether to display progress bar information.
:param kwargs: Dictionary of framework-specific arguments. This parameter currently supports
"scheduler" which is an optional function that will be called at the end of every
epoch to adjust the learning rate, and "display_progress_bar" to display training progress.
epoch to adjust the learning rate.
"""
import tensorflow as tf

display_progress_bar = kwargs.get("display_progress_bar", False)

if self._train_step is None: # pragma: no cover
if self._loss_object is None: # pragma: no cover
raise TypeError(
@@ -1006,29 +1011,28 @@ def train_step(model, images, labels):

train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)

for epoch in tqdm(range(nb_epochs), disable=not display_progress_bar, desc="Epochs"):
for images, labels in tqdm(train_ds, disable=not display_progress_bar, desc="Batches"):
for epoch in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
for images, labels in tqdm(train_ds, disable=not verbose, desc="Batches"):
train_step(self.model, images, labels)

if scheduler is not None:
scheduler(epoch)

def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None:
"""
Fit the classifier using the generator that yields batches as specified.
:param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native
training in TensorFlow, it will.
:param nb_epochs: Number of epochs to use for training.
:param verbose: Whether to display progress bar information.
:param kwargs: Dictionary of framework-specific arguments. This parameter currently supports
"scheduler" which is an optional function that will be called at the end of every
epoch to adjust the learning rate, and "display_progress_bar" to display training progress.
epoch to adjust the learning rate.
"""
import tensorflow as tf
from art.data_generators import TensorFlowV2DataGenerator

display_progress_bar = kwargs.get("display_progress_bar", False)

if self._train_step is None: # pragma: no cover
if self._loss_object is None: # pragma: no cover
raise TypeError(
Expand Down Expand Up @@ -1068,8 +1072,8 @@ def train_step(model, images, labels):
== (0, 1)
)
):
for epoch in tqdm(range(nb_epochs), disable=not display_progress_bar, desc="Epochs"):
for i_batch, o_batch in tqdm(generator.iterator, disable=not display_progress_bar, desc="Batches"):
for epoch in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
for i_batch, o_batch in tqdm(generator.iterator, disable=not verbose, desc="Batches"):
if self._reduce_labels:
o_batch = tf.math.argmax(o_batch, axis=1)
train_step(self._model, i_batch, o_batch)
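A comparable sketch on the TensorFlow v2 side, combining `verbose` with the existing "scheduler" kwarg described in the docstring. The Keras model, data, and the decay schedule are illustrative assumptions, not part of this commit.

```python
# Sketch only: assumes a TensorFlowV2Classifier built from a simple Keras model.
import numpy as np
import tensorflow as tf

from art.estimators.classification import TensorFlowV2Classifier

model = tf.keras.Sequential(
    [tf.keras.Input(shape=(28, 28, 1)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(10)]
)
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

classifier = TensorFlowV2Classifier(
    model=model,
    nb_classes=10,
    input_shape=(28, 28, 1),
    loss_object=loss_object,
    optimizer=optimizer,
)

x = np.random.rand(128, 28, 28, 1).astype(np.float32)
y = np.eye(10)[np.random.randint(0, 10, size=128)].astype(np.float32)


def halve_lr(epoch):
    # Hypothetical end-of-epoch hook passed through **kwargs as "scheduler".
    optimizer.learning_rate = 1e-3 * (0.5 ** epoch)


# Epoch/batch progress bars render only when verbose=True; the TF1 classifier
# (TensorFlowClassifier) gains the same tqdm-based behaviour in this commit.
classifier.fit(x, y, batch_size=32, nb_epochs=2, verbose=True, scheduler=halve_lr)
```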
6 changes: 1 addition & 5 deletions tests/estimators/classification/test_deeplearning_common.py
@@ -202,11 +202,7 @@ def get_lr(_):
# Test a valid callback
classifier, _ = image_dl_estimator(from_logits=True)

# Keras fit has its own kwarg arguments
if framework in ["kerastf", "keras"]:
kwargs = {"callbacks": [LearningRateScheduler(get_lr)]}
else:
kwargs = {"callbacks": [LearningRateScheduler(get_lr)], "display_progress_bar": True}
kwargs = {"callbacks": [LearningRateScheduler(get_lr)], "verbose": True}
classifier.fit(x_train_mnist, y_train_mnist, batch_size=default_batch_size, nb_epochs=1, **kwargs)

# Test failure for invalid parameters: does not apply to many frameworks which allow arbitrary kwargs
