diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 52b38d604d..f47a7cd145 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -438,7 +438,7 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional[Any] = None, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, update_batchnorm: bool = True, batchnorm_update_epochs: int = 1, transform: Optional["torchvision.transforms.transforms.Compose"] = None, @@ -469,8 +469,6 @@ def fit( # pylint: disable=W0221 """ import torch - display_pb = self.process_verbose(verbose) - # Set model mode self._model.train(mode=training_mode) @@ -501,7 +499,7 @@ def fit( # pylint: disable=W0221 epoch_loss = [] epoch_batch_sizes = [] - pbar = tqdm(range(num_batch), disable=not display_pb) + pbar = tqdm(range(num_batch), disable=not verbose) # Train for one epoch for m in pbar: @@ -547,7 +545,7 @@ def fit( # pylint: disable=W0221 epoch_loss.append(loss.cpu().detach().numpy()) epoch_batch_sizes.append(len(i_batch)) - if display_pb: + if verbose: pbar.set_description( f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} " diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index 4261443a78..e99154198b 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -160,7 +160,7 @@ def fit( # pylint: disable=W0221 y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs, ) -> None: """ @@ -171,15 +171,13 @@ def fit( # pylint: disable=W0221 shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param verbose: If to display training progress bars + :param verbose: Display training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports "scheduler" which is an optional function that will be called at the end of every epoch to adjust the learning rate. 
""" import tensorflow as tf - display_pb = self.process_verbose(verbose) - if self._train_step is None: # pragma: no cover if self._loss_object is None: # pragma: no cover raise TypeError( @@ -222,7 +220,7 @@ def train_step(model, images, labels): epoch_loss = [] epoch_batch_sizes = [] - pbar = tqdm(range(num_batch), disable=not display_pb) + pbar = tqdm(range(num_batch), disable=not verbose) ind = np.arange(len(x_preprocessed)) for m in pbar: @@ -239,7 +237,7 @@ def train_step(model, images, labels): else: train_step(self.model, images, labels) - if display_pb: + if verbose: if self._train_step is None: pbar.set_description( f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} " diff --git a/art/estimators/certification/randomized_smoothing/macer/pytorch.py b/art/estimators/certification/randomized_smoothing/macer/pytorch.py index cde56c252a..4bc13b1be5 100644 --- a/art/estimators/certification/randomized_smoothing/macer/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/macer/pytorch.py @@ -138,7 +138,7 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs, ) -> None: """ @@ -154,8 +154,7 @@ def fit( # pylint: disable=W0221 the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) :param scheduler: Learning rate scheduler to run at the start of every epoch. - :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when - class was initialised. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. """ @@ -163,8 +162,6 @@ class was initialised. import torch.nn.functional as F from torch.utils.data import TensorDataset, DataLoader - display_pb = self.process_verbose(verbose) - # Set model mode self._model.train(mode=training_mode) @@ -190,7 +187,7 @@ class was initialised. ) # Start training - for _ in trange(nb_epochs, disable=not display_pb): + for _ in trange(nb_epochs, disable=not verbose): for x_batch, y_batch in dataloader: # Move inputs to GPU x_batch = x_batch.to(self.device) diff --git a/art/estimators/certification/randomized_smoothing/macer/tensorflow.py b/art/estimators/certification/randomized_smoothing/macer/tensorflow.py index 5c88011f8d..860921507c 100644 --- a/art/estimators/certification/randomized_smoothing/macer/tensorflow.py +++ b/art/estimators/certification/randomized_smoothing/macer/tensorflow.py @@ -138,7 +138,7 @@ def fit( y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs ) -> None: """ @@ -149,16 +149,13 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when - class was initialised. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports "scheduler" which is an optional function that will be called at the end of every epoch to adjust the learning rate. 
""" import tensorflow as tf - display_pb = self.process_verbose(verbose) - if self._train_step is None: # pragma: no cover if self._optimizer is None: # pragma: no cover raise ValueError( @@ -225,7 +222,7 @@ def train_step(model, images, labels): train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size) - for epoch in trange(nb_epochs, disable=not display_pb): + for epoch in trange(nb_epochs, disable=not verbose): for images, labels in train_ds: # Tile samples for Gaussian augmentation input_size = len(images) diff --git a/art/estimators/certification/randomized_smoothing/pytorch.py b/art/estimators/certification/randomized_smoothing/pytorch.py index eff16f7c90..fddc7d0938 100644 --- a/art/estimators/certification/randomized_smoothing/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/pytorch.py @@ -140,7 +140,7 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs, ) -> None: """ @@ -156,16 +156,13 @@ def fit( # pylint: disable=W0221 the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) :param scheduler: Learning rate scheduler to run at the start of every epoch. - :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when - class was initialised. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. """ import torch from torch.utils.data import TensorDataset, DataLoader - display_pb = self.process_verbose(verbose) - # Set model mode self._model.train(mode=training_mode) @@ -187,7 +184,7 @@ class was initialised. dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last) # Start training - for _ in trange(nb_epochs, disable=not display_pb): + for _ in trange(nb_epochs, disable=not verbose): for x_batch, y_batch in dataloader: # Move inputs to device x_batch = x_batch.to(self._device) diff --git a/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py b/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py index b80d5888b2..a0ac0a1742 100644 --- a/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py @@ -155,7 +155,7 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs, ) -> None: """ @@ -171,16 +171,13 @@ def fit( # pylint: disable=W0221 the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) :param scheduler: Learning rate scheduler to run at the start of every epoch. - :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when - class was initialised. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. 
""" import torch from torch.utils.data import TensorDataset, DataLoader - display_pb = self.process_verbose(verbose) - # Set model mode self._model.train(mode=training_mode) @@ -202,7 +199,7 @@ class was initialised. dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last) # Start training - for epoch in trange(nb_epochs, disable=not display_pb): + for epoch in trange(nb_epochs, disable=not verbose): self.attack.norm = min(self.epsilon, (epoch + 1) * self.epsilon / self.warmup) for x_batch, y_batch in dataloader: diff --git a/art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py b/art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py index 938db2a5c3..0d8b960de0 100644 --- a/art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py +++ b/art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py @@ -155,7 +155,7 @@ def fit( y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs ) -> None: """ @@ -166,16 +166,13 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when - class was initialised. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports "scheduler" which is an optional function that will be called at the end of every epoch to adjust the learning rate. """ import tensorflow as tf - display_pb = self.process_verbose(verbose) - if self._train_step is None: # pragma: no cover if self._loss_object is None: # pragma: no cover raise TypeError( @@ -212,7 +209,7 @@ def train_step(model, images, labels): train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size) - for epoch in trange(nb_epochs, disable=not display_pb): + for epoch in trange(nb_epochs, disable=not verbose): self.attack.norm = min(self.epsilon, (epoch + 1) * self.epsilon / self.warmup) for x_batch, y_batch in train_ds: diff --git a/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py b/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py index 9bf3b848e4..9a84470edc 100644 --- a/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py @@ -172,7 +172,7 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs, ) -> None: """ @@ -188,8 +188,7 @@ def fit( # pylint: disable=W0221 the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) :param scheduler: Learning rate scheduler to run at the start of every epoch. - :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when - class was initialised. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. """ @@ -197,8 +196,6 @@ class was initialised. 
import torch.nn.functional as F from torch.utils.data import TensorDataset, DataLoader - display_pb = self.process_verbose(verbose) - # Set model mode self._model.train(mode=training_mode) @@ -220,7 +217,7 @@ class was initialised. dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last) # Start training - for epoch in trange(nb_epochs, disable=not display_pb): + for epoch in trange(nb_epochs, disable=not verbose): warmup_v = min(1.0, (epoch + 1) / self.warmup) for x_batch, y_batch in dataloader: diff --git a/art/estimators/certification/randomized_smoothing/tensorflow.py b/art/estimators/certification/randomized_smoothing/tensorflow.py index 74c1c875da..6c6949a770 100644 --- a/art/estimators/certification/randomized_smoothing/tensorflow.py +++ b/art/estimators/certification/randomized_smoothing/tensorflow.py @@ -137,7 +137,7 @@ def fit( # pylint: disable=W0221 y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs ) -> None: """ @@ -148,16 +148,13 @@ def fit( # pylint: disable=W0221 shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when - class was initialised. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports "scheduler" which is an optional function that will be called at the end of every epoch to adjust the learning rate. """ import tensorflow as tf - display_pb = self.process_verbose(verbose) - if self._train_step is None: # pragma: no cover if self._loss_object is None: # pragma: no cover raise TypeError( @@ -194,7 +191,7 @@ def train_step(model, images, labels): train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size) - for epoch in trange(nb_epochs, disable=not display_pb): + for epoch in trange(nb_epochs, disable=not verbose): for images, labels in train_ds: # Add random noise for randomized smoothing images += tf.random.normal(shape=images.shape, mean=0.0, stddev=self.scale) diff --git a/art/estimators/classification/pytorch.py b/art/estimators/classification/pytorch.py index 41a3408eb6..a9fe17ab89 100644 --- a/art/estimators/classification/pytorch.py +++ b/art/estimators/classification/pytorch.py @@ -366,35 +366,6 @@ def _predict_framework( return output, y_preprocessed - def process_verbose(self, verbose: Optional[Union[bool, int]] = None) -> bool: - """ - Function to unify the various ways implemented in ART of displaying progress bars - into a single True/False output. - - :param verbose: If to display the progress bar information in one of a few possible formats. - :return: True/False if to display the progress bars. 
- """ - - if verbose is not None: - if isinstance(verbose, int): - if verbose <= 0: - display_pb = False - else: - display_pb = True - elif isinstance(verbose, bool): - display_pb = verbose - else: - raise ValueError("Verbose should be True/False or an int") - else: - # Check if the verbose attribute is present in the current classifier - if hasattr(self, "verbose"): - display_pb = self.verbose # type: ignore - # else default to False - else: - display_pb = False - - return display_pb - def fit( # pylint: disable=W0221 self, x: np.ndarray, @@ -404,7 +375,7 @@ def fit( # pylint: disable=W0221 training_mode: bool = True, drop_last: bool = False, scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs, ) -> None: """ @@ -427,8 +398,6 @@ def fit( # pylint: disable=W0221 import torch from torch.utils.data import TensorDataset, DataLoader - display_pb = self.process_verbose(verbose) - # Set model mode self._model.train(mode=training_mode) @@ -450,8 +419,8 @@ def fit( # pylint: disable=W0221 dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last) # Start training - for _ in tqdm(range(nb_epochs), disable=not display_pb, desc="Epochs"): - for x_batch, y_batch in tqdm(dataloader, disable=not display_pb, desc="Batches"): + for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"): + for x_batch, y_batch in dataloader: # Move inputs to device x_batch = x_batch.to(self._device) y_batch = y_batch.to(self._device) @@ -488,22 +457,20 @@ def fit( # pylint: disable=W0221 scheduler.step() def fit_generator( # pylint: disable=W0221 - self, generator: "DataGenerator", nb_epochs: int = 20, verbose: Optional[Union[bool, int]] = None, **kwargs + self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs ) -> None: """ Fit the classifier using the generator that yields batches as specified. :param generator: Batch generator providing `(x, y)` for each epoch. :param nb_epochs: Number of epochs to use for training. - :param verbose: If to display the progress bar information. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. 
""" import torch from art.data_generators import PyTorchDataGenerator - display_pb = self.process_verbose(verbose) - # Put the model in the training mode self._model.train() @@ -524,8 +491,8 @@ def fit_generator( # pylint: disable=W0221 == (0, 1) ) ): - for _ in tqdm(range(nb_epochs), disable=not display_pb, desc="Epochs"): - for i_batch, o_batch in tqdm(generator.iterator, disable=not display_pb, desc="Batches"): + for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"): + for i_batch, o_batch in generator.iterator: if isinstance(i_batch, np.ndarray): i_batch = torch.from_numpy(i_batch).to(self._device) else: @@ -534,10 +501,7 @@ def fit_generator( # pylint: disable=W0221 if isinstance(o_batch, np.ndarray): o_batch = torch.argmax(torch.from_numpy(o_batch).to(self._device), dim=1) else: - if o_batch.dim() > 1: - o_batch = torch.argmax(o_batch.to(self._device), dim=1) - else: - o_batch = o_batch.to(self._device) + o_batch = torch.argmax(o_batch.to(self._device), dim=1) # Zero the parameter gradients self._optimizer.zero_grad() diff --git a/art/estimators/classification/tensorflow.py b/art/estimators/classification/tensorflow.py index 963f64697c..6ead0ec234 100644 --- a/art/estimators/classification/tensorflow.py +++ b/art/estimators/classification/tensorflow.py @@ -266,41 +266,13 @@ def predict( # pylint: disable=W0221 return predictions - def process_verbose(self, verbose: Optional[Union[bool, int]] = None) -> bool: - """ - Function to unify the various ways implemented in ART of displaying progress bars - into a single True/False output. - :param verbose: If to display the progress bar information in one of a few possible formats. - :return: True/False if to display the progress bars. - """ - - if verbose is not None: - if isinstance(verbose, int): - if verbose == 0: - display_pb = False - else: - display_pb = True - elif isinstance(verbose, bool): - display_pb = verbose - else: - raise ValueError("Verbose should be True/False or a 0/1 int") - else: - # Check if the verbose attribute is present in the current classifier - if hasattr(self, "verbose"): - display_pb = self.verbose # type: ignore - # else default to False - else: - display_pb = False - - return display_pb - def fit( # pylint: disable=W0221 self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs, ) -> None: """ @@ -311,15 +283,13 @@ def fit( # pylint: disable=W0221 shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param verbose: If to display the progress bar information. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for TensorFlow and providing it takes no effect. 
""" if self.learning is not None: self.feed_dict[self.learning] = True - display_pb = self.process_verbose(verbose) - # Check if train and output_ph available if self.train is None or self.labels_ph is None: # pragma: no cover raise ValueError("Need the training objective and the output placeholder to train the model.") @@ -337,12 +307,12 @@ def fit( # pylint: disable=W0221 ind = np.arange(len(x_preprocessed)).tolist() # Start training - for _ in tqdm(range(nb_epochs), disable=not display_pb, desc="Epochs"): + for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"): # Shuffle the examples random.shuffle(ind) # Train for one epoch - for m in tqdm(range(num_batch), disable=not display_pb, desc="Batches"): + for m in range(num_batch): i_batch = x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]] o_batch = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]] @@ -354,7 +324,7 @@ def fit( # pylint: disable=W0221 self._sess.run(self.train, feed_dict=feed_dict) def fit_generator( # pylint: disable=W0221 - self, generator: "DataGenerator", nb_epochs: int = 20, verbose: Optional[Union[bool, int]] = None, **kwargs + self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs ) -> None: """ Fit the classifier using the generator that yields batches as specified. @@ -362,14 +332,12 @@ def fit_generator( # pylint: disable=W0221 :param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native training in TensorFlow, it will. :param nb_epochs: Number of epochs to use for training. - :param verbose: If to display the progress bar information. + :param verbose: Display the training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for TensorFlow and providing it takes no effect. """ from art.data_generators import TensorFlowDataGenerator - display_pb = self.process_verbose(verbose) - if self.learning is not None: self.feed_dict[self.learning] = True @@ -387,14 +355,14 @@ def fit_generator( # pylint: disable=W0221 == (0, 1) ) ): - for _ in tqdm(range(nb_epochs), disable=not display_pb, desc="Epochs"): + for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"): gen_size = generator.size if isinstance(gen_size, int): num_batchcs = int(gen_size / generator.batch_size) else: raise ValueError("Number of batches could not be determined from the generator") - for _ in tqdm(range(num_batchcs), disable=not display_pb, desc="Batches"): + for _ in range(num_batches): i_batch, o_batch = generator.get_batch() if self._reduce_labels: @@ -1003,42 +971,13 @@ def _predict_framework(self, x: "tf.Tensor", training_mode: bool = False) -> "tf return self._model(x_preprocessed, training=training_mode) - def process_verbose(self, verbose: Optional[Union[bool, int]] = None) -> bool: - """ - Function to unify the various ways implemented in ART of displaying progress bars - into a single True/False output. - - :param verbose: If to display the progress bar information in one of a few possible formats. - :return: True/False if to display the progress bars. 
- """ - - if verbose is not None: - if isinstance(verbose, int): - if verbose <= 0: - display_pb = False - else: - display_pb = True - elif isinstance(verbose, bool): - display_pb = verbose - else: - raise ValueError("Verbose should be True/False or a 0/1 int") - else: - # Check if the verbose attribute is present in the current classifier - if hasattr(self, "verbose"): - display_pb = self.verbose # type: ignore - # else default to False - else: - display_pb = False - - return display_pb - def fit( # pylint: disable=W0221 self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, - verbose: Optional[Union[bool, int]] = None, + verbose: bool = False, **kwargs, ) -> None: """ @@ -1049,15 +988,13 @@ def fit( # pylint: disable=W0221 shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param verbose: If to display progress bar information. + :param verbose: Display training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter currently supports "scheduler" which is an optional function that will be called at the end of every epoch to adjust the learning rate. """ import tensorflow as tf - display_pb = self.process_verbose(verbose) - if self._train_step is None: # pragma: no cover if self._loss_object is None: # pragma: no cover raise TypeError( @@ -1094,15 +1031,15 @@ def train_step(model, images, labels): train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size) - for epoch in tqdm(range(nb_epochs), disable=not display_pb, desc="Epochs"): - for images, labels in tqdm(train_ds, disable=not display_pb, desc="Batches"): + for epoch in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"): + for images, labels in train_ds: train_step(self.model, images, labels) if scheduler is not None: scheduler(epoch) def fit_generator( # pylint: disable=W0221 - self, generator: "DataGenerator", nb_epochs: int = 20, verbose: Optional[Union[bool, int]] = None, **kwargs + self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs ) -> None: """ Fit the classifier using the generator that yields batches as specified. @@ -1110,7 +1047,7 @@ def fit_generator( # pylint: disable=W0221 :param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native training in TensorFlow, it will. :param nb_epochs: Number of epochs to use for training. - :param verbose: If to display progress bar information + :param verbose: Display training progress bar. :param kwargs: Dictionary of framework-specific arguments. This parameter currently supports "scheduler" which is an optional function that will be called at the end of every epoch to adjust the learning rate. 
@@ -1118,8 +1055,6 @@ def fit_generator( # pylint: disable=W0221 import tensorflow as tf from art.data_generators import TensorFlowV2DataGenerator - display_pb = self.process_verbose(verbose) - if self._train_step is None: # pragma: no cover if self._loss_object is None: # pragma: no cover raise TypeError( @@ -1159,8 +1094,8 @@ def train_step(model, images, labels): == (0, 1) ) ): - for epoch in tqdm(range(nb_epochs), disable=not display_pb, desc="Epochs"): - for i_batch, o_batch in tqdm(generator.iterator, disable=not display_pb, desc="Batches"): + for epoch in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"): + for i_batch, o_batch in generator.iterator: if self._reduce_labels: o_batch = tf.math.argmax(o_batch, axis=1) train_step(self._model, i_batch, o_batch) diff --git a/tests/estimators/classification/test_deeplearning_common.py b/tests/estimators/classification/test_deeplearning_common.py index de2d9e3cdb..b7dbf8323a 100644 --- a/tests/estimators/classification/test_deeplearning_common.py +++ b/tests/estimators/classification/test_deeplearning_common.py @@ -204,27 +204,13 @@ def get_lr(_): # Test a valid callback classifier, sess = image_dl_estimator(from_logits=True) - kwargs = {"callbacks": [LearningRateScheduler(get_lr)], "verbose": True} - classifier.fit(x_train_mnist, y_train_mnist, batch_size=default_batch_size, nb_epochs=1, **kwargs) + kwargs = {"callbacks": [LearningRateScheduler(get_lr)]} + classifier.fit(x_train_mnist, y_train_mnist, batch_size=default_batch_size, nb_epochs=1, verbose=True, **kwargs) # Check for fit_generator kwargs as well data_gen = image_data_generator(sess=sess) classifier.fit_generator(generator=data_gen, nb_epochs=1, **kwargs) - # Test failure for invalid parameters: does not apply to many frameworks which allow arbitrary kwargs - if framework not in [ - "tensorflow1", - "tensorflow2", - "tensorflow2v1", - "huggingface", - "pytorch", - ]: - kwargs = {"epochs": 1} - with pytest.raises(TypeError) as exception: - classifier.fit(x_train_mnist, y_train_mnist, batch_size=default_batch_size, nb_epochs=1, **kwargs) - - assert "multiple values for keyword argument" in str(exception) - except ARTTestException as e: art_warning(e)
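With `process_verbose` removed, `verbose` on the `fit` and `fit_generator` methods above is a plain boolean that enables or disables the tqdm progress bars. A minimal usage sketch follows; the model, optimizer, and data are hypothetical placeholders and not part of this change:

import numpy as np
import torch

from art.estimators.classification import PyTorchClassifier

# Hypothetical two-class model and toy data, used only to illustrate the simplified verbose flag.
model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(4, 2))
classifier = PyTorchClassifier(
    model=model,
    loss=torch.nn.CrossEntropyLoss(),
    optimizer=torch.optim.Adam(model.parameters(), lr=0.01),
    input_shape=(4,),
    nb_classes=2,
)

x = np.random.rand(8, 4).astype(np.float32)
y = np.eye(2)[np.random.randint(0, 2, size=8)]

# verbose is now a simple bool: True shows the tqdm progress bars, False (the default) hides them.
classifier.fit(x, y, batch_size=4, nb_epochs=1, verbose=True)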