Commit

Apply suggestions from code review
beat-buesser authored Dec 19, 2023
1 parent 94cf59f commit 1f3026b
Showing 12 changed files with 54 additions and 194 deletions.
@@ -438,7 +438,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional[Any] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         update_batchnorm: bool = True,
         batchnorm_update_epochs: int = 1,
         transform: Optional["torchvision.transforms.transforms.Compose"] = None,
@@ -469,8 +469,6 @@ def fit( # pylint: disable=W0221
         """
         import torch

-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)

@@ -501,7 +499,7 @@ def fit( # pylint: disable=W0221
             epoch_loss = []
             epoch_batch_sizes = []

-            pbar = tqdm(range(num_batch), disable=not display_pb)
+            pbar = tqdm(range(num_batch), disable=not verbose)

             # Train for one epoch
             for m in pbar:
@@ -547,7 +545,7 @@ def fit( # pylint: disable=W0221
                 epoch_loss.append(loss.cpu().detach().numpy())
                 epoch_batch_sizes.append(len(i_batch))

-                if display_pb:
+                if verbose:
                     pbar.set_description(
                         f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} "
                         f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} "
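Every hunk in this commit applies the same two-step change: the `verbose` parameter reverts from `Optional[Union[bool, int]]` to a plain `bool`, and the `display_pb = self.process_verbose(verbose)` indirection is dropped so the flag feeds `tqdm`/`trange` directly through `disable=not verbose`. A minimal self-contained sketch of the resulting pattern (the loop body and names below are illustrative, not ART code):

    import numpy as np
    from tqdm import tqdm

    def fit_one_epoch(num_batch: int, verbose: bool = False) -> None:
        """Toy training loop: the progress bar renders only when verbose=True."""
        epoch_loss = []
        # disable=not verbose hides the bar entirely for the default verbose=False
        pbar = tqdm(range(num_batch), disable=not verbose)
        for _ in pbar:
            loss = float(np.random.rand())  # stand-in for a real training step
            epoch_loss.append(loss)
            if verbose:
                pbar.set_description(f"Loss {np.mean(epoch_loss):.3f}")

    fit_one_epoch(num_batch=100, verbose=True)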
@@ -160,7 +160,7 @@ def fit( # pylint: disable=W0221
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -171,15 +171,13 @@ def fit( # pylint: disable=W0221
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: If to display training progress bars
+        :param verbose: Display training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf

-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -222,7 +220,7 @@ def train_step(model, images, labels):
             epoch_loss = []
             epoch_batch_sizes = []

-            pbar = tqdm(range(num_batch), disable=not display_pb)
+            pbar = tqdm(range(num_batch), disable=not verbose)

             ind = np.arange(len(x_preprocessed))
             for m in pbar:
@@ -239,7 +237,7 @@ def train_step(model, images, labels):
                 else:
                     train_step(self.model, images, labels)

-                if display_pb:
+                if verbose:
                     if self._train_step is None:
                         pbar.set_description(
                             f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} "
@@ -138,7 +138,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -154,17 +154,14 @@ def fit( # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         import torch.nn.functional as F
         from torch.utils.data import TensorDataset, DataLoader

-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)

@@ -190,7 +187,7 @@ class was initialised.
         )

         # Start training
-        for _ in trange(nb_epochs, disable=not display_pb):
+        for _ in trange(nb_epochs, disable=not verbose):
             for x_batch, y_batch in dataloader:
                 # Move inputs to GPU
                 x_batch = x_batch.to(self.device)
@@ -138,7 +138,7 @@ def fit(
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs
     ) -> None:
         """
@@ -149,16 +149,13 @@ def fit(
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf

-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._optimizer is None:  # pragma: no cover
                 raise ValueError(
@@ -225,7 +222,7 @@ def train_step(model, images, labels):

         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)

-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             for images, labels in train_ds:
                 # Tile samples for Gaussian augmentation
                 input_size = len(images)
9 changes: 3 additions & 6 deletions art/estimators/certification/randomized_smoothing/pytorch.py
@@ -140,7 +140,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -156,16 +156,13 @@ def fit( # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         from torch.utils.data import TensorDataset, DataLoader

-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)

@@ -187,7 +184,7 @@ class was initialised.
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)

         # Start training
-        for _ in trange(nb_epochs, disable=not display_pb):
+        for _ in trange(nb_epochs, disable=not verbose):
             for x_batch, y_batch in dataloader:
                 # Move inputs to device
                 x_batch = x_batch.to(self._device)
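After this change, the public `fit` signature in art/estimators/certification/randomized_smoothing/pytorch.py again takes `verbose: bool = False`. A hedged usage sketch follows; the constructor arguments below are assumptions chosen for illustration, so check the class docstring for the exact signature:

    import numpy as np
    import torch
    from art.estimators.certification.randomized_smoothing import PyTorchRandomizedSmoothing

    # A small stand-in model; the constructor keywords are assumed for this sketch.
    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
    estimator = PyTorchRandomizedSmoothing(
        model=model,
        loss=torch.nn.CrossEntropyLoss(),
        optimizer=torch.optim.Adam(model.parameters(), lr=1e-3),
        input_shape=(1, 28, 28),
        nb_classes=10,
        sample_size=32,  # number of noise samples per input (assumed parameter)
        scale=0.25,      # standard deviation of the smoothing noise (assumed parameter)
    )

    x_train = np.random.rand(64, 1, 28, 28).astype(np.float32)
    y_train = np.eye(10)[np.random.randint(0, 10, 64)].astype(np.float32)

    # verbose is a plain bool again: True shows the per-epoch trange progress bar.
    estimator.fit(x_train, y_train, batch_size=16, nb_epochs=2, verbose=True)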
@@ -155,7 +155,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -171,16 +171,13 @@ def fit( # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         from torch.utils.data import TensorDataset, DataLoader

-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)

@@ -202,7 +199,7 @@ class was initialised.
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)

         # Start training
-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             self.attack.norm = min(self.epsilon, (epoch + 1) * self.epsilon / self.warmup)

             for x_batch, y_batch in dataloader:
@@ -155,7 +155,7 @@ def fit(
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs
     ) -> None:
         """
@@ -166,16 +166,13 @@ def fit(
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf

-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -212,7 +209,7 @@ def train_step(model, images, labels):

         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)

-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             self.attack.norm = min(self.epsilon, (epoch + 1) * self.epsilon / self.warmup)

             for x_batch, y_batch in train_ds:
@@ -172,7 +172,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -188,17 +188,14 @@ def fit( # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         import torch.nn.functional as F
         from torch.utils.data import TensorDataset, DataLoader

-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)

@@ -220,7 +217,7 @@ class was initialised.
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)

         # Start training
-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             warmup_v = min(1.0, (epoch + 1) / self.warmup)

             for x_batch, y_batch in dataloader:
@@ -137,7 +137,7 @@ def fit( # pylint: disable=W0221
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs
     ) -> None:
         """
@@ -148,16 +148,13 @@ def fit( # pylint: disable=W0221
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf

-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -194,7 +191,7 @@ def train_step(model, images, labels):

         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)

-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             for images, labels in train_ds:
                 # Add random noise for randomized smoothing
                 images += tf.random.normal(shape=images.shape, mean=0.0, stddev=self.scale)
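The net effect across the changed files is that the per-call verbosity override is gone: `fit` no longer accepts `None` as "inherit the level set at initialisation", and `self.process_verbose` is no longer called. For contrast, here is a reconstruction of the kind of normalisation the removed helper presumably performed; this is an illustrative sketch, not the deleted ART code:

    from typing import Optional, Union

    class EstimatorWithVerbose:
        """Illustrative only: the indirection this commit removes."""

        def __init__(self, verbose: Union[bool, int] = False) -> None:
            self._verbose = bool(verbose)  # verbosity level fixed at initialisation

        def process_verbose(self, verbose: Optional[Union[bool, int]]) -> bool:
            # None -> fall back to the instance-level setting;
            # bool or int -> coerce to a plain display flag for tqdm/trange.
            if verbose is None:
                return self._verbose
            return bool(verbose)

With the commit applied, the boolean argument is used as-is, which simplifies the `fit` signatures at the cost of that per-call override.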
