
Merge branch 'dev_1.17.0' into hf_notebook_dev
beat-buesser authored Dec 22, 2023
2 parents bb9fbee + 74be71f commit f29950a
Showing 28 changed files with 1,631 additions and 420 deletions.
1 change: 1 addition & 0 deletions art/attacks/evasion/__init__.py
@@ -18,6 +18,7 @@
 from art.attacks.evasion.brendel_bethge import BrendelBethgeAttack

 from art.attacks.evasion.boundary import BoundaryAttack
+from art.attacks.evasion.composite_adversarial_attack import CompositeAdversarialAttackPyTorch
 from art.attacks.evasion.carlini import CarliniL2Method, CarliniLInfMethod, CarliniL0Method
 from art.attacks.evasion.decision_tree_attack import DecisionTreeAttack
 from art.attacks.evasion.deepfool import DeepFool
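
A minimal usage sketch for the newly exported attack, assuming only the generate() interface that every ART evasion attack shares; any constructor arguments beyond the wrapped classifier are omitted here because this diff does not show them:

    from art.attacks.evasion import CompositeAdversarialAttackPyTorch

    # classifier: a fitted art.estimators.classification.PyTorchClassifier
    attack = CompositeAdversarialAttackPyTorch(classifier)
    x_adv = attack.generate(x=x_test, y=y_test)  # adversarial examples, same shape as x_test
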
673 changes: 673 additions & 0 deletions art/attacks/evasion/composite_adversarial_attack.py

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions art/attacks/extraction/knockoff_nets.py
@@ -155,7 +155,7 @@ def _random_extraction(self, x: np.ndarray, thieved_classifier: "CLASSIFIER_TYPE
             y=fake_labels,
             batch_size=self.batch_size_fit,
             nb_epochs=self.nb_epochs,
-            verbose=0,
+            verbose=False,
         )

         return thieved_classifier
@@ -243,7 +243,7 @@ def _adaptive_extraction(
                 y=fake_label,
                 batch_size=self.batch_size_fit,
                 nb_epochs=1,
-                verbose=0,
+                verbose=False,
             )

             # Test new labels
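
The two edits above (and the similar ones in the files below) are part of a commit-wide move from integer verbose flags to the boolean that estimator fit() methods now expect; schematically, for any ART classifier (variable names illustrative):

    # verbose is now a plain bool: True shows a progress bar, False silences it
    classifier.fit(
        x=x_train,
        y=y_train,
        batch_size=64,
        nb_epochs=10,
        verbose=False,  # previously verbose=0
    )
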
372 changes: 249 additions & 123 deletions art/attacks/inference/membership_inference/black_box.py

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions art/attacks/poisoning/sleeper_agent_attack.py
@@ -360,7 +360,7 @@ def _create_model(
         for layer in model_pt.model.children():
             if hasattr(layer, "reset_parameters"):
                 layer.reset_parameters()  # type: ignore
-        model_pt.fit(x_train, y_train, batch_size=batch_size, nb_epochs=epochs, verbose=1)
+        model_pt.fit(x_train, y_train, batch_size=batch_size, nb_epochs=epochs, verbose=True)
         predictions = model_pt.predict(x_test)
         accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
         logger.info("Accuracy of retrained model : %s", accuracy * 100.0)
@@ -370,7 +370,7 @@

         self.substitute_classifier.model.trainable = True
         model_tf = self.substitute_classifier.clone_for_refitting()
-        model_tf.fit(x_train, y_train, batch_size=batch_size, nb_epochs=epochs, verbose=0)
+        model_tf.fit(x_train, y_train, batch_size=batch_size, nb_epochs=epochs, verbose=False)
         predictions = model_tf.predict(x_test)
         accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
         logger.info("Accuracy of retrained model : %s", accuracy * 100.0)
4 changes: 3 additions & 1 deletion art/defences/detector/poison/activation_defence.py
@@ -695,7 +695,9 @@ def _get_activations(self, x_train: Optional[np.ndarray] = None) -> np.ndarray:

         # wrong way to get activations activations = self.classifier.predict(self.x_train)
         if isinstance(activations, np.ndarray):
-            nodes_last_layer = np.shape(activations)[1]
+            # flatten activations across batch
+            activations = np.reshape(activations, (activations.shape[0], -1))
+            nodes_last_layer = activations.shape[1]
         else:
             raise ValueError("activations is None or tensor.")

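
The reshape matters because activations taken from a convolutional layer arrive as (batch, channels, height, width), while the clustering step downstream expects one flat feature vector per sample; the same fix appears in the spectral signature defence below. An illustration with made-up shapes:

    import numpy as np

    activations = np.random.rand(32, 8, 4, 4)  # (batch, channels, height, width)
    activations = np.reshape(activations, (activations.shape[0], -1))
    nodes_last_layer = activations.shape[1]
    print(activations.shape, nodes_last_layer)  # (32, 128) 128
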
2 changes: 2 additions & 0 deletions art/defences/detector/poison/spectral_signature_defense.py
@@ -121,6 +121,8 @@ def detect_poison(self, **kwargs) -> Tuple[dict, List[int]]:
             raise ValueError("Wrong type detected.")

         if features_x_poisoned is not None:
+            # flatten activations across batch
+            features_x_poisoned = np.reshape(features_x_poisoned, (features_x_poisoned.shape[0], -1))
             features_split = segment_by_class(features_x_poisoned, self.y_train, self.classifier.nb_classes)
         else:
             raise ValueError("Activation are `None`.")
8 changes: 6 additions & 2 deletions art/defences/trainer/adversarial_trainer.py
@@ -188,7 +188,9 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
                 x_batch[adv_ids] = x_adv

             # Fit batch
-            self._classifier.fit(x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], verbose=0, **kwargs)
+            self._classifier.fit(
+                x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], verbose=False, **kwargs
+            )
             attack_id = (attack_id + 1) % len(self.attacks)

     def fit( # pylint: disable=W0221
@@ -260,7 +262,9 @@ def fit( # pylint: disable=W0221
                 x_batch[adv_ids] = x_adv

             # Fit batch
-            self._classifier.fit(x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], verbose=0, **kwargs)
+            self._classifier.fit(
+                x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], verbose=False, **kwargs
+            )
             attack_id = (attack_id + 1) % len(self.attacks)

     def predict(self, x: np.ndarray, **kwargs) -> np.ndarray:
4 changes: 2 additions & 2 deletions art/defences/trainer/dp_instahide_trainer.py
@@ -155,7 +155,7 @@ def fit( # pylint: disable=W0221
                 x_aug = self._generate_noise(x_aug)

                 # fit batch
-                self._classifier.fit(x_aug, y_aug, nb_epochs=1, batch_size=x_aug.shape[0], verbose=0, **kwargs)
+                self._classifier.fit(x_aug, y_aug, nb_epochs=1, batch_size=x_aug.shape[0], verbose=False, **kwargs)

                 # get metrics
                 loss = self._classifier.compute_loss(x_aug, y_aug, reduction="mean")
@@ -234,7 +234,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
                 x_aug = self._generate_noise(x_aug)

                 # fit batch
-                self._classifier.fit(x_aug, y_aug, nb_epochs=1, batch_size=x_aug.shape[0], verbose=0, **kwargs)
+                self._classifier.fit(x_aug, y_aug, nb_epochs=1, batch_size=x_aug.shape[0], verbose=False, **kwargs)

                 # get metrics
                 loss = self._classifier.compute_loss(x_aug, y_aug, reduction="mean")
10 changes: 4 additions & 6 deletions art/estimators/certification/derandomized_smoothing/pytorch.py
@@ -438,7 +438,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional[Any] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         update_batchnorm: bool = True,
         batchnorm_update_epochs: int = 1,
         transform: Optional["torchvision.transforms.transforms.Compose"] = None,
@@ -457,7 +457,7 @@
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: if to display training progress bars
+        :param verbose: Display training progress bar.
         :param update_batchnorm: ViT specific argument.
                                  If to run the training data through the model to update any batch norm statistics prior
                                  to training. Useful on small datasets when using pre-trained ViTs.
@@ -469,8 +469,6 @@
         """
         import torch

-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)

@@ -501,7 +499,7 @@ def fit( # pylint: disable=W0221
             epoch_loss = []
             epoch_batch_sizes = []

-            pbar = tqdm(range(num_batch), disable=not display_pb)
+            pbar = tqdm(range(num_batch), disable=not verbose)

             # Train for one epoch
             for m in pbar:
@@ -547,7 +545,7 @@ def fit( # pylint: disable=W0221
                     epoch_loss.append(loss.cpu().detach().numpy())
                     epoch_batch_sizes.append(len(i_batch))

-                if display_pb:
+                if verbose:
                     pbar.set_description(
                         f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} "
                         f"Acc {np.average(epoch_acc, weights=epoch_batch_sizes):.3f} "
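
With process_verbose() removed, the boolean now gates the tqdm progress bar directly; the underlying pattern in isolation, as a minimal sketch:

    from tqdm import tqdm

    def train_one_epoch(num_batch: int, verbose: bool = False) -> None:
        # tqdm is always constructed; disable=True simply makes it silent
        pbar = tqdm(range(num_batch), disable=not verbose)
        for m in pbar:
            loss = 0.0  # stand-in for the real training step
            if verbose:
                pbar.set_description(f"Loss {loss:.3f}")
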
art/estimators/certification/derandomized_smoothing/tensorflow.py
@@ -160,7 +160,7 @@ def fit( # pylint: disable=W0221
         y: np.ndarray,
         batch_size: int = 128,
         nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -171,15 +171,13 @@
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: If to display training progress bars
+        :param verbose: Display training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf

-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -222,7 +220,7 @@ def train_step(model, images, labels):
             epoch_loss = []
             epoch_batch_sizes = []

-            pbar = tqdm(range(num_batch), disable=not display_pb)
+            pbar = tqdm(range(num_batch), disable=not verbose)

             ind = np.arange(len(x_preprocessed))
             for m in pbar:
@@ -239,7 +237,7 @@ def train_step(model, images, labels):
                 else:
                     train_step(self.model, images, labels)

-                if display_pb:
+                if verbose:
                     if self._train_step is None:
                         pbar.set_description(
                             f"Loss {np.average(epoch_loss, weights=epoch_batch_sizes):.3f} "
art/estimators/certification/randomized_smoothing/macer/pytorch.py
@@ -75,7 +75,6 @@ def __init__(
         gamma: float = 8.0,
         lmbda: float = 12.0,
         gaussian_samples: int = 16,
-        verbose: bool = False,
     ) -> None:
         """
         Create a MACER classifier.
@@ -105,7 +104,6 @@
         :param gamma: The hinge factor.
         :param lmbda: The trade-off factor.
         :param gaussian_samples: The number of gaussian samples per input.
-        :param verbose: Show progress bars.
         """
         super().__init__(
             model=model,
@@ -122,7 +120,6 @@
             sample_size=sample_size,
             scale=scale,
             alpha=alpha,
-            verbose=verbose,
         )
         self.beta = beta
         self.gamma = gamma
@@ -138,7 +135,7 @@ def fit( # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
-        verbose: Optional[Union[bool, int]] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -154,17 +151,14 @@
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
                        and providing it takes no effect.
         """
         import torch
         import torch.nn.functional as F
         from torch.utils.data import TensorDataset, DataLoader

-        display_pb = self.process_verbose(verbose)
-
         # Set model mode
         self._model.train(mode=training_mode)

@@ -190,7 +184,7 @@ class was initialised.
         )

         # Start training
-        for _ in trange(nb_epochs, disable=not display_pb):
+        for _ in trange(nb_epochs, disable=not verbose):
             for x_batch, y_batch in dataloader:
                 # Move inputs to GPU
                 x_batch = x_batch.to(self.device)
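
For callers, this change moves progress-bar control from construction time to each training run; a sketch assuming the class is ART's PyTorchMACER and that the remaining constructor arguments are unchanged (import path and argument values below are illustrative):

    import numpy as np
    import torch
    from art.estimators.certification.randomized_smoothing import PyTorchMACER  # import path assumed

    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
    macer = PyTorchMACER(
        model=model,
        loss=torch.nn.CrossEntropyLoss(),
        input_shape=(3, 32, 32),
        nb_classes=10,
        optimizer=torch.optim.Adam(model.parameters(), lr=1e-3),
        # verbose=True,  # no longer accepted here after this commit
    )
    x_train = np.random.rand(16, 3, 32, 32).astype(np.float32)
    y_train = np.eye(10)[np.random.randint(0, 10, 16)].astype(np.float32)
    macer.fit(x_train, y_train, batch_size=8, nb_epochs=1, verbose=True)  # verbosity chosen per run
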
art/estimators/certification/randomized_smoothing/macer/tensorflow.py
@@ -75,7 +75,6 @@ def __init__(
         gamma: float = 8.0,
         lmbda: float = 12.0,
         gaussian_samples: int = 16,
-        verbose: bool = False,
     ) -> None:
         """
         Create a MACER classifier.
@@ -108,7 +107,6 @@
         :param gamma: The hinge factor.
         :param lmbda: The trade-off factor.
         :param gaussian_samples: The number of gaussian samples per input.
-        :param verbose: Show progress bars.
         """
         super().__init__(
             model=model,
@@ -125,21 +123,14 @@
             sample_size=sample_size,
             scale=scale,
             alpha=alpha,
-            verbose=verbose,
         )
         self.beta = beta
         self.gamma = gamma
         self.lmbda = lmbda
         self.gaussian_samples = gaussian_samples

     def fit(
-        self,
-        x: np.ndarray,
-        y: np.ndarray,
-        batch_size: int = 128,
-        nb_epochs: int = 10,
-        verbose: Optional[Union[bool, int]] = None,
-        **kwargs
+        self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = False, **kwargs
     ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
@@ -149,16 +140,13 @@ def fit(
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
-        :param verbose: (Optional) Display the progress bar, if not supplied will revert to the verbose level when
-                        class was initialised.
+        :param verbose: Display the training progress bar.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently only supports
                        "scheduler" which is an optional function that will be called at the end of every
                        epoch to adjust the learning rate.
         """
         import tensorflow as tf

-        display_pb = self.process_verbose(verbose)
-
         if self._train_step is None:  # pragma: no cover
             if self._optimizer is None:  # pragma: no cover
                 raise ValueError(
@@ -225,7 +213,7 @@ def train_step(model, images, labels):

         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)

-        for epoch in trange(nb_epochs, disable=not display_pb):
+        for epoch in trange(nb_epochs, disable=not verbose):
             for images, labels in train_ds:
                 # Tile samples for Gaussian augmentation
                 input_size = len(images)