diff --git a/art/attacks/evasion/__init__.py b/art/attacks/evasion/__init__.py index fb452f21d4..3882b839c1 100644 --- a/art/attacks/evasion/__init__.py +++ b/art/attacks/evasion/__init__.py @@ -18,6 +18,7 @@ from art.attacks.evasion.brendel_bethge import BrendelBethgeAttack from art.attacks.evasion.boundary import BoundaryAttack +from art.attacks.evasion.composite_adversarial_attack import CompositeAdversarialAttackPyTorch from art.attacks.evasion.carlini import CarliniL2Method, CarliniLInfMethod, CarliniL0Method from art.attacks.evasion.decision_tree_attack import DecisionTreeAttack from art.attacks.evasion.deepfool import DeepFool diff --git a/art/attacks/evasion/composite_adversarial_attack.py b/art/attacks/evasion/composite_adversarial_attack.py new file mode 100644 index 0000000000..0a2dd2fe8b --- /dev/null +++ b/art/attacks/evasion/composite_adversarial_attack.py @@ -0,0 +1,673 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +""" +This module implements the composite adversarial attack by sequentially perturbing different components of the inputs. +It uses order scheduling to search for the attack sequence and uses the iterative gradient sign method to optimize the +perturbations in semantic space and Lp-ball (see `FastGradientMethod` and `BasicIterativeMethod`). + +| Paper link: https://arxiv.org/abs/2202.04235 +""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import logging + +from typing import Optional, Tuple, List, TYPE_CHECKING + +import numpy as np +from tqdm.auto import tqdm + +from art.attacks.attack import EvasionAttack +from art.config import ART_NUMPY_DTYPE +from art.estimators.estimator import BaseEstimator, LossGradientsMixin +from art.estimators.classification.classifier import ClassifierMixin +from art.utils import compute_success, check_and_transform_label_format + +if TYPE_CHECKING: + # pylint: disable=C0412 + import torch + from art.estimators.classification.pytorch import PyTorchClassifier + +logger = logging.getLogger(__name__) + + +class CompositeAdversarialAttackPyTorch(EvasionAttack): + """ + Implementation of the composite adversarial attack on image classifiers in PyTorch. The attack is constructed by + adversarially perturbing the hue component of the inputs. 
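    In the full configuration it also perturbs the saturation, rotation, brightness, and contrast components, and can
    additionally apply an L-infinity (PGD) perturbation on top of the semantic changes.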
It uses order scheduling to search for the attack sequence + and uses the iterative gradient sign method to optimize the perturbations in semantic space and Lp-ball (see + `FastGradientMethod` and `BasicIterativeMethod`). + + | Note that this attack is intended for only PyTorch image classifiers with RGB images in the range [0, 1] as inputs + + | Paper link: https://arxiv.org/abs/2202.04235 + """ + + attack_params = EvasionAttack.attack_params + [ + "enabled_attack", + "hue_epsilon", + "sat_epsilon", + "rot_epsilon", + "bri_epsilon", + "con_epsilon", + "pgd_epsilon", + "early_stop", + "max_iter", + "max_inner_iter", + "schedule", + "batch_size", + "verbose", + ] + _estimator_requirements = (BaseEstimator, LossGradientsMixin, ClassifierMixin) # type: ignore + + def __init__( + self, + classifier: "PyTorchClassifier", + enabled_attack: Tuple = (0, 1, 2, 3, 4, 5), + # Default: Full Attacks; 0: Hue, 1: Saturation, 2: Rotation, 3: Brightness, 4: Contrast, 5: PGD (L-infinity) + hue_epsilon: Tuple[float, float] = (-np.pi, np.pi), + sat_epsilon: Tuple[float, float] = (0.7, 1.3), + rot_epsilon: Tuple[float, float] = (-10.0, 10.0), + bri_epsilon: Tuple[float, float] = (-0.2, 0.2), + con_epsilon: Tuple[float, float] = (0.7, 1.3), + pgd_epsilon: Tuple[float, float] = (-8 / 255, 8 / 255), # L-infinity + early_stop: bool = True, + max_iter: int = 5, + max_inner_iter: int = 10, + attack_order: str = "scheduled", + batch_size: int = 1, + verbose: bool = True, + ) -> None: + """ + Create an instance of the :class:`.CompositeAdversarialAttackPyTorch`. + + :param classifier: A trained PyTorch classifier. + :param enabled_attack: Attack pool selection, and attack order designation for fixed order. For simplicity, + we use the following abbreviations to specify each attack types. 0: Hue, 1: Saturation, + 2: Rotation, 3: Brightness, 4: Contrast, 5: PGD(L-infinity). Therefore, `(0,1,2)` means + that the attack combines hue, saturation, and rotation; `(0,1,2,3,4)` means the + semantic attacks; `(0,1,2,3,4,5)` means the full attacks. + :param hue_epsilon: The boundary of the hue perturbation. The value is expected to be in the interval + `[-np.pi, np.pi]`. Perturbation of `0` means no shift and `-np.pi` and `np.pi` give a + complete reversal of the hue channel in the HSV color space in the positive and negative + directions, respectively. See `kornia.enhance.adjust_hue` for more details. + :param sat_epsilon: The boundary of the saturation perturbation. The value is expected to be in the interval + `[0, infinity]`. The perturbation of `0` gives a black-and-white image, `1` gives the + original image, and `2` enhances the saturation by a factor of 2. See + `kornia.geometry.transform.rotate` for more details. + :param rot_epsilon: The boundary of the rotation perturbation (in degrees). Positive values mean + counter-clockwise rotation. See `kornia.geometry.transform.rotate` for more details. + :param bri_epsilon: The boundary of the brightness perturbation. The value is expected to be in the interval + `[-1, 1]`. Perturbation of `0` means no shift, `-1` gives a complete black image, and `1` + gives a complete white image. See `kornia.enhance.adjust_brightness` for more details. + :param con_epsilon: The boundary of the contrast perturbation. The value is expected to be in the interval + `[0, infinity]`. Perturbation of `0` gives a complete black image, `1` does not modify the + image, and any other value modifies the brightness by this factor. See + `kornia.enhance.adjust_contrast` for more details. 
+ :param pgd_epsilon: The maximum perturbation that the attacker can introduce in the L-infinity ball. + :param early_stop: When True, the attack will stop if the perturbed example is classified incorrectly by the + classifier. + :param max_iter: The maximum number of iterations for attack order optimization. + :param max_inner_iter: The maximum number of iterations for each attack optimization. + :param attack_order: Specify the scheduling type for composite adversarial attack. The value is expected to be + `fixed`, `random`, or `scheduled`. `fixed` means the attack order is the same as specified + in `enabled_attack`. `random` means the attack order is randomly generated at each + iteration. `scheduled` means to enable the attack order optimization proposed in the paper. + If only one attack is enabled, `fixed` will be used. + :param batch_size: The batch size to use during the generation of adversarial samples. + :param verbose: Show progress bars. + """ + import torch + + super().__init__(estimator=classifier) + self.classifier = classifier + self.model = classifier.model + self.device = next(self.model.parameters()).device + self.fixed_order = enabled_attack + self.enabled_attack = tuple(sorted(enabled_attack)) + self.epsilons = [hue_epsilon, sat_epsilon, rot_epsilon, bri_epsilon, con_epsilon, pgd_epsilon] + self.early_stop = early_stop + self.attack_order = attack_order + self.max_iter = max_iter if self.attack_order == "scheduled" else 1 + self.max_inner_iter = max_inner_iter + self.batch_size = batch_size + self.verbose = verbose + self._check_params() + + import kornia + + self.seq_num = len(self.enabled_attack) # attack_num + self.linf_idx = self.enabled_attack.index(5) if 5 in self.enabled_attack else None + self.attack_pool = ( + self.caa_hue, + self.caa_saturation, + self.caa_rotation, + self.caa_brightness, + self.caa_contrast, + self.caa_linf, + ) + self.eps_pool = torch.tensor(self.epsilons, device=self.device) + self.attack_pool_base = ( + kornia.enhance.adjust_hue, + kornia.enhance.adjust_saturation, + kornia.geometry.transform.rotate, + kornia.enhance.adjust_brightness, + kornia.enhance.adjust_contrast, + ) + self.attack_dict = tuple(self.attack_pool[i] for i in self.enabled_attack) + self.step_size_pool = [ + 2.5 * ((eps[1] - eps[0]) / 2) / self.max_inner_iter for eps in self.eps_pool + ] # 2.5 * ε-test / num_steps + + self._description = "Composite Adversarial Attack" + self._is_scheduling: bool = False + self.eps_space: List = [] + self.adv_val_space: List = [] + self.curr_dsm: "torch.Tensor" = torch.zeros((len(self.enabled_attack), len(self.enabled_attack))) + self.curr_seq: "torch.Tensor" = torch.zeros(len(self.enabled_attack)) + self.is_attacked: "torch.Tensor" = torch.zeros(self.batch_size, device=self.device).bool() + self.is_not_attacked: "torch.Tensor" = torch.ones(self.batch_size, device=self.device).bool() + + def _check_params(self) -> None: + """ + Check validity of parameters. + """ + super()._check_params() + if not isinstance(self.enabled_attack, tuple) or not all( + value in [0, 1, 2, 3, 4, 5] for value in self.enabled_attack + ): + raise ValueError( + "The parameter `enabled_attack` must be a tuple specifying the attack to launch. For simplicity, we use" + + " the following abbreviations to specify each attack types. 0: Hue, 1: Saturation, 2: Rotation," + + " 3: Brightness, 4: Contrast, 5: PGD(L-infinity). 
Therefore, `(0,1,2)` means that the attack combines" + + " hue, saturation, and rotation; `(0,1,2,3,4)` means the all semantic attacks; `(0,1,2,3,4,5)` means" + + " the full attacks." + ) + _epsilons_range = ( + ("hue_epsilon", (-np.pi, np.pi), "(-np.pi, np.pi)"), + ("sat_epsilon", (0.0, np.inf), "(0.0, np.inf)"), + ("rot_epsilon", (-360.0, 360.0), "(-360.0, 360.0)"), + ("bri_epsilon", (-1.0, 1.0), "(-1.0, 1.0)"), + ("con_epsilon", (0.0, np.inf), "(0.0, np.inf)"), + ("pgd_epsilon", (-1.0, 1.0), "(-1.0, 1.0)"), + ) + for i in range(6): + if ( + not isinstance(self.epsilons[i], tuple) + or not len(self.epsilons[i]) == 2 + or not (isinstance(self.epsilons[i][0], float) and isinstance(self.epsilons[i][1], float)) + ): + logger.info( + "The argument `%s` must be an interval within %s of type tuple.", + _epsilons_range[i][0], + _epsilons_range[i][2], + ) + raise TypeError( + f"The argument `{_epsilons_range[i][0]}` must be an interval " + f"within {_epsilons_range[i][2]} of type tuple." + ) + + if not _epsilons_range[i][1][0] <= self.epsilons[i][0] <= self.epsilons[i][1] <= _epsilons_range[i][1][1]: + logger.info( + "The argument `%s` must be an interval within %s of type tuple.", + _epsilons_range[i][0], + _epsilons_range[i][2], + ) + raise ValueError( + f"The argument `{_epsilons_range[i][0]}` must be an interval " + f"within {_epsilons_range[i][2]} of type tuple." + ) + + if not isinstance(self.early_stop, bool): + logger.info("The flag `early_stop` has to be of type bool.") + raise TypeError("The flag `early_stop` has to be of type bool.") + + if not isinstance(self.max_iter, int): + logger.info("The argument `max_iter` must be positive of type int.") + raise TypeError("The argument `max_iter` must be positive of type int.") + + if self.max_iter <= 0: + logger.info("The argument `max_iter` must be positive of type int.") + raise ValueError("The argument `max_iter` must be positive of type int.") + + if not isinstance(self.max_inner_iter, int): + logger.info("The argument `max_inner_iter` must be positive of type int.") + raise TypeError("The argument `max_inner_iter` must be positive of type int.") + + if self.max_inner_iter <= 0: + logger.info("The argument `max_inner_iter` must be positive of type int.") + raise ValueError("The argument `max_inner_iter` must be positive of type int.") + + if self.attack_order not in ("fixed", "random", "scheduled"): + logger.info("The argument `attack_order` should be either `fixed`, `random`, or `scheduled`.") + raise ValueError("The argument `attack_order` should be either `fixed`, `random`, or `scheduled`.") + + if self.batch_size <= 0: + logger.info("The batch size has to be positive.") + raise ValueError("The batch size has to be positive.") + + if not isinstance(self.verbose, bool): + logger.info("The argument `verbose` has to be a Boolean.") + raise TypeError("The argument `verbose` has to be a Boolean.") + + def _setup_attack(self): + """ + Set up the initial parameter for each attack component. 
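+        Each semantic parameter (hue, saturation, rotation, brightness, contrast) is initialised uniformly at random
+        within its configured epsilon interval, while the L-infinity perturbation is initialised with small Gaussian
+        noise; note that this initialisation assumes 3x32x32 inputs (e.g. CIFAR-10).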
+ """ + import torch + + hue_space = ( + torch.rand(self.batch_size, device=self.device) * (self.eps_pool[0][1] - self.eps_pool[0][0]) + + self.eps_pool[0][0] + ) + sat_space = ( + torch.rand(self.batch_size, device=self.device) * (self.eps_pool[1][1] - self.eps_pool[1][0]) + + self.eps_pool[1][0] + ) + rot_space = ( + torch.rand(self.batch_size, device=self.device) * (self.eps_pool[2][1] - self.eps_pool[2][0]) + + self.eps_pool[2][0] + ) + bri_space = ( + torch.rand(self.batch_size, device=self.device) * (self.eps_pool[3][1] - self.eps_pool[3][0]) + + self.eps_pool[3][0] + ) + con_space = ( + torch.rand(self.batch_size, device=self.device) * (self.eps_pool[4][1] - self.eps_pool[4][0]) + + self.eps_pool[4][0] + ) + pgd_space = 0.001 * torch.randn([self.batch_size, 3, 32, 32], device=self.device) + + self.eps_space = [self.eps_pool[i] for i in self.enabled_attack] + self.adv_val_space = [ + [hue_space, sat_space, rot_space, bri_space, con_space, pgd_space][i] for i in self.enabled_attack + ] + + def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + """ + Generate the composite adversarial samples and return them in a Numpy array. + + :param x: An array with the original inputs to be attacked. + :param y: An array with the original labels to be predicted. + :return: An array holding the composite adversarial examples. + """ + if y is None: + raise ValueError("The argument `y` must be provided.") + + import torch + + y = check_and_transform_label_format(y, nb_classes=self.estimator.nb_classes) + dataset = torch.utils.data.TensorDataset( + torch.from_numpy(x.astype(ART_NUMPY_DTYPE)), + torch.from_numpy(y.astype(ART_NUMPY_DTYPE)), + ) + data_loader = torch.utils.data.DataLoader( + dataset=dataset, batch_size=self.batch_size, shuffle=False, drop_last=False + ) + + # Start to compute adversarial examples. + x_adv = x.copy().astype(ART_NUMPY_DTYPE) + + # Compute perturbations with batching. + for batch_id, batch_all in enumerate( + tqdm(data_loader, desc=self._description, leave=False, disable=not self.verbose) + ): + (batch_x, batch_y) = batch_all[0], batch_all[1] + batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size + + x_adv[batch_index_1:batch_index_2] = self._generate_batch(x=batch_x, y=batch_y) + + logger.info( + "Success rate of attack: %.2f%%", + 100 * compute_success(self.estimator, x, y, x_adv, batch_size=self.batch_size), + ) + + return x_adv + + def _generate_batch(self, x: "torch.Tensor", y: "torch.Tensor") -> np.ndarray: + """ + Generate a batch of composite adversarial examples and return them in a NumPy array. + + :param x: A tensor of a batch of original inputs to be attacked. + :param y: A tensor of a batch of the original labels to be predicted. + :return: An array holding the composite adversarial examples. + """ + import torch + + self.batch_size = x.shape[0] + self._setup_attack() + self.is_attacked = torch.zeros(self.batch_size, device=self.device).bool() + self.is_not_attacked = torch.ones(self.batch_size, device=self.device).bool() + x, y = x.to(self.device), y.to(self.device) + + return self.caa_attack(x, y).cpu().detach().numpy() + + def _comp_pgd( + self, + data: "torch.Tensor", + labels: "torch.Tensor", + attack_idx: int, + attack_parameter: "torch.Tensor", + ori_is_attacked: "torch.Tensor", + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Compute the adversarial examples for each attack component. + + :param data: A tensor of a batch of original inputs to be attacked. 
+ :param labels: A tensor of a batch of the original labels to be predicted. + :param attack_idx: The index of the attack component (one of the enabled attacks) in the attack pool. + :param attack_parameter: Specify the parameter of the attack component. For example, hue shift angle, saturation + factor, etc. + :param ori_is_attacked: Specify whether the perturbed data is already attacked. + :return: The perturbed data and the corresponding attack parameter. + """ + import torch + import torch.nn.functional as F + + adv_data = self.attack_pool_base[attack_idx](data, attack_parameter) + for _ in range(self.max_inner_iter): + outputs = self.model(adv_data) + + if not self._is_scheduling and self.early_stop: + cur_pred = outputs.max(1, keepdim=True)[1].squeeze() + self.is_attacked = torch.logical_or( + ori_is_attacked, cur_pred != labels.max(1, keepdim=True)[1].squeeze() + ) + + with torch.enable_grad(): + cost = F.cross_entropy(outputs, labels) + _grad = torch.autograd.grad(cost, attack_parameter)[0] + if not self._is_scheduling: + _grad[self.is_attacked] = 0 + attack_parameter = ( + torch.clamp( + attack_parameter + torch.sign(_grad) * self.step_size_pool[attack_idx], + self.eps_pool[attack_idx][0], + self.eps_pool[attack_idx][1], + ) + .detach() + .requires_grad_() + ) + adv_data = self.attack_pool_base[attack_idx](data, attack_parameter) + + return adv_data, attack_parameter + + def caa_hue( + self, data: "torch.Tensor", hue: "torch.Tensor", labels: "torch.Tensor" + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Compute the adversarial examples for hue component. + + :param data: A tensor of a batch of original inputs to be attacked. + :param hue: Specify the hue shift angle. + :param labels: A tensor of a batch of the original labels to be predicted. + :return: The perturbed data and the corresponding hue shift angle. + """ + hue = hue.detach().clone() + hue[self.is_attacked] = 0 + hue.requires_grad_() + sur_data = data.detach().requires_grad_() + + return self._comp_pgd( + data=sur_data, labels=labels, attack_idx=0, attack_parameter=hue, ori_is_attacked=self.is_attacked.clone() + ) + + def caa_saturation( + self, data: "torch.Tensor", saturation: "torch.Tensor", labels: "torch.Tensor" + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Compute the adversarial examples for saturation component. + + :param data: A tensor of a batch of original inputs to be attacked. + :param saturation: Specify the saturation factor. + :param labels: A tensor of a batch of the original labels to be predicted. + :return: The perturbed data and the corresponding saturation factor. + """ + saturation = saturation.detach().clone() + saturation[self.is_attacked] = 1 + saturation.requires_grad_() + sur_data = data.detach().requires_grad_() + + return self._comp_pgd( + data=sur_data, + labels=labels, + attack_idx=1, + attack_parameter=saturation, + ori_is_attacked=self.is_attacked.clone(), + ) + + def caa_rotation( + self, data: "torch.Tensor", theta: "torch.Tensor", labels: "torch.Tensor" + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Compute the adversarial examples for rotation component. + + :param data: A tensor of a batch of original inputs to be attacked. + :param theta: Specify the rotation angle. + :param labels: A tensor of a batch of the original labels to be predicted. + :return: The perturbed data and the corresponding rotation angle. 
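+                 Samples in the batch that are already misclassified have their angle reset to 0 and their gradients
+                 masked, so successful adversarial examples are not perturbed further.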
+ """ + theta = theta.detach().clone() + theta[self.is_attacked] = 0 + theta.requires_grad_() + sur_data = data.detach().requires_grad_() + + return self._comp_pgd( + data=sur_data, labels=labels, attack_idx=2, attack_parameter=theta, ori_is_attacked=self.is_attacked.clone() + ) + + def caa_brightness( + self, data: "torch.Tensor", brightness: "torch.Tensor", labels: "torch.Tensor" + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Compute the adversarial examples for brightness component. + + :param data: A tensor of a batch of original inputs to be attacked. + :param brightness: Specify the brightness factor. + :param labels: A tensor of a batch of the original labels to be predicted. + :return: The perturbed data and the corresponding brightness factor. + """ + brightness = brightness.detach().clone() + brightness[self.is_attacked] = 0 + brightness.requires_grad_() + sur_data = data.detach().requires_grad_() + + return self._comp_pgd( + data=sur_data, + labels=labels, + attack_idx=3, + attack_parameter=brightness, + ori_is_attacked=self.is_attacked.clone(), + ) + + def caa_contrast( + self, data: "torch.Tensor", contrast: "torch.Tensor", labels: "torch.Tensor" + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Compute the adversarial examples for contrast component. + + :param data: A tensor of a batch of original inputs to be attacked. + :param contrast: Specify the contrast factor. + :param labels: A tensor of a batch of the original labels to be predicted. + :return: The perturbed data and the corresponding contrast factor. + """ + contrast = contrast.detach().clone() + contrast[self.is_attacked] = 1 + contrast.requires_grad_() + sur_data = data.detach().requires_grad_() + + return self._comp_pgd( + data=sur_data, + labels=labels, + attack_idx=4, + attack_parameter=contrast, + ori_is_attacked=self.is_attacked.clone(), + ) + + def caa_linf( + self, data: "torch.Tensor", eta: "torch.Tensor", labels: "torch.Tensor" + ) -> Tuple["torch.Tensor", "torch.Tensor"]: + """ + Compute the adversarial examples for L-infinity (PGD) component. + + :param data: A tensor of a batch of original inputs to be attacked. + :param eta: The perturbation in the L-infinity ball. + :param labels: A tensor of a batch of the original labels to be predicted. + :return: The perturbed data. + """ + import torch + import torch.nn.functional as F + + sur_data = data.detach() + adv_data = data.detach().requires_grad_() + ori_is_attacked = self.is_attacked.clone() + for _ in range(self.max_inner_iter): + outputs = self.model(adv_data) + + if not self._is_scheduling and self.early_stop: + cur_pred = outputs.max(1, keepdim=True)[1].squeeze() + self.is_attacked = torch.logical_or( + ori_is_attacked, cur_pred != labels.max(1, keepdim=True)[1].squeeze() + ) + + with torch.enable_grad(): + cost = F.cross_entropy(outputs, labels) + _grad = torch.autograd.grad(cost, adv_data)[0] + if not self._is_scheduling: + _grad[self.is_attacked] = 0 + adv_data = adv_data + self.step_size_pool[5] * torch.sign(_grad) + eta = torch.clamp(adv_data - sur_data, min=self.eps_pool[5][0], max=self.eps_pool[5][1]) + adv_data = torch.clamp(sur_data + eta, min=0.0, max=1.0).detach_().requires_grad_() + + return adv_data, eta + + def update_attack_order(self, images: "torch.Tensor", labels: "torch.Tensor", adv_val: List) -> None: + """ + Update the specified attack ordering. + + :param images: A tensor of a batch of original inputs to be attacked. + :param labels: A tensor of a batch of the original labels to be predicted. 
+ :param adv_val: Optional; A list of a batch of current attack parameters. + """ + import torch + import torch.nn.functional as F + + def hungarian(matrix_batch): + sol = torch.tensor([-i for i in range(1, matrix_batch.shape[0] + 1)], dtype=torch.int32) + for i in range(matrix_batch.shape[0]): + topk = 1 + sol[i] = torch.topk(matrix_batch[i], topk)[1][topk - 1] + while sol.shape != torch.unique(sol).shape: + topk = topk + 1 + sol[i] = torch.topk(matrix_batch[i], topk)[1][topk - 1] + return sol + + def sinkhorn_normalization(ori_dsm, n_iters=20): + for _ in range(n_iters): + ori_dsm /= ori_dsm.sum(dim=0, keepdim=True) + ori_dsm /= ori_dsm.sum(dim=1, keepdim=True) + return ori_dsm + + if self.attack_order == "fixed": + if self.curr_seq.sum() == 0: + self.fixed_order = tuple(self.enabled_attack.index(i) for i in self.fixed_order) + self.curr_seq = torch.tensor(self.fixed_order, device=self.device) + elif self.attack_order == "random": + self.curr_seq = torch.randperm(self.seq_num) + elif self.attack_order == "scheduled": + if self.curr_seq.sum() == 0: + self.curr_dsm = sinkhorn_normalization(torch.rand((self.seq_num, self.seq_num))) + self.curr_seq = hungarian(self.curr_dsm) + self.curr_dsm = self.curr_dsm.detach().requires_grad_() + adv_img = images.clone().detach().requires_grad_() + original_iter_num = self.max_inner_iter + self.max_inner_iter = 3 + self._is_scheduling = True + for tdx in range(self.seq_num): + prev_img = adv_img.clone() + adv_img = torch.zeros_like(adv_img) + for idx in range(self.seq_num): + _adv_img, _ = self.attack_dict[idx](prev_img, adv_val[idx], labels) + adv_img = adv_img + self.curr_dsm[tdx][idx] * _adv_img + self._is_scheduling = False + self.max_inner_iter = original_iter_num + outputs = self.model(adv_img) + with torch.enable_grad(): + cost = F.cross_entropy(outputs, labels) + + dsm_grad = torch.autograd.grad(cost, self.curr_dsm)[0] + + prev_seq = self.curr_seq.clone() + dsm_noise = torch.zeros_like(self.curr_dsm) + while torch.equal(prev_seq, self.curr_seq): + self.curr_dsm = sinkhorn_normalization(torch.exp(self.curr_dsm + dsm_grad + dsm_noise).detach()) + self.curr_seq = hungarian(self.curr_dsm.detach()) + dsm_noise = (torch.randn_like(self.curr_dsm) + 1) * 2 # Escaping local optimum + else: + raise ValueError() + + def caa_attack(self, images: "torch.Tensor", labels: "torch.Tensor") -> "torch.Tensor": + """ + The main algorithm to generate the adversarial examples for composite adversarial attack. + + :param images: A tensor of a batch of original inputs to be attacked. + :param labels: A tensor of a batch of the original labels to be predicted. + :return: The perturbed data. 
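+                 At each outer iteration the attack order is refreshed via `update_attack_order` (fixed, random, or
+                 optimized with Sinkhorn normalization and the `hungarian` assignment helper), the enabled components
+                 are then applied sequentially to the current adversarial batch, and samples that become misclassified
+                 are frozen; the loop terminates early once the whole batch is misclassified.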
+ """ + import torch + + adv_img = images.detach().clone() + adv_val_saved = torch.zeros((self.seq_num, self.batch_size), device=self.device) + adv_val = [self.adv_val_space[idx] for idx in range(self.seq_num)] + + if self.is_attacked.sum() > 0: + for att_id in range(self.seq_num): + if att_id == self.linf_idx: + continue + adv_val[att_id].detach() + adv_val[att_id][self.is_attacked] = adv_val_saved[att_id][self.is_attacked] + adv_val[att_id].requires_grad_() + + for _ in range(self.max_iter): + self.update_attack_order(images, labels, adv_val) + + adv_img = adv_img.detach().clone() + self.is_not_attacked = torch.logical_not(self.is_attacked) + adv_img[self.is_not_attacked] = images[self.is_not_attacked].clone() + adv_img.requires_grad = True + + for tdx in range(self.seq_num): + idx = self.curr_seq[tdx] + adv_img, adv_val_updated = self.attack_dict[idx](adv_img, adv_val[idx], labels) # type: ignore + if idx != self.linf_idx: + adv_val[idx] = adv_val_updated + + outputs = self.model(adv_img) + cur_pred = outputs.max(1, keepdim=True)[1].squeeze() + self.is_attacked = torch.logical_or(self.is_attacked, cur_pred != labels.max(1, keepdim=True)[1].squeeze()) + + if self.is_attacked.sum() > 0: + for att_id in range(self.seq_num): + if att_id == self.linf_idx: + continue + adv_val_saved[att_id][self.is_attacked] = adv_val[att_id][self.is_attacked].detach() + + if self.is_attacked.sum() == self.batch_size: + break + + return adv_img diff --git a/art/attacks/inference/membership_inference/black_box.py b/art/attacks/inference/membership_inference/black_box.py index 21758bbe05..3715e55329 100644 --- a/art/attacks/inference/membership_inference/black_box.py +++ b/art/attacks/inference/membership_inference/black_box.py @@ -98,6 +98,7 @@ def __init__( self.epochs = nn_model_epochs self.batch_size = nn_model_batch_size self.learning_rate = nn_model_learning_rate + self.use_label = True self._regressor_model = RegressorMixin in type(self.estimator).__mro__ @@ -108,67 +109,8 @@ def __init__( self.attack_model_type = "None" else: self.default_model = True - if self.attack_model_type == "nn": - import torch - from torch import nn - class MembershipInferenceAttackModel(nn.Module): - """ - Implementation of a pytorch model for learning a membership inference attack. - - The features used are probabilities/logits or losses for the attack training data along with - its true labels. 
- """ - - def __init__(self, num_classes, num_features=None): - - self.num_classes = num_classes - if num_features: - self.num_features = num_features - else: - self.num_features = num_classes - - super().__init__() - - self.features = nn.Sequential( - nn.Linear(self.num_features, 512), - nn.ReLU(), - nn.Linear(512, 100), - nn.ReLU(), - nn.Linear(100, 64), - nn.ReLU(), - ) - - self.labels = nn.Sequential( - nn.Linear(self.num_classes, 256), - nn.ReLU(), - nn.Linear(256, 64), - nn.ReLU(), - ) - - self.combine = nn.Sequential( - nn.Linear(64 * 2, 1), - ) - - self.output = nn.Sigmoid() - - def forward(self, x_1, label): - """Forward the model.""" - out_x1 = self.features(x_1) - out_l = self.labels(label) - is_member = self.combine(torch.cat((out_x1, out_l), 1)) - return self.output(is_member) - - if self.input_type == "prediction": - num_classes = estimator.nb_classes # type: ignore - self.attack_model = MembershipInferenceAttackModel(num_classes) - else: - if self._regressor_model: - self.attack_model = MembershipInferenceAttackModel(1, num_features=1) - else: - num_classes = estimator.nb_classes # type: ignore - self.attack_model = MembershipInferenceAttackModel(num_classes, num_features=1) - elif self.attack_model_type == "rf": + if self.attack_model_type == "rf": self.attack_model = RandomForestClassifier() elif self.attack_model_type == "gb": self.attack_model = GradientBoostingClassifier() @@ -180,13 +122,15 @@ def forward(self, x_1, label): self.attack_model = KNeighborsClassifier() elif self.attack_model_type == "svm": self.attack_model = SVC(probability=True) + elif attack_model_type != "nn": + raise ValueError("Illegal value for parameter `attack_model_type`.") def fit( # pylint: disable=W0613 self, - x: np.ndarray, - y: np.ndarray, - test_x: np.ndarray, - test_y: np.ndarray, + x: Optional[np.ndarray] = None, + y: Optional[np.ndarray] = None, + test_x: Optional[np.ndarray] = None, + test_y: Optional[np.ndarray] = None, pred: Optional[np.ndarray] = None, test_pred: Optional[np.ndarray] = None, **kwargs @@ -195,10 +139,10 @@ def fit( # pylint: disable=W0613 Train the attack model. :param x: Records that were used in training the target estimator. Can be None if supplying `pred`. - :param y: True labels for `x`. + :param y: True labels for `x`. If not supplied, attack will be based solely on model predictions. :param test_x: Records that were not used in training the target estimator. Can be None if supplying `test_pred`. - :param test_y: True labels for `test_x`. + :param test_y: True labels for `test_x`. If not supplied, attack will be based solely on model predictions. :param pred: Estimator predictions for the records, if not supplied will be generated by calling the estimators' `predict` function. Only relevant for input_type='prediction'. 
:param test_pred: Estimator predictions for the test records, if not supplied will be generated by calling the @@ -216,28 +160,30 @@ def fit( # pylint: disable=W0613 if test_x is not None and self.estimator.input_shape[0] != test_x.shape[1]: # pragma: no cover raise ValueError("Shape of test_x does not match input_shape of estimator") - if not self._regressor_model: + if y is not None and test_y is not None and not self._regressor_model: y = check_and_transform_label_format(y, nb_classes=self.estimator.nb_classes, return_one_hot=True) test_y = check_and_transform_label_format(test_y, nb_classes=self.estimator.nb_classes, return_one_hot=True) - if x is not None and y.shape[0] != x.shape[0]: # pragma: no cover + if x is not None and y is not None and y.shape[0] != x.shape[0]: # pragma: no cover raise ValueError("Number of rows in x and y do not match") - if pred is not None and y.shape[0] != pred.shape[0]: # pragma: no cover + if pred is not None and y is not None and y.shape[0] != pred.shape[0]: # pragma: no cover raise ValueError("Number of rows in pred and y do not match") - if test_x is not None and test_y.shape[0] != test_x.shape[0]: # pragma: no cover + if test_x is not None and test_y is not None and test_y.shape[0] != test_x.shape[0]: # pragma: no cover raise ValueError("Number of rows in test_x and test_y do not match") - if test_pred is not None and test_y.shape[0] != test_pred.shape[0]: # pragma: no cover + if test_pred is not None and test_y is not None and test_y.shape[0] != test_pred.shape[0]: # pragma: no cover raise ValueError("Number of rows in test_pred and test_y do not match") # Create attack dataset # uses final probabilities/logits - if pred is None: + x_len = 0 + test_len = 0 + if pred is None and x is not None: x_len = x.shape[0] - else: + elif pred is not None: x_len = pred.shape[0] - if test_pred is None: + if test_pred is None and test_x is not None: test_len = test_x.shape[0] - else: + elif test_pred is not None: test_len = test_pred.shape[0] if self.input_type == "prediction": @@ -253,6 +199,8 @@ def fit( # pylint: disable=W0613 test_features = test_pred.astype(np.float32) # only for models with loss elif self.input_type == "loss": + if y is None: + raise ValueError("Cannot compute loss values without y.") if x is not None: # members features = self.estimator.compute_loss(x, y).astype(np.float32).reshape(-1, 1) @@ -288,11 +236,14 @@ def fit( # pylint: disable=W0613 test_labels = np.zeros(test_len) x_1 = np.concatenate((features, test_features)) - x_2 = np.concatenate((y, test_y)) + x_2: Optional[np.ndarray] = None + if y is not None and test_y is not None: + x_2 = np.concatenate((y, test_y)) + if self._regressor_model and x_2 is not None: + x_2 = x_2.astype(np.float32).reshape(-1, 1) y_new = np.concatenate((labels, test_labels)) - - if self._regressor_model: - x_2 = x_2.astype(np.float32).reshape(-1, 1) + if x_2 is None: + self.use_label = False if self.default_model and self.attack_model_type == "nn": import torch @@ -301,37 +252,157 @@ def fit( # pylint: disable=W0613 from torch.utils.data import DataLoader from art.utils import to_cuda - loss_fn = nn.BCELoss() - optimizer = optim.Adam(self.attack_model.parameters(), lr=self.learning_rate) # type: ignore + if x_2 is not None: - attack_train_set = self._get_attack_dataset(f_1=x_1, f_2=x_2, label=y_new) - train_loader = DataLoader(attack_train_set, batch_size=self.batch_size, shuffle=True, num_workers=0) + class MembershipInferenceAttackModel(nn.Module): + """ + Implementation of a pytorch model for learning a 
membership inference attack. - self.attack_model = to_cuda(self.attack_model) # type: ignore - self.attack_model.train() # type: ignore + The features used are probabilities/logits or losses for the attack training data along with + its true labels. + """ - for _ in range(self.epochs): - for (input1, input2, targets) in train_loader: - input1, input2, targets = to_cuda(input1), to_cuda(input2), to_cuda(targets) - _, input2 = torch.autograd.Variable(input1), torch.autograd.Variable(input2) - targets = torch.autograd.Variable(targets) + def __init__(self, num_classes, num_features=None): - optimizer.zero_grad() - outputs = self.attack_model(input1, input2) # type: ignore - loss = loss_fn(outputs, targets.unsqueeze(1)) + self.num_classes = num_classes + if num_features: + self.num_features = num_features + else: + self.num_features = num_classes - loss.backward() - optimizer.step() - else: + super().__init__() + + self.features = nn.Sequential( + nn.Linear(self.num_features, 512), + nn.ReLU(), + nn.Linear(512, 100), + nn.ReLU(), + nn.Linear(100, 64), + nn.ReLU(), + ) + + self.labels = nn.Sequential( + nn.Linear(self.num_classes, 256), + nn.ReLU(), + nn.Linear(256, 64), + nn.ReLU(), + ) + + self.combine = nn.Sequential( + nn.Linear(64 * 2, 1), + ) + + self.output = nn.Sigmoid() + + def forward(self, x_1, label): + """Forward the model.""" + out_x1 = self.features(x_1) + out_l = self.labels(label) + is_member = self.combine(torch.cat((out_x1, out_l), 1)) + return self.output(is_member) + + if self.input_type == "prediction": + num_classes = self.estimator.nb_classes # type: ignore + self.attack_model = MembershipInferenceAttackModel(num_classes) + else: # loss + if self._regressor_model: + self.attack_model = MembershipInferenceAttackModel(1, num_features=1) + else: + num_classes = self.estimator.nb_classes # type: ignore + self.attack_model = MembershipInferenceAttackModel(num_classes, num_features=1) + + loss_fn = nn.BCELoss() + optimizer = optim.Adam(self.attack_model.parameters(), lr=self.learning_rate) # type: ignore + + attack_train_set = self._get_attack_dataset(f_1=x_1, f_2=x_2, label=y_new) + train_loader = DataLoader(attack_train_set, batch_size=self.batch_size, shuffle=True, num_workers=0) + + self.attack_model = to_cuda(self.attack_model) # type: ignore + self.attack_model.train() # type: ignore + + for _ in range(self.epochs): + for (input1, input2, targets) in train_loader: + input1, input2, targets = to_cuda(input1), to_cuda(input2), to_cuda(targets) + _, input2 = torch.autograd.Variable(input1), torch.autograd.Variable(input2) + targets = torch.autograd.Variable(targets) + + optimizer.zero_grad() + outputs = self.attack_model(input1, input2) # type: ignore + loss = loss_fn(outputs, targets.unsqueeze(1)) + + loss.backward() + optimizer.step() + else: # no label + + class MembershipInferenceAttackModelNoLabel(nn.Module): + """ + Implementation of a pytorch model for learning a membership inference attack. + + The features used are probabilities/logits or losses for the attack training data along with + its true labels. 
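+                        In this no-label variant, however, only the prediction (or loss) features are used as input;
+                        the true labels are not required.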
+ """ + + def __init__(self, num_features): + + self.num_features = num_features + + super().__init__() + + self.features = nn.Sequential( + nn.Linear(self.num_features, 512), + nn.ReLU(), + nn.Linear(512, 100), + nn.ReLU(), + nn.Linear(100, 64), + nn.ReLU(), + nn.Linear(64, 1), + ) + + self.output = nn.Sigmoid() + + def forward(self, x_1): + """Forward the model.""" + out_x1 = self.features(x_1) + return self.output(out_x1) + + num_classes = self.estimator.nb_classes # type: ignore + self.attack_model = MembershipInferenceAttackModelNoLabel(num_classes) + + loss_fn = nn.BCELoss() + optimizer = optim.Adam(self.attack_model.parameters(), lr=self.learning_rate) # type: ignore + + attack_train_set = self._get_attack_dataset_no_label(f_1=x_1, label=y_new) + train_loader = DataLoader(attack_train_set, batch_size=self.batch_size, shuffle=True, num_workers=0) + + self.attack_model = to_cuda(self.attack_model) # type: ignore + self.attack_model.train() # type: ignore + + for _ in range(self.epochs): + for (input1, targets) in train_loader: + input1, targets = to_cuda(input1), to_cuda(targets) + input1 = torch.autograd.Variable(input1) + targets = torch.autograd.Variable(targets) + + optimizer.zero_grad() + outputs = self.attack_model(input1) # type: ignore + loss = loss_fn(outputs, targets.unsqueeze(1)) + + loss.backward() + optimizer.step() + + else: # not nn y_ready = check_and_transform_label_format(y_new, nb_classes=2, return_one_hot=False) - self.attack_model.fit(np.c_[x_1, x_2], y_ready.ravel()) # type: ignore + if x_2 is not None: + self.attack_model.fit(np.c_[x_1, x_2], y_ready.ravel()) # type: ignore + else: + self.attack_model.fit(x_1, y_ready.ravel()) # type: ignore def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: """ Infer membership in the training set of the target estimator. :param x: Input records to attack. Can be None if supplying `pred`. - :param y: True labels for `x`. + :param y: True labels for `x`. If not supplied, attack will be based solely on model predictions. :param pred: Estimator predictions for the records, if not supplied will be generated by calling the estimators' `predict` function. Only relevant for input_type='prediction'. 
:param probabilities: a boolean indicating whether to return the predicted probabilities per class, or just @@ -349,24 +420,22 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n else: probabilities = False - if y is None: # pragma: no cover - raise ValueError("MembershipInferenceBlackBox requires true labels `y`.") if x is None and pred is None: raise ValueError("Must supply either x or pred") + if y is None and self.use_label: + raise ValueError("y must be provided") + if self.estimator.input_shape is not None and x is not None: # pragma: no cover if self.estimator.input_shape[0] != x.shape[1]: raise ValueError("Shape of x does not match input_shape of estimator") - if not self._regressor_model: + if y is not None and not self._regressor_model: y = check_and_transform_label_format(y, nb_classes=self.estimator.nb_classes, return_one_hot=True) - if y is None: - raise ValueError("None value detected.") - - if x is not None and y.shape[0] != x.shape[0]: # pragma: no cover + if x is not None and y is not None and y.shape[0] != x.shape[0]: # pragma: no cover raise ValueError("Number of rows in x and y do not match") - if pred is not None and y.shape[0] != pred.shape[0]: # pragma: no cover + if pred is not None and y is not None and y.shape[0] != pred.shape[0]: # pragma: no cover raise ValueError("Number of rows in pred and y do not match") if self.input_type == "prediction": @@ -375,6 +444,8 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n else: features = pred.astype(np.float32) elif self.input_type == "loss": + if y is None: + raise ValueError("Cannot compute loss values without y.") if x is not None: features = self.estimator.compute_loss(x, y).astype(np.float32).reshape(-1, 1) else: @@ -388,7 +459,7 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n else: raise ValueError("Value of `input_type` not recognized.") - if self._regressor_model: + if y is not None and self._regressor_model: y = y.astype(np.float32).reshape(-1, 1) if self.default_model and self.attack_model_type == "nn": @@ -398,22 +469,39 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n self.attack_model.eval() # type: ignore predictions: Optional[np.ndarray] = None - test_set = self._get_attack_dataset(f_1=features, f_2=y) - test_loader = DataLoader(test_set, batch_size=self.batch_size, shuffle=False, num_workers=0) - for input1, input2, _ in test_loader: - input1, input2 = to_cuda(input1), to_cuda(input2) - outputs = self.attack_model(input1, input2) # type: ignore - if not probabilities: - predicted = torch.round(outputs) - else: - predicted = outputs - predicted = from_cuda(predicted) - if predictions is None: - predictions = predicted.detach().numpy() - else: - predictions = np.vstack((predictions, predicted.detach().numpy())) + if y is not None and self.use_label: + test_set = self._get_attack_dataset(f_1=features, f_2=y) + test_loader = DataLoader(test_set, batch_size=self.batch_size, shuffle=False, num_workers=0) + for input1, input2, _ in test_loader: + input1, input2 = to_cuda(input1), to_cuda(input2) + outputs = self.attack_model(input1, input2) # type: ignore + if not probabilities: + predicted = torch.round(outputs) + else: + predicted = outputs + predicted = from_cuda(predicted) + + if predictions is None: + predictions = predicted.detach().numpy() + else: + predictions = np.vstack((predictions, predicted.detach().numpy())) + else: + test_set = 
self._get_attack_dataset_no_label(f_1=features) + test_loader = DataLoader(test_set, batch_size=self.batch_size, shuffle=False, num_workers=0) + for input1, _ in test_loader: + input1 = to_cuda(input1) + outputs = self.attack_model(input1) # type: ignore + if not probabilities: + predicted = torch.round(outputs) + else: + predicted = outputs + predicted = from_cuda(predicted) + if predictions is None: + predictions = predicted.detach().numpy() + else: + predictions = np.vstack((predictions, predicted.detach().numpy())) if predictions is not None: if not probabilities: inferred_return = np.round(predictions) @@ -423,13 +511,19 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n raise ValueError("No data available.") elif not self.default_model: # assumes the predict method of the supplied model returns probabilities - inferred = self.attack_model.predict(np.c_[features, y]) # type: ignore + if y is not None and self.use_label: + inferred = self.attack_model.predict(np.c_[features, y]) # type: ignore + else: + inferred = self.attack_model.predict(features) # type: ignore if probabilities: inferred_return = inferred else: inferred_return = np.round(inferred) else: - inferred = self.attack_model.predict_proba(np.c_[features, y]) # type: ignore + if y is not None and self.use_label: + inferred = self.attack_model.predict_proba(np.c_[features, y]) # type: ignore + else: + inferred = self.attack_model.predict_proba(features) # type: ignore if probabilities: inferred_return = inferred[:, [1]] else: @@ -470,6 +564,38 @@ def __getitem__(self, idx): return AttackDataset(x_1=f_1, x_2=f_2, y=label) + def _get_attack_dataset_no_label(self, f_1, label=None): + from torch.utils.data.dataset import Dataset + + class AttackDataset(Dataset): + """ + Implementation of a pytorch dataset for membership inference attack. + + The features are probabilities/logits or losses for the attack training data (`x_1`) along with + its true labels (`x_2`). The labels (`y`) are a boolean representing whether this is a member. + """ + + def __init__(self, x_1, y=None): + import torch + + self.x_1 = torch.from_numpy(x_1.astype(np.float64)).type(torch.FloatTensor) + + if y is not None: + self.y = torch.from_numpy(y.astype(np.int8)).type(torch.FloatTensor) + else: + self.y = torch.zeros(x_1.shape[0]) + + def __len__(self): + return len(self.x_1) + + def __getitem__(self, idx): + if idx >= len(self.x_1): # pragma: no cover + raise IndexError("Invalid Index") + + return self.x_1[idx], self.y[idx] + + return AttackDataset(x_1=f_1, y=label) + def _check_params(self) -> None: if self.input_type not in ["prediction", "loss"]: raise ValueError("Illegal value for parameter `input_type`.") diff --git a/notebooks/README.md b/notebooks/README.md index 95806cbf65..77b36a3720 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -108,6 +108,11 @@ demonstrates a MembershipInferenceBlackBox membership inference attack using sha [label_only_membership_inference.ipynb](label_only_membership_inference.ipynb) [[on nbviewer](https://nbviewer.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/label_only_membership_inference.ipynb)] demonstrates a LabelOnlyDecisionBoundary membership inference attack on a PyTorch classifier for the MNIST dataset. 
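The `MembershipInferenceBlackBox` changes above make the true labels optional in both `fit` and `infer`. The following is a minimal, hypothetical sketch of the label-free path; it assumes a trained ART classifier `classifier` together with placeholder arrays `x_members` (records used to train the target model), `x_nonmembers` (held-out records), and `x_query` (records to test for membership):

```python
from art.attacks.inference.membership_inference import MembershipInferenceBlackBox

# Attack based solely on the target model's predictions; no true labels are supplied.
attack = MembershipInferenceBlackBox(classifier, input_type="prediction", attack_model_type="nn")

# Fit on known members and non-members. Because y/test_y are omitted,
# the label-free attack model is trained internally (use_label becomes False).
attack.fit(x=x_members, test_x=x_nonmembers)

# Infer membership for new records without labels; returns one 0/1 value per record.
membership = attack.infer(x_query)
print("Predicted member fraction:", membership.mean())
```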
+[composite-adversarial-attack.ipynb](composite-adversarial-attack.ipynb)[[on nbviewer](https://nbviewer.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/composite-adversarial-attack.ipynb)] +shows how to launch Composite Adversarial Attack (CAA) on Pytorch-based model ([Hsiung et al., 2023](https://arxiv.org/abs/2202.04235)). +CAA composites the perturbations in Lp-ball and semantic space (i.e., hue, saturation, rotation, brightness, and contrast), +and is able to optimize the attack sequence and each attack component, thereby enhancing the efficiency and efficacy of adversarial examples. + ## Metrics [privacy_metric.ipynb](privacy_metric.ipynb) [[on nbviewer](https://nbviewer.jupyter.org/github/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/privacy_metric.ipynb)] diff --git a/notebooks/composite-adversarial-attack.ipynb b/notebooks/composite-adversarial-attack.ipynb new file mode 100644 index 0000000000..89156d743b --- /dev/null +++ b/notebooks/composite-adversarial-attack.ipynb @@ -0,0 +1,290 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "10b7328c-03a0-4ab5-a451-eee98801bd55", + "metadata": {}, + "source": [ + "# Composite Adversarial Attacks in PyTorch\n", + "This notebook provides a demonstration showing how to use ART to launch the composite adversarial attack (CAA) [1]. CAA consists of the following perturbations:\n", + "\n", + "- Hue\n", + "- Saturation\n", + "- Rotation\n", + "- Brightness\n", + "- Contrast\n", + "- PGD ($\\ell_\\infty$)\n", + "\n", + "[1] Towards Compositional Adversarial Robustness: Generalizing Adversarial Training to Composite Semantic Perturbations (CVPR 2023)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "53cf6ef7-9572-4a30-a71e-8315844a5c3d", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "from art.attacks.evasion import CompositeAdversarialAttackPyTorch\n", + "from art.estimators.classification import PyTorchClassifier\n", + "from art.utils import load_cifar10\n", + "\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2f689043-e7c8-4e87-b49c-5ff7d11a447f", + "metadata": {}, + "outputs": [], + "source": [ + "(x_train, y_train), (x_test, y_test), min_pixel_value, max_pixel_value = load_cifar10()\n", + "\n", + "# Swap the axes to PyTorch's NCHW format.\n", + "x_train = np.transpose(x_train, (0, 3, 1, 2)).astype(np.float32)\n", + "x_test = np.transpose(x_test, (0, 3, 1, 2)).astype(np.float32)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "e5775160-2a0b-49bc-9bf6-57a9613906e1", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a simple convolutional neural network.\n", + "model = nn.Sequential(\n", + " nn.Conv2d(3, 8, 5), nn.BatchNorm2d(8), nn.ReLU(), nn.MaxPool2d(2, 2), \n", + " nn.Conv2d(8, 16, 5), nn.BatchNorm2d(16), nn.ReLU(), nn.MaxPool2d(2, 2),\n", + " nn.Flatten(), \n", + " nn.Linear(5*5*16, 128), \n", + " nn.Linear(128, 10)\n", + ")\n", + "\n", + "# Define the loss function and the optimizer.\n", + "criterion = nn.CrossEntropyLoss()\n", + "optimizer = optim.Adam(model.parameters(), lr=0.01)\n", + "\n", + "# Create the ART classifier.\n", + "classifier = PyTorchClassifier(\n", + " model=model,\n", + " clip_values=(min_pixel_value, max_pixel_value),\n", + " loss=criterion,\n", + " optimizer=optimizer,\n", + " input_shape=(3, 32, 32),\n", + " nb_classes=10,\n", + ")" + ] + }, + { + "cell_type": 
"code", + "execution_count": 4, + "id": "7cdda5a3-d7a3-4210-a47c-fd5571e3ea92", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy on the benign test set: 60.440000000000005%\n" + ] + } + ], + "source": [ + "# Train the ART classifier.\n", + "classifier.fit(x_train, y_train, batch_size=64, nb_epochs=5, verbose=True)\n", + "\n", + "# Evaluate the ART classifier on benign test examples.\n", + "predictions_benign = classifier.predict(x_test)\n", + "\n", + "accuracy = np.sum(np.argmax(predictions_benign, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)\n", + "print(\"Accuracy on the benign test set: {}%\".format(accuracy * 100))" + ] + }, + { + "cell_type": "markdown", + "id": "1b45e556-d405-4620-8a0f-77628b8c5e33", + "metadata": {}, + "source": [ + "## Launch Composite Adversarial Attack\n", + "\n", + "`CompositeAdversarialAttack` has the following parameters:\n", + "- `classifier`: A trained PyTorch classifier.\n", + "- `enabled_attack`: Attack pool selection, and attack order designation for `fixed` order. For simplicity, we use the following abbreviations to specify each attack type. 0: Hue, 1: Saturation, 2: Rotation, 3: Brightness, 4: Contrast, 5: PGD ($\\ell_\\infty$).\n", + "- `hue_epsilon`: The boundary of the hue perturbation. The value is expected to be in the interval `[-np.pi, np.pi]`. Perturbation of `0` means no shift and `-np.pi` and `np.pi` give a complete reversal of the hue channel in the HSV color space in the positive and negative directions, respectively. See `kornia.enhance.adjust_hue` for more details.\n", + "- `sat_epsilon`: The boundary of the saturation perturbation. The value is expected to be in the interval `[0.0, infinity]`. The perturbation of `0.0` gives a black-and-white image, `1.0` gives the original image, and `2.0` enhances the saturation by a factor of 2. See `kornia.geometry.transform.rotate` for more details.\n", + "- `rot_epsilon`: The boundary of the rotation perturbation (in degrees). Positive values mean counter-clockwise rotation. See `kornia.geometry.transform.rotate` for more details.\n", + "- `bri_epsilon`: The boundary of the brightness perturbation. The value is expected to be in the interval `[-1.0, 1.0]`. Perturbation of `0.0` means no shift, `-1.0` gives a complete black image, and `1.0` gives a complete white image. See `kornia.enhance.adjust_brightness` for more details.\n", + "- `con_epsilon`: The boundary of the contrast perturbation. The value is expected to be in the interval `[0.0, infinity]`. Perturbation of `0.0` gives a complete black image, `1` does not modify the image, and any other value modifies the brightness by this factor. See `kornia.enhance.adjust_contrast` for more details.\n", + "- `pgd_epsilon`: The maximum perturbation that the attacker can introduce in the L-infinity ball.\n", + "- `early_stop`: When True, the attack will stop if the perturbed example is classified incorrectly by the classifier.\n", + "- `max_iter`: The maximum number of iterations for attack order optimization.\n", + "- `max_inner_iter`: The maximum number of iterations for each attack optimization.\n", + "- `attack_order`: Specify the scheduling type for the composite adversarial attack. The value is expected to be `fixed`, `random`, or `scheduled`. `fixed` means the attack order is the same as specified in `enabled_attack`. `random` means the attack order is randomly generated at each iteration. `scheduled` means to enable the attack order optimization proposed in the paper. 
If only one attack is enabled, `fixed` will be used.\n", + "- `batch_size`: The batch size to use during the generation of adversarial samples." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f36c10c0-ddb6-4787-8b97-41e1f228ea62", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Composite Adversarial Attack: 0%| | 0/40 [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAEjCAYAAACSDWOaAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/OQEPoAAAACXBIWXMAAA9hAAAPYQGoP6dpAABHlklEQVR4nO3de3RU5b0//vfcJ5lM7ncSIICCgmBFRLyBiCBeKpbaart+B2yr1AItovWI31YqeppWPS1WkdbWA9ZVSw89UqtVvKDEowJVhCIolEuAYG4EmFwmmfvz+4OTaYbM50kmJDu392utWQvmM3vP3nv2PHkymff+mJRSCkREREQGMff2BhAREdHgwskHERERGYqTDyIiIjIUJx9ERERkKE4+iIiIyFCcfBAREZGhOPkgIiIiQ3HyQURERIbi5IOIiIgMxclHPzV8+HDMnz8/+v/NmzfDZDJh8+bN3fYcJpMJP/nJT7ptfT3ho48+wmWXXQaXywWTyYSdO3f29iYR9aqBPDbMnz8fKSkp3b7exx9/HCNGjIDFYsGFF17Y7eun9jj56IK1a9fCZDJFb06nE+eeey4WLVqEmpqa3t68hLz22mt9foIhCQaDuPXWW3Hy5En88pe/xAsvvIBhw4bhmWeewdq1a3t789DU1ITly5fjuuuuQ2ZmJkwmU5/YLuo5HBv6nzfffBP3338/Lr/8cqxZswY//elPe3uTtHw+H0pLS3H++ecjOTkZQ4YMwa233oo9e/b09qYlxNrbG9CfrVixAiUlJfD5fHj//fexevVqvPbaa9i9ezeSk5MN3ZarrroKLS0tsNvtCS332muvYdWqVXEHmZaWFlitffcUOXjwII4cOYLf/va3+M53vhO9/5lnnkF2dnbMb3+9oa6uDitWrMDQoUMxYcKEbv3Nk/o2jg39xzvvvAOz2Yznnnsu4WPUG775zW/ir3/9K+68805cdNFFqKysxKpVqzBlyhR8+umnGDZsWG9vYqcMjLOnl8yePRsXX3wxAOA73/kOsrKy8Itf/AIvv/wybr/99rjLeL1euFyubt8Ws9kMp9PZrevs7vV1t9raWgBAenp6jz9XKBRCJBJJaHAqKChAVVUV8vPz8fHHH2PSpEk9uIXUl3Bs6D9qa2uRlJTUbRMPpRR8Ph+SkpK6ZX1tffHFF3jppZdw33334fHHH4/ef+WVV2L69Ol46aWXcM8993T78/YE/tmlG02fPh0AUF5eDuBff588ePAgrr/+erjdbnzzm98EAEQiEaxcuRJjx46F0+lEXl4eFixYgFOnTsWsUymFRx99FEVFRUhOTsbVV18d9+M16e+627Ztw/XXX4+MjAy4XC6MHz8eTz75ZHT7Vq1aBQAxHxW3ivd33R07dmD27NlITU1FSkoKrrnmGmzdujXmMa0fPX/wwQdYunQpcnJy4HK5cMstt+D48eMdHsddu3Zh/vz5GDFiBJxOJ/Lz8/Gtb30LJ06ciD5m/vz5mDp1KgDg1ltvhclkwrRp0zB8+HDs2bMHZWVl0f2ZNm1adDmPx4MlS5aguLgYDocDo0aNws9//nNEIpHoYw4fPgyTyYQnnngCK1euxMiRI+FwOPDZZ58BAPbu3YujR492uB8OhwP5+fkdPo4GPo4Np53t2NDq0KFDmDVrFlwuFwoLC7FixQqc2aC9M8fRZDJhzZo18Hq90X1s/dNoKBTCI488En3/Dx8+HA8++CD8fn/M8wwfPhw33ngj3njjDVx88cVISkrCb37zGwCdG28AoKqqCnv37kUwGNTud2NjIwAgLy8v5v6CggIA6JEJT0/hJx/d6ODBgwCArKys6H2hUAizZs3CFVdcgSeeeCL6keuCBQuwdu1a3HHHHfj+97+P8vJyPP3009ixYwc++OAD2Gw2AMBDDz2ERx99FNdffz2uv/56fPLJJ5g5cyYCgUCH2/PWW2/hxhtvREFBAX7wgx8gPz8fn3/+OV599VX84Ac/wIIFC1BZWYm33noLL7zwQofr27NnD6688kqkpqbi/vvvh81mw29+8xtMmzYNZWVlmDx5cszjFy9ejIyMDCxfvhyHDx/GypUrsWjRIvzpT3/qcLsPHTqEO+64A/n5+dizZw+effZZ7NmzB1u3boXJZMKCBQswZMgQ/PSnP8X3v/99TJo0CXl5efB6vVi8eDFSUlLw//7f/wPwrzdqc3Mzpk6dii+++AILFizA0KFD8eGHH2LZsmWoqqrCypUrY7ZjzZo18Pl8uOuuu+BwOJCZmQkAOO+88zB16lT+GYU6jWND94wNABAOh3Hdddfh0ksvxWOPPYaNGzdi+fLlCIVCWLFiRfRxnTmOL7zwAp599ln8/e9/x+9+9zsAwGWXXQbg9CdWzz//PL761a/i3nvvxbZt21BaWorPP/8cGzZsiNmmffv24fbbb8eCBQtw5513YvTo0QmNN8uWLcPzzz+P8vJyDB8+XNz3kSNHoqioCP/5n/+J0aNH40tf+hIqKytx//33o6SkBLfddluHx6/PUJSwNWvWKADq7bffVsePH1cVFRVq3bp1KisrSyUlJaljx44ppZSaN2+eAqAeeOCBmOX/93//VwFQf/jDH2Lu37hxY8z9tbW1ym63qxtuuEFFIpHo4x588EEFQM2bNy9637vvvqsAqHfffVcppVQoFFIlJSVq2LBh6tSpUzHP03ZdCxcuVNJpAEAtX748+v85c+You92uDh48GL2vsrJSud1uddVVV7U7PjNmzIh5rnvuuUdZLBbl8XjiPl+r5ubmdvf98Y9/VADUe++9126f169fH/PYsWPHqqlTp7ZbxyOPPKJcLpf65z//GXP/Aw88oCwWizp69KhSSqny8nIFQKWmpqra2tp26wEQd/06H330kQKg1qxZk9By1L9wbOjZsaH1uC1evDhmm2+44QZlt9vV8ePHlVKdP46t63S5XDGP27lzpwKgvvOd78Tcf9999ykA6p133
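The source of the attack cell itself is not shown above; as a rough sketch consistent with the constructor documented in this PR (and assuming the `classifier`, `x_test`, and `y_test` objects created in the earlier notebook cells), the attack can be instantiated and run as follows:

```python
# A sketch of the composite attack over all six components with scheduled ordering
# (not the notebook's exact cell).
attack = CompositeAdversarialAttackPyTorch(
    classifier=classifier,
    enabled_attack=(0, 1, 2, 3, 4, 5),  # hue, saturation, rotation, brightness, contrast, PGD
    attack_order="scheduled",           # optimize the attack sequence as proposed in the paper
    max_iter=5,
    max_inner_iter=10,
    batch_size=256,
)

# Generate composite adversarial examples and measure the robust accuracy.
x_test_adv = attack.generate(x=x_test, y=y_test)
predictions_adv = classifier.predict(x_test_adv)
accuracy_adv = np.sum(np.argmax(predictions_adv, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on composite adversarial examples: {}%".format(accuracy_adv * 100))
```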
[... base64-encoded PNG payload of a matplotlib figure output omitted ...]
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAEjCAYAAACSDWOaAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/OQEPoAAAACXBIWXMAAA9hAAAPYQGoP6dpAABL2ElEQVR4nO3deZRU5bU+/qfmqq6eoelmaKABQcWACSriBCqCqLli0ESTtS6YRI0BEkXjFb+JRHSlE/EmGEUSk1wwWTHkkisxGsUBBa8RiSL8UBREZuiBsXqorvm8vz+83aHo2rsHuk8PPJ+1ei2tXXWmOmfXyzlnn+0wxhgQERER2cTZ1QtAREREpxcOPoiIiMhWHHwQERGRrTj4ICIiIltx8EFERES24uCDiIiIbMXBBxEREdmKgw8iIiKyFQcfREREZCsOPnqooUOHYtasWU3/v3btWjgcDqxdu7bD5uFwOPDjH/+4w6bXGd577z1cdNFFCAaDcDgc2Lx5c1cvElGX6s25YdasWcjOzu7w6S5atAjDhg2Dy+XCueee2+HTp+Y4+GiH5cuXw+FwNP35/X6MHDkSc+bMQXV1dVcvXpu89NJL3X6AIUkkErjppptw7Ngx/OIXv8Af/vAHDBkyBE899RSWL1/e1YuH+vp6LFiwAFdffTUKCwvhcDi6xXJR52Fu6HleffVV3Hfffbj44ouxbNky/OQnP+nqRVJFo1GUl5fj7LPPRlZWFgYOHIibbroJW7du7epFaxN3Vy9AT7Zw4UKUlZUhGo3i7bffxtKlS/HSSy/ho48+QlZWlq3LctlllyESicDr9bbpcy+99BKWLFmSMclEIhG43d13F9m5cyf27t2L3/zmN/j2t7/d9PpTTz2Fvn37pv3rryscOXIECxcuxODBgzF27NgO/ZcndW/MDT3HG2+8AafTid/97ndt3kZd4Rvf+Ab+9re/4bbbbsOXvvQlVFRUYMmSJZgwYQI+/PBDDBkypKsXsVV6x97TRaZNm4bzzjsPAPDtb38bffr0wc9//nM8//zzuOWWWzJ+JhwOIxgMdviyOJ1O+P3+Dp1mR0+vox06dAgAkJ+f3+nzSiaTsCyrTcmpf//+qKysRElJCd5//32cf/75nbiE1J0wN/Qchw4dQiAQ6LCBhzEG0WgUgUCgQ6Z3ooMHD+K5557Dvffei0WLFjW9fumll+KKK67Ac889h7vvvrvD59sZeNmlA11xxRUAgN27dwP41/XJnTt34pprrkFOTg6+8Y1vAAAsy8LixYsxevRo+P1+FBcX44477sDx48fTpmmMwSOPPIJBgwYhKysLl19+ecbTa9J13Q0bNuCaa65BQUEBgsEgxowZg8cff7xp+ZYsWQIAaaeKG2W6rrtp0yZMmzYNubm5yM7OxpVXXol333037T2Np57/8Y9/YN68eSgqKkIwGMQNN9yAw4cPt7gdt2zZglmzZmHYsGHw+/0oKSnBN7/5TRw9erTpPbNmzcLEiRMBADfddBMcDgcmTZqEoUOHYuvWrVi3bl3T+kyaNKnpc6FQCHfddRdKS0vh8/kwYsQI/OxnP4NlWU3v2bNnDxwOBx577DEsXrwYw4cPh8/nw8cffwwA2LZtG/bt29fievh8PpSUlLT4Pur9mBs+d6q5odGuXbswdepUBINBDBgwAAsXLsTJDdpbsx0dDgeWLVuGcDjctI6Nl0aTySQefvjhpuN/6NCheOCBBxCLxdLmM3ToUFx33XV45ZVXcN555yEQCODXv/41gNblGwCorKzEtm3bkEgk1PWuq6sDABQXF6e93r9/fwDolAFPZ+GZjw60c+dOAECfPn2aXksmk5g6dSouueQSPPbYY02nXO+44w4sX74ct956K773ve9h9+7dePLJJ7Fp0yb84x//gMfjAQA8+OCDeOSRR3DNNdfgmmuuwQcffIApU6YgHo+3uDyvvfYarrvuOvTv3x/f//73UVJSgk8++QQvvvgivv/97+OOO+5ARUUFXnvtNfzhD39ocXpbt27FpZdeitzcXNx3333weDz49a9/jUmTJmHdunUYP3582vvnzp2LgoICLFiwAHv27MHixYsxZ84c/PnPf25xuXft2oVbb70VJSUl2Lp1K55++mls3boV7777LhwOB+644w4MHDgQP/nJT/C9730P559/PoqLixEOhzF37lxkZ2fj//2//wfgXwdqQ0MDJk6ciIMHD+KOO+7A4MGD8c4772D+/PmorKzE4sWL05Zj2bJliEajuP322+Hz+VBYWAgAOOusszBx4kReRqFWY27omNwAAKlUCldffTUuvPBCPProo1i9ejUWLFiAZDKJhQsXNr2vNdvxD3/4A55++mn885//xG9/+1sAwEUXXQTg8zNWzzzzDG688Ubcc8892LBhA8rLy/HJJ59g1apVacu0fft23HLLLbjjjjtw2223YdSoUW3KN/Pnz8czzzyD3bt3Y+jQoeK6Dx8+HIMGDcJ//ud/YtSoUfjiF7+IiooK3HfffSgrK8PNN9/c4vbrNgy12bJlywwA8/rrr5vDhw+b/fv3mxUrVpg+ffqYQCBgDhw4YIwxZubMmQaAuf/++9M+/7//+78GgPnjH/+Y9vrq1avTXj906JDxer3m2muvNZZlNb3vgQceMADMzJkzm1578803DQDz5ptvGmOMSSaTpqyszAwZMsQcP348bT4nTmv27NlG2g0AmAULFjT9//Tp043X6zU7d+5seq2iosLk5OSYyy67rNn2mTx5ctq87r77buNyuUwoFMo4v0YNDQ3NXvvTn/5kAJi33nqr2TqvXLky7b2jR482EydObDaNhx9+2ASDQfPpp5+mvX7//fcbl8tl9u3bZ4wxZvfu3QaAyc3NNYcOHWo2HQAZp6957733DACzbNmyNn2Oehbmhs7NDY3bbe7cuWnLfO211xqv12sOHz5sjGn9dmycZjAYTHvf5s2bDQDz7W9/O+31e++91wAwb7zxRtNrQ4YMMQDM6tWr097b2nxz4nrt3r1bXX9jjNmwYYMZPny4AdD0N27cOFNZWdniZ7sTXnY5BZMnT0ZRURFKS0tx8803Izs7G6tWrcLAgQPT3nfnnXem/f/KlSuRl5eHq666CkeOHGn6GzduHLKzs/Hmm28CAF5//XXE43HMnTs37ZTnXXfd1eKybdq0Cbt378Zdd93V7J6IE6fVWqlUCq+++iqmT5+OYcOGNb3ev39/fP3rX8fbb7+N2tratM/cfvvtafO69NJLkUqlsHfvXnVeJ546jEajOHLkCC688EIAwAcffNDmZW+0cuVKXHrppSgoKEjb7pMnT0YqlcJbb72V9v4ZM2agqKio2XSMMTzrQSrmhs7JDY3mzJmTtsxz5sxBPB7H66+/DqD121Hy0ksvAQDmzZuX9vo999wDAPj73/+e9npZWRmmTp2a9lpb8s3y5cthjFHPejQqKCjAueeei
/vvvx9//etf8dhjj2HPnj246aabEI1GW/x8d8HLLqdgyZIlGDlyJNxuN4qLizFq1Cg4nenjObfbjUGDBqW9tmPHDtTU1KBfv34Zp9t4I2XjgXjGGWekxYuKilBQUKAuW+Np3nPOOaf1K6Q4fPgwGhoaMGrUqGaxs846C5ZlYf/+/Rg9enTT64MHD057X+Myn3zt+mTHjh3DQw89hBUrVjRti0Y1NTXtXQXs2LEDW7ZsyTigANBsXmVlZe2eF53emBs+19G5Afj8BtoTBzkAMHLkSACf368FtH47Svbu3Qun04kRI0akvV5SUoL8/Pxmg6RMuaKt+aY1ampqcOmll+IHP/hB00AIAM477zxMmjQJy5Ytazag7a44+DgFF1xwQdMd7RKfz9cs6ViWhX79+uGPf/xjxs9IO2tP43K5Mr5uTrox7GRf/epX8c477+AHP/gBzj33XGRnZ8OyLFx99dXNbtRqC8uycNVVV+G+++7LGG9MYI160s1b1L0wN+jamxtaq6O2Y2vPBGXKFW3NN63xP//zP6iursa//du/pb0+ceJE5Obm4h//+AcHHyQbPnw4Xn/9dVx88cXqD1xjvfaOHTvSRvqHDx9u8V8Iw4cPBwB89NFHmDx5svi+1h5cRUVFyMrKwvbt25vFtm3bBqfTidLS0lZNS3P8+HGsWbMGDz30EB588MGm13fs2NHqaUjrNHz4cNTX16vbg6grMTe0zLIs7Nq1K+3H+9NPPwWApssWrd2OkiFDhsCyLOzYsQNnnXVW0+vV1dUIhUKtepZGZ+SbxgfVpVKptNeNMUilUkgmkx02r87Gez66wFe/+lWkUik8/PDDzWLJZBKhUAjA59eNPR4PnnjiibR/EZxclZHJl770JZSVlWHx4sVN02t04rQanytw8ntO5nK5MGXKFDz//PNNpzaBzw+GZ599Fpdccglyc3NbXK6WNP6L6OR/AbVmnRsFg8GM6/PVr34V69evxyuvvNIsFgqFWn3gtrbUlqitmBta58knn0xb5ieffBIejwdXXnklgNZvR8k111wDoPn2/PnPfw4AuPbaa1tcxrbkm9aW2jYOuFasWJH2+t/+9jeEw2F88YtfbHG5ugue+egCEydOxB133IHy8nJs3rwZU6ZMgcfjwY4dO7By5Uo8/vjjuPHGG1FUVIR7770X5eXluO6663DNNddg06ZNePnll9G3b191Hk6nE0uXLsWXv/xlnHvuubj11lvRv39/bNu2DVu3bm06IMaNGwcA+N73voepU6fC5XKJ5VqPPPIIXnvtNVxyySX47ne/C7fbjV//+teIxWJ49NFHO2Tb5Obm4rLLLsOjjz6KRCKBgQMH4tVXX216PkJrjBs3DkuXLsUjjzyCESNGoF+/frjiiivwgx/8AH/7299w3XXXYdasWRg3bhzC4TA+/PBD/OUvf8GePXta3K5A20ptn3zySYRCIVRUVAAAXnjhBRw4cADA5+WGeXl5rV4v6v2YG1rm9/uxevVqzJw5E+PHj8fLL7+Mv//973jggQeaLqe0djtKxo4di5kzZ+Lpp59GKBTCxIkT8c9//hPPPPMMpk+fjssvv7zF5WxLvmltqe2Xv/xljB49GgsXLsTevXtx4YUX4rPPPsOTTz6J/v3741vf+lbbNmZX6qIqmx6tsVzsvffeU9+XqYTrRE8//bQZN26cCQQCJicnx3zhC18w9913n6moqGh6TyqVMg899JDp37+/CQQCZtKkSeajjz4yQ4YMUcvpGr399tvmqquuMjk5OSYYDJoxY8aYJ554oimeTCbN3LlzTVFRkXE4HGmldTipnM4YYz744AMzdepUk52dbbKysszll19u3nnnnVZtH2kZT3bgwAFzww03mPz8fJOXl2duuukmU1FR0Wx5pFLbqqoqc+2115qcnJxmZbF1dXVm/vz5ZsSIEcbr9Zq+ffuaiy66yDz22GMmHo8bY/5Varto0aKMy3fyNDWNZXiZ/lpTVkc9C3ND5+aGxu22c+dOM2XKFJOVlWWKi4vNggULTCqVatd2lL6LRCJhHnroIVNWVmY8Ho8pLS018+fPN9FoNO19Q4YMMddee23G5W1NvmlchtbmhGPHjpm7777bjBw50vh8PtO3b19z8803m127drX42e7EYUwH3eFDRERE1Aq854OIiIhsxcEHERER2YqDDyIiIrIVBx9ERERkKw4+iIiIyFYcfBAREZGtOu0hY0uWLMGiRYtQVVWFsWPH4oknnsAFF1zQ4ucsy0JFRQVycnLa1WGRiE6dMQZ1dXUYMGBAs/4jnam9eQNg7iDqam3KG53x8JAVK1YYr9dr/uu//sts3brV3HbbbSY/P99UV1e3+Nn9+/eLD2XiH//4Z+/f/v37OyNFZHQqecMY5g7+8a+7/LUmb3TKQ8bGjx+P888/v+n5+5ZlobS0FHPnzsX999+vframpgb5+fntmu+4iVeLsenfuE2MRRrCYix07JgYC/j9YmzAwEFiLDuYJcYAwKF8Jdo/6HwerxjzuORRqJWMyxO1UmIoEJBPnLlc8oKmEnIPle2fbBVjH2/9UIzt3bNbjH2241MxVnGAPVo0oVDItkfAn0reAP6VOx778z8QyMpu/oaUvC8fO1ItxmKxqBgbWjZMjOUr/UzcyvHh9WTu+AoA3hb+NelR4m6HnFdSKXkds7Pk41xbDy3mcsjrGAodl5clJ8P3+n88bo+8LMr8HE55OZOWnBvbe0LQ6dA/2NAQEWNut7wePp/8e5SIy+uRTMgxvzJNh7P5stTV1eELZ5/RqrzR4Zdd4vE4Nm7ciPnz5ze95nQ6MXnyZKxfv77Z+2OxGGKxWNP/19XVtXveLmXn82cFxZg2/vL55R3Bpww+Asr8tBgAOIzcNt6pjD583s4YfMgDhayAvL31wYfcPMmvdKD0Kuvndsu7sp2XDXobuy5ftDVvAHLuCGRlIxDMaf6BlLwv+xvqxVimJNsoK9N8GmPZckw7HtXBh/I5QB+cqIOPpHwsZwflmKe9gw9lmyaV7yknR9mmnt4x+HC5lMGekuf8yu9RXBt8xGNiLOCX87F2XLQmb3R4Vj5y5AhSqRSKi4vTXi8uLkZVVVWz95eXlyMvL6/pryNbLxNRz9DWvAEwdxD1ZF3+T8L58+ejpqam6W///v1dvUhE1AMwdxD1XB1+2aVv375wuVyork6/hlpdXY2SkpJm7/f5fPD5fB29GETUg7Q1bwDMHUQ9WYcPPrxeL8aNG4c1a9Zg+vTpAD6/cWzNmjWYM2dOR88uzfAzRokxj0O+2SySlG+2cirX/LL98r0bfpc8P59LvqYJAFZSjjuV24P9ykVI7V6RhJHXUbm/Sb/e65aXxeWVryOOPXesGDvrrLPEmHa9063cC+RSrr8ePnJIjH38sXzzKwBsU26c3fbJJ2Lsk63y52qUG/F6uo7MG8EsH7Kymg9KnEZOd7GwPIix4g1izO+Vj4GgckO2W7kk7oSSO5TjCgACXiUHQL6XLKbcjOtzy8eW16PlHDGk3jip3fPiVO5bcSjrp90Pp91GE26Q70/Tvgnt
/jQDJakCcCobzqPc86Hd85KIyfd1uJUcGNAG9xl+U+JeeRmazbfV72yDefPmYebMmTjvvPNwwQUXYPHixQiHw7j11ls7Y3ZE1AswbxCdPjpl8PG1r30Nhw8fxoMPPoiqqiqce+65WL16dbObyYiIGjFvEJ0+Ou0Jp3PmzOn0yyxE1LswbxCdHrq82oWIiIhOLxx8EBERka04+CAiIiJbddo9H13hjDPOEGNel/LIciOXU1kJ+fHqAW8fMeZxyNP0OvRS2ySU0lelDMunlJRaSo+WZEouw3Io41NHSnlku1HKcLXyLWU5tcc1O5QSPK1cWHtcdXFRoRjrc8lF8kQBTJp4qRxUHkuciMv7TTIpb5uDBw6Ise3b5NLejz7cknk5Egm8+Le/ip/rztxIwp3hGNNKWLX84HEqZahO5ZHt2jSVfTkWkUt7XS79uSZ+t1zGnlB61Dghr4dRHkVgHPJPSAryMen1yMupldPCyN+FlqtSlnxcNTTI2/vo4cNirLhvgbwsSp52efWfXZey3VzKtlGqntXfjZjyOHst5yYy9OgyyrROxjMfREREZCsOPoiIiMhWHHwQERGRrTj4ICIiIltx8EFERES24uCDiIiIbNWrSm2HDOovxpJKuZgjJZe2RhvCYizgUzq3Qi7t8ijleQCQSsrlvUbpTutxy2V48ai8jkgppXRKJ1CHJZf2xjOUYTXya6W2Simd1iXTqZSnaWXG9TG5zC5cVyfGggG95NGbpZQSOuVtqu0ZWifQkaPkjs7Dhw8XY1dNvirj6+FwuMeW2nqdKXgzlMdaSfkY0I9XpWRW+ZwzJe9bXo+8/ziUjtgep3IcA/Ao+5blkD/rtORy+2RU69Atd/aOxuX5ZSnHh0s70C35u4CRc0A4Kue4jRs/EGMJpey5IPd8MebTfhuU1QMAh1HWUcllWg50KHnVspQya2V+JsPnMr0m4ZkPIiIishUHH0RERGQrDj6IiIjIVhx8EBERka04+CAiIiJbcfBBREREtupxpbb+QJYY8ygls4moXDKVCNfLM4zLJWiupBxzJuVxnTMll6gCgFvpiOv2yF+ZWynhjWqdKZXyKEspX3MoXULrakJiLFspszNJrVRLqVHzyMvZEJO/J6mrKwCklC6g544+W14WAFl+vxhzKeXSeldfudRW684bS2oleJnL+tRyv27O43bA626+jdUydafSSVXp+uxSiqMdyuc8UDobK8dqytLrNF25Xnl5lO7dsOTcaSWVfSEllwzX14bEWHaWfHw4lZyTVPKxlhtDSufaY7VyLOCWj6u4kqriCXmbub3KvgbAKMdeKqV1vZa/w7iy3bxuebsZpbTZSmUoZ8/wmoRnPoiIiMhWHHwQERGRrTj4ICIiIltx8EFERES24uCDiIiIbMXBBxEREdmqx5XaDho4SIzVHz8uxuIJudQoUS93L+3jl0vJcpTSz6yUXKLkUsq+AMCvlLB6ffI8rahcMpxUyk1h5DGoQ2nBGI0rHWGVbXrwgFyj5vYqpcRuuUTZ7ZdLDKuPHZNjR2vEWK4yTTj1cmmjjOu1pp1GKaVzKuW0bmWiMaWUOpXMPD/p9Z7A50jB52h+DKUc8nbQOtcmlJJrp1Jqayzlc472lcy7W2iJ6lLK9I1S+gulI2rSkqeZUrr61tfVirF92jZVylu1MtTSXPkxDEcPHxZj/98Wudx+zOjRYsxSvouY8tgHv9Fzh6WUPUca5JjXLW+bZELO1S63vN0SyqMPYhk6gsfjckf2k3X4mY8f//jHcDgcaX9nnnlmR8+GiHoR5g2i00unnPkYPXo0Xn/99X/NRHmICRERwLxBdDrplKPb7XajpKSkMyZNRL0U8wbR6aNTbjjdsWMHBgwYgGHDhuEb3/gG9u3bJ743FouhtrY27Y+ITj9tyRsAcwdRT9bhg4/x48dj+fLlWL16NZYuXYrdu3fj0ksvRV1d5hsQy8vLkZeX1/RXWlra0YtERN1cW/MGwNxB1JN1+OBj2rRpuOmmmzBmzBhMnToVL730EkKhEP77v/874/vnz5+Pmpqapr/9+/d39CIRUTfX1rwBMHcQ9WSdfkdXfn4+Ro4cic8++yxj3OfzweeTy1lP5nbL5Y9bt34ixlxKmWI8Kpd95WcHxdixQ3L5VsQnb9ospXwXAPKylc69SvdNSykndsaVzpRK2ahLKcGLROSyqtpaubz1eEhelKwceXtnBeWYUb7fvbt2i7Gdu3aJsXFjx8jzc+rj9qTSDdKllK9ppYTGyCXYSeW7Tybk8jy3sN2k1+3SUt4A5NzhSsbgSjbfpy2l3NCpdASN1CiXczKUGzYyTrkM1RWQ84NXKW31uuVuuADgSITFWEpZVqTk6ToydAhuZBzydguH5TL26mp5WYK52fL8lOPOKDcox+vl+fk9cj4+HAqJsQ8+kkt0gz55e44YNkyMAYBbybmxBvlMYMCt/DbE5FydUroWqw3YoxmOC+VxDyfr9AxTX1+PnTt3on///p09KyLqJZg3iHq3Dh983HvvvVi3bh327NmDd955BzfccANcLhduueWWjp4VEfUSzBtEp5cOv+xy4MAB3HLLLTh69CiKiopwySWX4N1330VRUVFHz4qIegnmDaLTS4cPPlasWNHRkySiXo55g+j0wsZyREREZCsOPoiIiMhWPa55Qk1ELu3a9PF2MebzKKuqdP30eOWSqV0H5ecK5Abk8q3iwgJ5WQAM6NdHjPXJk8tNPV6lO61Souz2BsSYlZDL7OIpufSzoCBPjNWE5dLmg1VHxFgkVinG3C65Juz4cblU0u2S94vjSlfOPQf1p2+W9C0UY/m5OWIsmZC3DYy87zuMXJ6ZTMj7t9frz/i6y6WXdHZnPoeB39G87NBh5FJErdTWp5Q4Z1vy8ZEHeRs6a+SSWJ8lz88vr8Ln022QSyqdUbnc1OtUyv9TSg6olbdbTlCeZkGhfHzsPlAlxnbtl2OffrZGjB0/EhJj9VGlzDqxVYy5IH8uoZQZnzNqpBgDgH+79moxNrBY/m2I+eX9JhqW97d4WN6muUa+58oRaV7261DmczKe+SAiIiJbcfBBREREtuLgg4iIiGzFwQcRERHZioMPIiIishUHH0RERGSrHldqG3HIJaOVIbnMx6NUDiaScgladk6uGKtNKKV7Ru7qevDYcXlhAOyqlEtK85QS3ryszGWTAJCjlHdm58hlsS6lU6RTWRZvUO5MWXk4JMZ27asWY1WH5W1aE5JL2+rr5f0iHJVLE/M+kUu3iwvl9QOAc0aNEGPjzpW75Rbkyh2Ns5SybyTlEt1USv43hiV8v5ZSft7dHdy3D1lZzbdjQunuW1crdwtNKR2DDx48KMaO++Ty73C9XMbdr49chpodlI9xAHC55XLLuFJyrZXbO5Uy/bBSvht1yiW6MHJe2Vchl9vvPiDngHBcXk5/Xj8x5gjKXV21ozyoPNqgcu+nYqyiQs5xAPC///sPMXbWGXJH3KJ8+bcqUh8SY+Hao2IscdYoMVZf0/x3LKyUep+MZz6IiIjIVhx8EBERka04+CAiIiJbcfBBRER
EtuLgg4iIiGzFwQcRERHZioMPIiIislWPe86HJ5gvx7Lk51V43PIzOVyQ68MHDBgqxuJR+dkKx44cEmN1Ss01ABwPyNPN8cvPgXA65OeHZAXl+vhglvwMEK1VfVaBvL29AXk5Dx2Vn3Fw+Lj83IBIQt5d3X55WQJOeVk82XKNvxtym/pdu/aLMQA4UinX8qdi8nS/NPZsMTawuK8yTflZFMm4/HwLDzI/F8KKyN9Dd/fOhvfg8zV/Bo3DIT8nxVLa2Eci8nNi9lRViDHtMRdu5Z99BXny8xqCfjlXAYBPmafHLa+/O8P2auR0y88WaVDa0buV9TAueX5Vx+rFWMKSN1xWTr4YA+RnnMTr5X3dCXmDRqPyfpGrPB/qwnFfEGMAEK6Rc3VU+c3Zt0/O/zt37hRjkaT827j3qPzcjkhD8/WPKXnoZDzzQURERLbi4IOIiIhsxcEHERER2YqDDyIiIrIVBx9ERERkKw4+iIiIyFZtLrV96623sGjRImzcuBGVlZVYtWoVpk+f3hQ3xmDBggX4zW9+g1AohIsvvhhLly7FGWec0SELnJcnlxvm5MrlTbDkEqBIRC4Xiykt15MJuUwzaSk1bw69XM6f00eMeXLzxVhNSG6NHI7L5VRhl7ysLqe8iziPyuWJifhhMVZfJ5faWvJiwqmU/To9Stv4pLx+bpf8XTiS8vr5fXILcgAoyJebcVcclMszg155e4ePy6V0LqUs2CTk8rxhQwZlnlcbWmO3hp1548Ode+D2NN9XsgJySbkxcilmLCmXYuYVyMeqzyuXqMaVMs3D9fL35XIoeQVAjj8oxpIpeR9xKMePyyWvh8Mtz88Xlo/XeELOAceOyaWmgJwgtE0TT8n5vy4s7+vxiPy50qJCMdanoESMhcM1YgwAjh2Xc2effPm7OG/saDF2oPKgGKuJyCXY2w7IvylOZ/PPxePyPtbs861+5/8Jh8MYO3YslixZkjH+6KOP4pe//CV+9atfYcOGDQgGg5g6dapan0xEvRvzBhGdqM1nPqZNm4Zp06ZljBljsHjxYvzwhz/E9ddfDwD4/e9/j+LiYvz1r3/FzTfffGpLS0Q9EvMGEZ2oQ+/52L17N6qqqjB58uSm1/Ly8jB+/HisX78+42disRhqa2vT/ojo9NGevAEwdxD1ZB06+KiqqgIAFBcXp71eXFzcFDtZeXk58vLymv5KS0s7cpGIqJtrT94AmDuIerIur3aZP38+ampqmv7279d7ZhARAcwdRD1Zhw4+Sko+v8O3ujq9qVZ1dXVT7GQ+nw+5ublpf0R0+mhP3gCYO4h6sg7taltWVoaSkhKsWbMG5557LgCgtrYWGzZswJ133tm2ifkLAUfzsZHXJ3coTSWV0teEXE5r5IpKHDoklxolU/L8MpUhNcotkEu0AKCg3wAxlp2rdJINyqWEdfVyp0iHR96mDrdcUhqJK50ik3LM4VZKjZNy2ZtlyWVc8aQcCzfIpZKpmLycJQXyj1lBP7nkGwBKBxaJsWi9fG9CKiUvz/59e8XYoWq5iy4ccnliNJF5ftFo67tTnqoOzRsA6pMGrgzrbLSOqFlyaXRAKTUdVDpcjCWUssPDyuWkI0flnFNc3E+MAYCvb+bSaQAIK6X4llPOZXkFxWLM5ysQY1Gl6rIhKR8D/qB83KUSch5zOeRE7lW66Hq8cq5O+OXYBV+SS1tHDpFzeDQul1kDwO6d8v62c/vHYmzC+XK33NJSeXn2bZHzSiIl5w4rQ65KJJQf05O0efBRX1+Pzz77rOn/d+/ejc2bN6OwsBCDBw/GXXfdhUceeQRnnHEGysrK8KMf/QgDBgxIq+knotML8wYRnajNg4/3338fl19+edP/z5s3DwAwc+ZMLF++HPfddx/C4TBuv/12hEIhXHLJJVi9ejX8fnk0R0S9G/MGEZ2ozYOPSZMmwRjtKXMOLFy4EAsXLjylBSOi3oN5g4hO1OXVLkRERHR64eCDiIiIbMXBBxEREdmqQ0ttO1LhgDI4Xc3LnPxZcjmp05LLFAE5ZpRyooRSTipUKQIAfAH5RjkLcrdHADgaksvJokp3Wo9PLidzupQSZUsuJ0spKxmNyCVjDQ11YswYuezZ7ZTXz+uRl9OvxJzKEDvbJ5f9njO8TIwFffL8AKC2Vu7M2VArl7G6XXKpWr9+cvnuZ3vkcrk6peQxtHlHxtcTidZ3p+xu3L4g3J7m32uRUsLu98o7yZEjB8RYOCzv51A6W0slzgCQVyQ/22Rg2Qh5fgBy8uTS19y+cpnu0WNyx+SUJf9MaJWVETU/yDkuntA6Ksv7pVfpCO33yd13PUo+6qc8O6ZIKcX3K12Ci5TSZQDI9cq/D0f37RNje3fuEWMlhfKjAWqq3xVjnkI558Rdzbd3wql3XT4Rz3wQERGRrTj4ICIiIltx8EFERES24uCDiIiIbMXBBxEREdmKgw8iIiKyVbctte1fOhyuDN1Pg0G5q6tWLudyyCVoJimXd2YF5G6X8aRcZ5ZSHiXtVUpiAcDnk78Wt1teR6db/lzAUsqJlXo5teRSaQfscCrbxpLLW42RSxCdbrm8VSt8zfLKZc9Di+UOw0MHyiVx+Tlyt18AqK2TS8JTUbmjZ6RBjrlL5HK5QLa8n1ZVy+WgIaEcMqV0Je7u8vL6wONtfoy5MpQGNorFomLMofwb7djRkBirrVU6sHrkHOBSSt/3HlS6FwPIrZXLVPPy8uV5Kp17Y1G5FNXhkPcTn0f5eQnKpf8BozwywK2Uchq5M28wIM/PY+QcN6iPXKKbpXTDDdeGxFhSKTMG1CbUKFNKrT/ZtkuMjRw5Sp6o0km7suKgGPNl6M6udY8/Gc98EBERka04+CAiIiJbcfBBREREtuLgg4iIiGzFwQcRERHZioMPIiIislW3LbUNFvaHO0M5mterlGgF5NUJBpSupw55DOZxyyWVXr8ccyhlfW633tVW687oVkrNkpZc3mpBLkNLWXIsqZThxqJyeaJHKYtNpuSurlonzFRC/pwVl0sMc3zyNhtYJJdue53ydtE7KAMFQbn0tU+OPM9ERC7Di9XL65hUuqRGIg1izMpQzg7o+0R35/L4MpayNkTkMkCXUt/ocstlqKmUnDvcbnkfsIz8Oa9PLtPu27e/GAOA7Gw5J/mVTtt5PjmWqUNwI+OQjy2tW3gyKZe35uXK282pdL22Ukq3bKVzrRWTj7k8JXeYpJyPUkqOiyf1jtgRpbQ5S8kde6uOirGPd74qxmIxOa8kYko3+Axd55Nt6IbNMx9ERERkKw4+iIiIyFYcfBAREZGtOPggIiIiW3HwQURERLbi4IOIiIhs1eZS27feeguLFi3Cxo0bUVlZiVWrVmH69OlN8VmzZuGZZ55J+8zUqVOxevXqti2Yyw13hnJVrftkKiGXYdXWKKVPyjR9PrmrYdkwucNgQCnDdbn0Ulsopb9waqV9SgmXXt0lz06JRRvk3cehdJj0KB00LaVUMBmTu7PG5WpSFGTJ2zuodELWyncjSlkfADiMXKKXFZD3qf2HjoixQJa8PMePhMRYuF4uJX
RkZy7r7OiutnblDQAo7NsPXl/z/chKyPtkdkDeR6yUvN09Tnlf7tdvgBhzKOX2Wgm/VymJBQC/Xz4mXUpHbK1k1uFSOskqn3MpeawhLO+TTiV3aJ1yjVKG21Ajl6Ee3LNDjB3zyOuXrzzaobhPvhjz++XHRQBANK6Ut7rljr/urFwxdvhAhRgr7V8kxnLi8ndRm6EMN9mG0xltPvMRDocxduxYLFmyRHzP1VdfjcrKyqa/P/3pT22dDRH1IswbRHSiNp/5mDZtGqZNm6a+x+fzoaSkpN0LRUS9C/MGEZ2oU+75WLt2Lfr164dRo0bhzjvvxNGj8imvWCyG2tratD8iOv20JW8AzB1EPVmHDz6uvvpq/P73v8eaNWvws5/9DOvWrcO0adOQSmV+THd5eTny8vKa/kpLSzt6kYiom2tr3gCYO4h6sg7v7XLzzTc3/fcXvvAFjBkzBsOHD8fatWtx5ZVXNnv//PnzMW/evKb/r62tZRIhOs20NW8AzB1EPVmnl9oOGzYMffv2xWeffZYx7vP5kJubm/ZHRKe3lvIGwNxB1JN1elfbAwcO4OjRo+jfX+/GeLLaUAiuDN0UG+rlrqcOpTOl1yPXmgaz5S6KWUosrnRm9CtdZI2ynABgGTmeSGkdaOXyz6RRug06lO6tRi77spQOhhHl+rtf6ZLpVcr6ouFjYqxq/24xVpclzy+SK5e99SuUO0gav17yGI/LlwucSrncnspDYmznfrlcrqpa/lxNRP6eXMKuZimXO+zQ3rwBAFmBHHgzlDMmlG6hgaD8febn9hNjVlI+Vt1eeb8LCCXOAGAcSgdupVs2AFhG+az2b00lpDTghVHyXDKpdGFOybXxtUflcnNt7T1KqW19zWExVlkhH1fFhfKANj/YV4w1KCWqllLyDABJZS21TsEDB8ln/UadMUyMnXu2HPt0134xtunDT5q9Fo/Lj7Q4WZsHH/X19Wn/Gtm9ezc2b96MwsJCFBYW4qGHHsKMGTNQUlKCnTt34r777sOIESMwderUts6KiHoJ5g0iOlGbBx/vv/8+Lr/88qb/b7zmOnPmTCxduhRbtmzBM888g1AohAEDBmDKlCl4+OGH4fPJ/9ojot6NeYOITtTmwcekSZNglMsCr7zyyiktEBH1PswbRHQi9nYhIiIiW3HwQURERLbi4IOIiIhs1emltu3lDwbh9jS/2cyhdEoMBuVukEX9isVYbq5cUgkjlxymknLpXrheLjU1SmdaAIgpJbwpJaZN1uXWOlPK62gsueOvUUp7HUl5mh7IZY1G2SUjSifMSINcuhdwKWVvRt5nIlp3Sac8TQBIKCWYShUeEpC7nR6oOCDGcpVS0YAyv2gsczlkV5fanopwNIZEhvrQnIBc3upSSlgPHZYf815bExJjliUfkCNGjhJj+YVyCadLeWQAADiU9tVJpUxfK5FsiMuPN4jG5OMuGZdzoCMl5zETk5cl6JWPj/z8QjEW8MqdW93Kow/ys+VS/LwcORZX1qFB2S8+/6yS4x1yTirIk8uCs3zyPA/s3yvGpFJ8ABg96oxmr0Uicnn1yXjmg4iIiGzFwQcRERHZioMPIiIishUHH0RERGQrDj6IiIjIVhx8EBERka26baltMCcHbm/z8sHcggLxM26XXGamNANETW1IjPmU0i6TksueIgm51CqldF8EAKOUxQZ88vL43fLX6VHqcJ1OeX4Oh1xO5skOijGjlCE7LLnkL6B0AnUUyt99UNku2QGli66yrRPK15RKKqXLAIwlx+MJeb8ZWlYmxs4YPliMZfnk7377rn1i7MNPtmd83bJ6bqmtz+2B19N8fzh6ROkYfFzupJpKycdyvpKP+veXy/u1jtiJuFzebiml/wBQ2yCXxUYicllsKimvo0vJV16P8ugDJXf6lcciBDzyvhxtkMvtLaXDrta53OWQj1Wv8pvicsnr7lHWPZqUj38AcCjzdCjrmFAefXDg6HEx1hCuEWNupQN3Sf9BzV5zuPR1OxHPfBAREZGtOPggIiIiW3HwQURERLbi4IOIiIhsxcEHERER2YqDDyIiIrJVty21TVpJwGpejuYPyN07AbkkzCglSlqpKYz8OYfSDdEopYrBLLl8FQCCOXIJq9ad1qGU4Wm9MJ1GqSlV1kMbucaU2mZ1cytlb36fXJ7nd8vltD63vKTaNksonXljSodQADBK3CilxoX5SmdKeRXhccnbrWxwqRjb8emnGV+3oJcSd2c1oWPw+JrnicqDFeJnsoLyMXnm2V8QY4V9+8nTzJL312hELok9fvyYGEsoJfwA0GDkcsusLDl35uXKJZVBnxwLKCWlbuVYTildbZNKmX4iIR+TUadc5ulQ9menU3lEg5LHtFJ8t0s+WLVO4QAQjcnxo4flkvAjR+VYXV2dGDseComxYJb8W+TL6dPstWhUX7cT8cwHERER2YqDDyIiIrIVBx9ERERkKw4+iIiIyFYcfBAREZGtOPggIiIiW7Wp1La8vBzPPfcctm3bhkAggIsuugg/+9nPMGrUqKb3RKNR3HPPPVixYgVisRimTp2Kp556CsXFcofHTEwyBpNhaGQsuexLqw50OeVVNUo5bU1trRizlC6CltK5MKl8DgASSqmVX6m3dHvlddS6IdYePirGwnXy+keicrlgSun4W1o6UIy5lc61bodSMitGAGVR4HYpXYuVbZZK6d1FU8r3n1LKJWNK59FjCTnmVjps+gNyR894LPOyWEo5cHvYmTsK+hbB629eOluglMVqHbHdfrlEta5e7rJaXy8fOz6lC7PWndRSuuECwIDiInmefjl3aJ1rjSXvy+FoRIxFa+XyzpBSTnz02GExFlFKlM86a5QY8+TnizEtd7iU5wJo3WljYXndD1TtV+YIHD4ir388Lu8bDWF529SE5M61Xpf8u6Ht32veeKPZa8kW9s8TtenMx7p16zB79my8++67eO2115BIJDBlyhSET1jpu+++Gy+88AJWrlyJdevWoaKiAl/5ylfaMhsi6mWYO4joRG0687F69eq0/1++fDn69euHjRs34rLLLkNNTQ1+97vf4dlnn8UVV1wBAFi2bBnOOussvPvuu7jwwgs7bsmJqMdg7iCiE53SPR81NZ+fyiksLAQAbNy4EYlEApMnT256z5lnnonBgwdj/fr1GacRi8VQW1ub9kdEvRtzB9Hprd2DD8uycNddd+Hiiy/GOeecAwCoqqqC1+tF/knX14qLi1FVVZVxOuXl5cjLy2v6Ky2VHwVNRD0fcwcRtXvwMXv2bHz00UdYsWLFKS3A/PnzUVNT0/S3f79+Mw4R9WzMHUTUrsZyc+bMwYsvvoi33noLgwYNanq9pKQE8XgcoVAo7V8w1dXVKCkpyTgtn88Hn9K4iIh6D+YOIgLaOPgwxmDu3LlYtWoV1q5di7KysrT4uHHj4PF4sGbNGsyYMQMAsH37duzbtw8TJkxo04Il4w2AaV7KFFfbs8oncpJKqW1MKW09uH+fPE2hTBEAPErpnkuJAXrX1xEjh4mxgj5ymaqldJE8cED+F+P+vXvFWCwul
9l5PPI6Fhbmi7G++XliLKGURGtb1Chde7UOkqE6uRwwopQYAkBM6e5Yr3SYjDbI5bTRqBzLCsgdVMuGlomxiorMnV61bdYeduaOhDFwZFh+v18eqLjdculrStvvlM7WWvmzdoz7lZLYSFgv04/UyPtWRA7B7VWW1SPHjFLHvv2Tj8XYvj17xFgyJa+jUbpQD+ifeZAKAIV5cl6JKMecFgsdD4mxo8flxxdElLwJ6I8paFCWR3sshFPp+J7lln8bqyor5ViGy6GW0gX9ZG0afMyePRvPPvssnn/+eeTk5DTNPC8vD4FAAHl5efjWt76FefPmobCwELm5uZg7dy4mTJjAu9WJTmPMHUR0ojYNPpYuXQoAmDRpUtrry5Ytw6xZswAAv/jFL+B0OjFjxoy0BwUR0emLuYOITtTmyy4t8fv9WLJkCZYsWdLuhSKi3oW5g4hOxN4uREREZCsOPoiIiMhWHHwQERGRrdr1nA87mFQKxtm8bEcr7XIYueDSoZThGq08SCntSinlqw6HXJ6XVEr3AMDvlUvtYMnXzp1JeboJpRui1yOXGeYrpa8ud74YcygliNqyHD1+XIxpnYItpctspEEubYsoXWSjCaVjp/I5AEgm5H0jGpGn21Avd6bUuvrWN8hl325v5ieEAm3rQtlT7PzsU7i9zctqzx59tviZgFLeqjX4dSo9UbWyw+pDh8RYuFbuQBpT9h2ghW7KSu4cNmKoGCvq11eeprJxPEr5cl5erhhTu+8qNfVa2fy27dvFWH1Y7tyqTTOh5SPlHqewUmoPABEtPzTI+UHreOtTymlrDx0RY6FQSIylMvwWWcrv08l45oOIiIhsxcEHERER2YqDDyIiIrIVBx9ERERkKw4+iIiIyFYcfBAREZGtum2p7fGjR+DKUKoVywqKn/EoJaNaOWlSKUELKGWvHqU1pcstj+u8SndNAMgJ5oixhnqla2VNSIxpJZXJhLz+WUF5eycSchlaMimXfR06LJcZHj8md4NMWvJyJttZ9mal5FLBWEwuedNKGgEgHpe3TUTpeOtxyPuU1iX16FF5u+3ds1uM9UaJWB2M1Xz/i9aHxM84tU6qSkdQp0tOoSnlmNux41MxVq8cx16PnrI9Pr8Ycyt1qlZSLgvWSviRkrdNn8JCeZpKV9+GiFz6GlFi+/cfaNf8lAp2GOURDQ3KMV6jlKiGj8ql1ADgUcpi1TyuPG4gHJI73iYjcvluSpkmMh4XLLUlIiKiboqDDyIiIrIVBx9ERERkKw4+iIiIyFYcfBAREZGtOPggIiIiW3XbUts9O7bC4WxeGubxyGWqLrfS1VYpYXRmmM+/pimX6GqlvS6XXHKUylAGmBZXyjh9Hrn01+uSl8co3TdTSpddrQzNrWxvrVxM7dqoLEtK6zAsRoBoXO74Gq2Xu9NaMfl70sp3ASCRkOdplM/K3yBQp5To1tfKpXRIyKV0vZHf7YQ7Q6l7XCnT9Lvl4yNTHmrkVMqfnUpZbG5utrwsHnl+2cEsMQYALqWMP8svl+FqXZh3bNsmxmqOHZNjYfmxANqx7PHK66+Vm/uUxyI4nPIx1xCV89FhpfS/Qel461L2mYLcfDEGAHHlONfKkJMJeZtqXb+h/DZA+d10ZPhx0LqZn4xnPoiIiMhWHHwQERGRrTj4ICIiIltx8EFERES24uCDiIiIbMXBBxEREdmqTYOP8vJynH/++cjJyUG/fv0wffp0bN++Pe09kyZNgsPhSPv7zne+06ELTUQ9C3MHEZ2oTc/5WLduHWbPno3zzz8fyWQSDzzwAKZMmYKPP/4YwRNar992221YuHBh0/9nZem16ZkkD1dmrDFOKC2O4ZPrvKE8kwLK8zFc2nM1vEr7amXLupVnCgCAS2l9nVBaX1spuVbf4ZTX0ZnhmQj/iinPB4H8TI5ERK5Vh/a8DiM/4yQak5+dEVZq9Y2yuf0O+YtyKHXz8bj+rJaE0vpai0F5dgCU55WghWfHdDU7c4fT4cr47J6U0hre4Whfu/lYTHmWhfI9B5QE4VSeHxQJ689siR2rEGP7G+RnRFjKs4Uc2nNptGcdueX86PErz05Rcmc8Li9n/XE5B0Sj8rpHo/KzfrRM7Vd+ixJR+XhMQPktAhBRcpn2jCTL0p7XJK9JUnkmiUnJ6+j1NJ+m/jyRdG0afKxevTrt/5cvX45+/fph48aNuOyyy5pez8rKQklJSVsmTUS9GHMHEZ3olO75qKmpAQAUFhamvf7HP/4Rffv2xTnnnIP58+ejoUEeWcZiMdTW1qb9EVHvxtxBdHpr9+PVLcvCXXfdhYsvvhjnnHNO0+tf//rXMWTIEAwYMABbtmzBf/zHf2D79u147rnnMk6nvLwcDz30UHsXg4h6GOYOInIYrdGE4s4778TLL7+Mt99+G4MGDRLf98Ybb+DKK6/EZ599huHDhzeLx2IxxE64ll9bW4vS0lIAwczPlT/N7/lQY9r9C+2858OV4breCZ8UI1ZCvv7Iez7svudD+S5aUFNTg9zc3HZ/PpPOzh0TvzoXbm/zHid9+/YV53XyGZgTadexjRLT7vkIN8j3bsSU3h6JhL7fxWI1YqyhG93z4fZp95mJISTj8r4cj8rbuzPu+XBq93zE5WXJ9geUqQINYXl5QqGQGGv3PR+Wsn8n5O/em2GaViqFA7s+bFXeaNeZjzlz5uDFF1/EW2+9pSYPABg/fjwAiAnE5/PB55ObIRFR78HcQURAGwcfxhjMnTsXq1atwtq1a1FWVtbiZzZv3gwA6N+/f7sWkIh6PuYOIjpRmwYfs2fPxrPPPovnn38eOTk5qKqqAgDk5eUhEAhg586dePbZZ3HNNdegT58+2LJlC+6++25cdtllGDNmTBsXLZb5fLlS+gOlJA4OpVm5cso6pcSSDq2sSN60xuibPaWcytVOrWmXQZzKpSWl0gopSzmVGZG3jcPIE/X55PW3LPn0r1HW3aW0fk6l5M85PcqlI0s+5aidGgeAlHLaFcppTmiXZLTPdXN25o76mqMZL5dG6kLiZw5VyJdXY1H5clcqKce0SySJhLKfK5c5nMrpcwDweOR9xK1dXlUu57o9ckw57JBUSv+jYXnbxGLyJam6WuXyqrxJEcyRLwG5lMsnRinPjimXR5LKcVwT0y+daeW0KSUHOJSLRJZp36VXt1v+3XBkyNX63nnStNuyIEuXLgXw+cOATrRs2TLMmjULXq8Xr7/+OhYvXoxwOIzS0lLMmDEDP/zhD9syGyLqZZg7iOhEbb7soiktLcW6detOaYGIqPdh7iCiE7G3CxEREdmKgw8iIiKyFQcfREREZCsOPoiIiMhW7X68eucTblBzKDeuaR31tBJVrXzNJU8zoZSFJpQumWjfQ2U/55VLAp0u5etUlsep1KillHI5j0/uOJqXlyfG4nGlRC2ufIdKyaz2REGtPA1e5TvUphmR1+H/ZirHtPpEtVhN64apfa57d7ztaNX7d2Y8FrRSba28XXs6pNunlCK65M85lH3AqzxVuaUuv9p0tTL9pPKE0/p6+TjQusxayqOFncpjCrTu
3F4l5/QbMECMhevlJ7/Who6LsaSSA4z2VFjleGxQ8h+gfxfqjdtKCtCWx6Ps3y7l6cgNDc07OuuPgkjHMx9ERERkKw4+iIiIyFYcfBAREZGtOPggIiIiW3HwQURERLbi4IOIiIhs1X1Lbd2+zCWJWpmi1p5V6VyoTrO9MZc8P4dWEgvA2e7l0UJKp0yl46FWhZydFRBjWUosEq5VYnJHS6SUMjO146u8EtGE0kVWi2mltEAL5bTt6zAJKJ2Z1WmeXqW2LisKZ4bSckv5ziytbFI5CFJO+Vh2Kt2rtd0jlpI75SYTepmmVt6qlRNr3G55PTxK6b9L6YjqVkpGU0onWb9XXhZfwCfGjh+Vt2m4rl6MeZTfFJdDzvHxmPIdttBh1ii5Wiuldiq/cQ5le/vd8jrW14bEWEO4efmyaUP3XJ75ICIiIltx8EFERES24uCDiIiIbMXBBxEREdmKgw8iIiKyFQcfREREZKvuW2qblErKlBo1t9Lx0eeXY2r5qlK+pJTgObTS3hZonQvbG9O6+jriclmYS1lHKyWXJzY0yOVr4Xo5hoi8LHBpnYLlEJJKqam2zdpQNtY27Syldmrfr1KifJr5vCtq8+9OP3aUTqKW/J2YhFIyqnXKFSN67khpxwAAl9IR1+eTS1Fdyjydyjy1w84o5e+phHycpyIRMRb3yOsQicjHgJZz1DJrr7zu0Qa57Fnd11r4adC2qVZqq33OrXy/Rsn/x49Wi7FEvPn3pP4GnYRnPoiIiMhWHHwQERGRrTj4ICIiIltx8EFERES24uCDiIiIbMXBBxEREdmqTaW2S5cuxdKlS7Fnzx4AwOjRo/Hggw9i2rRpAIBoNIp77rkHK1asQCwWw9SpU/HUU0+huLi4AxdZKeVJKuWGWqzjlwQGSkmcUi4GQC+3tNpXTqt1fTWQu7cmnfKyJvLyxVg8USfPLxoVY/o6KDGlXE6l1jxqZa8tjNvb2X1Y7c7bg8tp7cwd0UQczlTz40/rzmqU78ulfM6pdG51Kt2rtQ6kLqWTqlb2+n8fFkNaCa9Rjq2kcmyllP01kZRjrqhcTpuol3NHStk2wZicV7RyWq2LeCyi5arWl5Wmf6x9nwP078LtkfdFl7LfHKs+JMYSMTnnZN5snVRqO2jQIPz0pz/Fxo0b8f777+OKK67A9ddfj61btwIA7r77brzwwgtYuXIl1q1bh4qKCnzlK19pyyyIqBdi7iCiEzlMW54KkkFhYSEWLVqEG2+8EUVFRXj22Wdx4403AgC2bduGs846C+vXr8eFF17YqunV1tYiLy/vVBapm+g5Zz6gnPmAcuYjOGCgGDPKuLbh6DFlUdp5BuN0OPPRCWfvWlJTU4Pc3NxOmXZn5Y6ioSPgzPAvZO3Mh6UcO6f7mQ/tB8LuMx/+ggIxVjR4qBg7fKBCjMWVh5qZpJZT2/fTmdAeFtgC7edaO/MRUPbh45X7xFhN7WExlinFGWOQjFutyhvtvucjlUphxYoVCIfDmDBhAjZu3IhEIoHJkyc3vefMM8/E4MGDsX79enE6sVgMtbW1aX9E1HsxdxBRmwcfH374IbKzs+Hz+fCd73wHq1atwtlnn42qqip4vV7k5+envb+4uBhVVVXi9MrLy5GXl9f0V1pa2uaVIKLuj7mDiBq1efAxatQobN68GRs2bMCdd96JmTNn4uOPP273AsyfPx81NTVNf/v372/3tIio+2LuIKJGbW4s5/V6MWLECADAuHHj8N577+Hxxx/H1772NcTjcYRCobR/wVRXV6OkpEScns/nUxseEVHvwNxBRI1OuautZVmIxWIYN24cPB4P1qxZgxkzZgAAtm/fjn379mHChAmnvKA9j3LjYELuhtjtWHLHw0hYuVFLu6cqrtzgqnQCbT+tc217J9nCB9WbhrWS4Z5bTttWnZU7PF5/xhszM92E2vQZraurdqOm1vVajAAObfdRbmTUuu8CUI+flLLfWcrNocmEfLzG43LH6IhyU2kqIufApHIDaFBZzkBeH3maSs5JROV10MpwNVr3WfX4B5BS05UcDCo3G4drj4ux2tqQNkOR09l8+PD58ildxE/QpsHH/PnzMW3aNAwePBh1dXV49tlnsXbtWrzyyivIy8vDt771LcybNw+FhYXIzc3F3LlzMWHChFbfrU5EvRNzBxGdqE2Dj0OHDuHf//3fUVlZiby8PIwZMwavvPIKrrrqKgDAL37xCzidTsyYMSPtQUFEdHpj7iCiE53ycz46Wu95zkfv5yzoL8a0vcrU1ctBuy+7aE7lOR9a3GiXXZRt0wU68zkfHa0xdwwYeU6bL7tol1Y65bKLEnNq02zpKoBDPn60Z5n0mMsuffqJsQEjRomxOuXZQrGwvCydcdklic657JKbFRBjkboaMXakUr5R2xj5u3dleI6NMQbxWLxzn/NBRERE1B4cfBAREZGtTrnapaN1s6tApDDK5QP1a9SD7V6eDp9mu9ehhXgP2sd70vHYuKyWdOmuvd+nEjMO+Rho72UX9VLOKVx2MUoVjbjNWoppDSu1R7aruUNZTmWaKaXFQnvXoeUNLn1M/pzVwmUX7ant2mWXdn9P2v7dxljja63JG91u8FFXJz/Xn7oXE6ru6kXoOnr+aDneQ9TV1fWYe7Aac0fVzk+6eEmos8Qq94qx40rsdKB0zOoUqaR8v09r8ka3u+HUsixUVFQgJycHDocDtbW1KC0txf79+3vMjW924HaRcdtk1pbtYoxBXV0dBgwYoN5w2Z2cmDvq6uq4Dwh4fGTG7SJr7bZpS97odmc+nE4nBg0a1Oz13Nxc7hAZcLvIuG0ya+126SlnPBqdmDsaT3tzH5Bx22TG7SJrzbZpbd7oGf+kISIiol6Dgw8iIiKyVbcffPh8PixYsIANpE7C7SLjtsnsdNoup9O6thW3TWbcLrLO2Dbd7oZTIiIi6t26/ZkPIiIi6l04+CAiIiJbcfBBREREtuLgg4iIiGzFwQcRERHZqlsPPpYsWYKhQ4fC7/dj/Pjx+Oc//9nVi2S7t956C1/+8pcxYMAAOBwO/PWvf02LG2Pw4IMPon///ggEApg8eTJ27NjRNQtro/Lycpx//vnIyclBv379MH36dGzfvj3tPdFoFLNnz0afPn2QnZ2NGTNmoLq69/ejWbp0KcaMGdP0NMIJEybg5ZdfboqfDtuFuYO5Q8LckZndeaPbDj7+/Oc/Y968eViwYAE++OADjB07FlOnTsWhQ4e6etFsFQ6HMXbsWCxZsiRj/NFHH8Uvf/lL/OpXv8KGDRsQDAYxdepURKNRm5fUXuvWrcPs2bPx7rvv4rXXXkMikcCUKVMQDoeb3nP33XfjhRdewMqVK7Fu3TpUVFTgK1/5ShcutT0GDRqEn/70p9i4cSPef/99XHHFFbj++uuxdetWAL1/uzB3fI65IzPmjsxszxumm7rgggvM7Nmzm/4/lUqZAQMGmPLy8i5cqq4FwKxatarp/y3LMiUlJWbRokVNr4VCIePz+cyf/vSnLljCrnPo0CEDwKxbt84Y8/l28Hg8ZuXKlU3v+eSTTww
As379+q5azC5TUFBgfvvb354W24W5oznmDhlzh6wz80a3PPMRj8exceNGTJ48uek1p9OJyZMnY/369V24ZN3L7t27UVVVlbad8vLyMH78+NNuO9XU1AAACgsLAQAbN25EIpFI2zZnnnkmBg8efFptm1QqhRUrViAcDmPChAm9frswd7QOc8e/MHc0Z0fe6HZdbQHgyJEjSKVSKC4uTnu9uLgY27Zt66Kl6n6qqqoAION2aoydDizLwl133YWLL74Y55xzDoDPt43X60V+fn7ae0+XbfPhhx9iwoQJiEajyM7OxqpVq3D22Wdj8+bNvXq7MHe0DnPH55g70tmZN7rl4IOoLWbPno2PPvoIb7/9dlcvSrcxatQobN68GTU1NfjLX/6CmTNnYt26dV29WETdCnNHOjvzRre87NK3b1+4XK5md9JWV1ejpKSki5aq+2ncFqfzdpozZw5efPFFvPnmmxg0aFDT6yUlJYjH4wiFQmnvP122jdfrxYgRIzBu3DiUl5dj7NixePzxx3v9dmHuaB3mDuaOTOzMG91y8OH1ejFu3DisWbOm6TXLsrBmzRpMmDChC5eseykrK0NJSUnadqqtrcWGDRt6/XYyxmDOnDlYtWoV3njjDZSVlaXFx40bB4/Hk7Zttm/fjn379vX6bZOJZVmIxWK9frswd7QOcwdzR2t0at7omHtiO96KFSuMz+czy5cvNx9//LG5/fbbTX5+vqmqqurqRbNVXV2d2bRpk9m0aZMBYH7+85+bTZs2mb179xpjjPnpT39q8vPzzfPPP2+2bNlirr/+elNWVmYikUgXL3nnuvPOO01eXp5Zu3atqaysbPpraGhoes93vvMdM3jwYPPGG2+Y999/30yYMMFMmDChC5faHvfff79Zt26d2b17t9myZYu5//77jcPhMK+++qoxpvdvF+aOzzF3ZMbckZndeaPbDj6MMeaJJ54wgwcPNl6v11xwwQXm3Xff7epFst2bb75pADT7mzlzpjHm85K5H/3oR6a4uNj4fD5z5ZVXmu3bt3ftQtsg0zYBYJYtW9b0nkgkYr773e+agoICk5WVZW644QZTWVnZdQttk29+85tmyJAhxuv1mqKiInPllVc2JRBjTo/twtzB3CFh7sjM7rzhMMaY9p0zISIiImq7bnnPBxEREfVeHHwQERGRrTj4ICIiIltx8EFERES24uCDiIiIbMXBBxEREdmKgw8iIiKyFQcfREREZCsOPoiIiMhWHHwQERGRrTj4ICIiIlv9//uLpd07WEAMAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "visualise(x_test, x_test_adv, predictions_benign, predictions_adv, y_test)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7eab976f-84ab-4a4e-927a-a0761907517b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.18" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/attacks/evasion/test_composite_adversarial_attack.py b/tests/attacks/evasion/test_composite_adversarial_attack.py new file mode 100644 index 0000000000..cd135d01b5 --- /dev/null +++ b/tests/attacks/evasion/test_composite_adversarial_attack.py @@ -0,0 +1,192 @@ +# MIT License +# +# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+import logging + +import numpy as np +import pytest + +from art.attacks.evasion import CompositeAdversarialAttackPyTorch +from art.estimators.estimator import BaseEstimator, LossGradientsMixin +from art.estimators.classification.classifier import ClassifierMixin + +from tests.attacks.utils import backend_test_classifier_type_check_fail +from tests.utils import ARTTestException, get_cifar10_image_classifier_pt + +logger = logging.getLogger(__name__) + + +@pytest.fixture() +def fix_get_cifar10_subset(get_cifar10_dataset): + (x_train_cifar10, y_train_cifar10), (x_test_cifar10, y_test_cifar10) = get_cifar10_dataset + n_train = 100 + n_test = 11 + yield x_train_cifar10[:n_train], y_train_cifar10[:n_train], x_test_cifar10[:n_test], y_test_cifar10[:n_test] + + +@pytest.mark.skip_framework( + "tensorflow1", "tensorflow2", "tensorflow2v1", "keras", "non_dl_frameworks", "mxnet", "kerastf", "huggingface" +) +def test_generate(art_warning, fix_get_cifar10_subset): + try: + (x_train, y_train, x_test, y_test) = fix_get_cifar10_subset + + classifier = get_cifar10_image_classifier_pt(from_logits=False, load_init=True) + attack = CompositeAdversarialAttackPyTorch(classifier) + + x_train_adv = attack.generate(x=x_train, y=y_train) + x_test_adv = attack.generate(x=x_test, y=y_test) + + assert x_train.shape == x_train_adv.shape + assert np.min(x_train_adv) >= 0.0 + assert np.max(x_train_adv) <= 1.0 + assert x_test.shape == x_test_adv.shape + assert np.min(x_test_adv) >= 0.0 + assert np.max(x_test_adv) <= 1.0 + + except ARTTestException as e: + art_warning(e) + + +@pytest.mark.skip_framework( + "tensorflow1", "tensorflow2", "tensorflow2v1", "keras", "non_dl_frameworks", "mxnet", "kerastf" +) +def test_check_params(art_warning): + try: + classifier = get_cifar10_image_classifier_pt(from_logits=False, load_init=True) + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, enabled_attack=(0, 1, 2, 3, 4, 5, 6, 7)) + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, hue_epsilon=(-10.0, 0.0)) + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, hue_epsilon=(0.0, 10.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, hue_epsilon=(-1, 2.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, hue_epsilon=3.14) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, hue_epsilon=(0.0, 10.0, 20.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, hue_epsilon=("1.0", 2.0)) + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, sat_epsilon=(-10.0, 0.0)) + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, sat_epsilon=(0.0, -10.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, sat_epsilon=(1, 2.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, sat_epsilon=2.0) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, sat_epsilon=(0.0, 10.0, 20.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, sat_epsilon=("1.0", 2.0)) + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, rot_epsilon=(-450.0, 359.0)) + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, rot_epsilon=(10.0, -10.0)) + with pytest.raises(TypeError): + _ = 
CompositeAdversarialAttackPyTorch(classifier, rot_epsilon=(1.0, 2)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, rot_epsilon=10) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, rot_epsilon=(0.0, 10.0, 20.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, rot_epsilon=("10", 20.0)) + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, bri_epsilon=(-10.0, 0.0)) + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, bri_epsilon=(0.0, 10.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, bri_epsilon=(-1, 1.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, bri_epsilon=1.0) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, bri_epsilon=(0.0, 10.0, 20.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, bri_epsilon=("1.0", 2.0)) + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, con_epsilon=(-10.0, 10.0)) + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, con_epsilon=(0.0, -10.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, con_epsilon=(1, 2.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, con_epsilon=2.0) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, con_epsilon=(0.0, 10.0, 20.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, con_epsilon=("1.0", 2.0)) + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, pgd_epsilon=(-0.5, 2.0)) + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, pgd_epsilon=(8 / 255, -8 / 255)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, pgd_epsilon=(-2, 1)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, pgd_epsilon=8 / 255) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, pgd_epsilon=(0.0, 10.0, 20.0)) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, pgd_epsilon=("2/255", 3 / 255)) + + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, early_stop="true") + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, early_stop=1) + + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, max_iter="max") + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, max_iter=-5) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, max_iter=2.5) + + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, max_inner_iter="max") + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, max_inner_iter=-5) + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, max_inner_iter=2.5) + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, attack_order="schedule") + + with pytest.raises(ValueError): + _ = CompositeAdversarialAttackPyTorch(classifier, batch_size=-1) + + with pytest.raises(TypeError): + _ = CompositeAdversarialAttackPyTorch(classifier, verbose="true") + + except 
+@pytest.mark.framework_agnostic
+def test_classifier_type_check_fail(art_warning):
+    try:
+        backend_test_classifier_type_check_fail(
+            CompositeAdversarialAttackPyTorch, [BaseEstimator, LossGradientsMixin, ClassifierMixin]
+        )
+    except ARTTestException as e:
+        art_warning(e)
diff --git a/tests/attacks/inference/attribute_inference/test_true_label_baseline.py b/tests/attacks/inference/attribute_inference/test_true_label_baseline.py
index 05de0df14a..9209ff25b7 100644
--- a/tests/attacks/inference/attribute_inference/test_true_label_baseline.py
+++ b/tests/attacks/inference/attribute_inference/test_true_label_baseline.py
@@ -605,8 +605,8 @@ def transform_other_feature(x):
         baseline_inferred_test
     )
 
-    expected_train_acc = {"nn": 0.81, "rf": 0.95, "gb": 0.95, "lr": 0.81, "dt": 0.94, "knn": 0.87, "svm": 0.81}
-    expected_test_acc = {"nn": 0.88, "rf": 0.79, "gb": 0.8, "lr": 0.88, "dt": 0.74, "knn": 0.86, "svm": 0.88}
+    expected_train_acc = {"nn": 0.81, "rf": 0.93, "gb": 0.95, "lr": 0.81, "dt": 0.94, "knn": 0.87, "svm": 0.81}
+    expected_test_acc = {"nn": 0.88, "rf": 0.78, "gb": 0.8, "lr": 0.88, "dt": 0.74, "knn": 0.86, "svm": 0.88}
 
     assert expected_train_acc[model_type] <= baseline_train_acc
     assert expected_test_acc[model_type] <= baseline_test_acc
diff --git a/tests/attacks/inference/membership_inference/test_black_box.py b/tests/attacks/inference/membership_inference/test_black_box.py
index 0896620404..7f32d3697c 100644
--- a/tests/attacks/inference/membership_inference/test_black_box.py
+++ b/tests/attacks/inference/membership_inference/test_black_box.py
@@ -48,7 +48,38 @@ def test_black_box_image(art_warning, get_default_mnist_subset, image_dl_estimat
 @pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
-def test_black_box_tabular(art_warning, model_type, tabular_dl_estimator_for_attack, get_iris_dataset):
+def test_black_box_tabular(art_warning, model_type, decision_tree_estimator, get_iris_dataset):
+    try:
+        classifier = decision_tree_estimator()
+        attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
+        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.25)
+    except ARTTestException as e:
+        art_warning(e)
+
+
+@pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
+def test_black_box_tabular_no_label(art_warning, model_type, decision_tree_estimator, get_iris_dataset):
+    try:
+        classifier = decision_tree_estimator()
+        attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
+        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.25, False)
+    except ARTTestException as e:
+        art_warning(e)
+
+
+@pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
+def test_black_box_loss_tabular(art_warning, model_type, decision_tree_estimator, get_iris_dataset):
+    try:
+        classifier = decision_tree_estimator()
+        if type(classifier).__name__ == "PyTorchClassifier" or type(classifier).__name__ == "TensorFlowV2Classifier":
+            attack = MembershipInferenceBlackBox(classifier, input_type="loss", attack_model_type=model_type)
+            backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.25)
+    except ARTTestException as e:
+        art_warning(e)
+
+
+@pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
+def test_black_box_tabular_dl(art_warning, model_type, tabular_dl_estimator_for_attack, get_iris_dataset):
     try:
         classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
         attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
@@ -58,7 +89,17 @@ def test_black_box_tabular(art_warning, model_type, tabular_dl_estimator_for_att
 @pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
-def test_black_box_loss_tabular(art_warning, model_type, tabular_dl_estimator_for_attack, get_iris_dataset):
+def test_black_box_tabular_no_label_dl(art_warning, model_type, tabular_dl_estimator_for_attack, get_iris_dataset):
+    try:
+        classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
+        attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
+        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.25, False)
+    except ARTTestException as e:
+        art_warning(e)
+
+
+@pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
+def test_black_box_loss_tabular_dl(art_warning, model_type, tabular_dl_estimator_for_attack, get_iris_dataset):
     try:
         classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
         if type(classifier).__name__ == "PyTorchClassifier" or type(classifier).__name__ == "TensorFlowV2Classifier":
@@ -115,55 +156,62 @@ def test_black_box_keras_loss(art_warning, get_iris_dataset):
         art_warning(e)
 
-def test_black_box_tabular_rf(art_warning, tabular_dl_estimator_for_attack, get_iris_dataset):
+@pytest.mark.skip_framework("tensorflow", "keras", "scikitlearn", "mxnet", "kerastf")
+def test_black_box_with_model(art_warning, tabular_dl_estimator_for_attack, estimator_for_attack, get_iris_dataset):
     try:
         classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
-        attack = MembershipInferenceBlackBox(classifier, attack_model_type="rf")
-        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.2)
+        attack_model = estimator_for_attack(num_features=2 * num_classes_iris)
+        attack = MembershipInferenceBlackBox(classifier, attack_model=attack_model)
+        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.25)
     except ARTTestException as e:
         art_warning(e)
 
-def test_black_box_tabular_gb(art_warning, tabular_dl_estimator_for_attack, get_iris_dataset):
+@pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
+def test_black_box_tabular_prob(art_warning, decision_tree_estimator, get_iris_dataset, model_type):
     try:
-        classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
-        attack = MembershipInferenceBlackBox(classifier, attack_model_type="gb")
-        # train attack model using only attack_train_ratio of data
-        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.25)
+        classifier = decision_tree_estimator()
+        attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
+        backend_check_membership_probabilities(attack, get_iris_dataset, attack_train_ratio)
     except ARTTestException as e:
         art_warning(e)
 
-@pytest.mark.skip_framework("tensorflow", "keras", "scikitlearn", "mxnet", "kerastf")
-def test_black_box_with_model(art_warning, tabular_dl_estimator_for_attack, estimator_for_attack, get_iris_dataset):
+def test_black_box_with_model_prob(art_warning, decision_tree_estimator, estimator_for_attack, get_iris_dataset):
     try:
-        classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
+        classifier = decision_tree_estimator()
         attack_model = estimator_for_attack(num_features=2 * num_classes_iris)
         attack = MembershipInferenceBlackBox(classifier, attack_model=attack_model)
-        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.25)
+        backend_check_membership_probabilities(attack, get_iris_dataset, attack_train_ratio)
     except ARTTestException as e:
         art_warning(e)
 
-def test_black_box_tabular_prob_rf(art_warning, tabular_dl_estimator_for_attack, get_iris_dataset):
+@pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
+def test_black_box_pred(art_warning, model_type, decision_tree_estimator, get_iris_dataset):
     try:
-        classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
-        attack = MembershipInferenceBlackBox(classifier, attack_model_type="rf")
-        backend_check_membership_probabilities(attack, get_iris_dataset, attack_train_ratio)
+        (x_train, _), (x_test, _) = get_iris_dataset
+        classifier = decision_tree_estimator()
+        attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
+        pred_x = classifier.predict(x_train)
+        test_pred_x = classifier.predict(x_test)
+        pred = (pred_x, test_pred_x)
+        backend_check_membership_accuracy_pred(attack, get_iris_dataset, pred, attack_train_ratio, 0.25)
     except ARTTestException as e:
         art_warning(e)
 
-def test_black_box_tabular_prob_nn(art_warning, tabular_dl_estimator_for_attack, get_iris_dataset):
+@pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
+def test_black_box_tabular_prob_dl(art_warning, tabular_dl_estimator_for_attack, get_iris_dataset, model_type):
     try:
         classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
-        attack = MembershipInferenceBlackBox(classifier, attack_model_type="nn")
+        attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
         backend_check_membership_probabilities(attack, get_iris_dataset, attack_train_ratio)
     except ARTTestException as e:
         art_warning(e)
 
-def test_black_box_with_model_prob(
+def test_black_box_with_model_prob_dl(
     art_warning, tabular_dl_estimator_for_attack, estimator_for_attack, get_iris_dataset
 ):
     try:
@@ -176,7 +224,7 @@ def test_black_box_with_model_prob(
 @pytest.mark.parametrize("model_type", ["nn", "rf", "gb", "lr", "dt", "knn", "svm"])
-def test_black_box_pred(art_warning, model_type, tabular_dl_estimator_for_attack, get_iris_dataset):
+def test_black_box_pred_dl(art_warning, model_type, tabular_dl_estimator_for_attack, get_iris_dataset):
     try:
         (x_train, _), (x_test, _) = get_iris_dataset
         classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
@@ -207,7 +255,29 @@ def test_black_box_loss_regression_pred(art_warning, model_type, get_diabetes_da
         art_warning(e)
 
-def test_errors(art_warning, tabular_dl_estimator_for_attack, get_iris_dataset):
+def test_errors(art_warning, decision_tree_estimator, get_iris_dataset):
+    try:
+        classifier = decision_tree_estimator()
+        (x_train, y_train), (x_test, y_test) = get_iris_dataset
+        with pytest.raises(ValueError):
+            MembershipInferenceBlackBox(classifier, attack_model_type="a")
+        with pytest.raises(ValueError):
+            MembershipInferenceBlackBox(classifier, input_type="a")
+        attack = MembershipInferenceBlackBox(classifier)
+        with pytest.raises(ValueError):
+            attack.fit(x_train, y_test, x_test, y_test)
+        with pytest.raises(ValueError):
+            attack.fit(x_train, y_train, x_test, y_train)
+        with pytest.raises(ValueError):
+            attack.infer(x_train, y_test)
+        attack.fit(x_train, y_train, x_test, y_test)
+        with pytest.raises(ValueError):
+            attack.infer(x_test, y_test=None)
+    except ARTTestException as e:
+        art_warning(e)
+
+
+def test_errors_dl(art_warning, tabular_dl_estimator_for_attack, get_iris_dataset):
     try:
         classifier = tabular_dl_estimator_for_attack(MembershipInferenceBlackBox)
         (x_train, y_train), (x_test, y_test) = get_iris_dataset
@@ -223,10 +293,17 @@ def test_errors(art_warning, tabular_dl_estimator_for_attack, get_iris_dataset):
             attack.fit(x_train, y_train, x_test, y_train)
         with pytest.raises(ValueError):
             attack.infer(x_train, y_test)
+        attack.fit(x_train, y_train, x_test, y_test)
+        with pytest.raises(ValueError):
+            attack.infer(x_test, y_test=None)
         attack = MembershipInferenceBlackBox(classifier, input_type="loss")
+        with pytest.raises(ValueError):
+            attack.fit(x_train, test_x=x_test)
         attack.fit(x_train, y_train, x_test, y_test)
         with pytest.raises(ValueError):
             attack.infer(None, y_test, pred=pred_test)
+        with pytest.raises(ValueError):
+            attack.infer(x_test, y_test=None)
     except ARTTestException as e:
         art_warning(e)
@@ -240,19 +317,27 @@ def test_classifier_type_check_fail(art_warning):
         art_warning(e)
 
-def backend_check_membership_accuracy(attack, dataset, attack_train_ratio, approx):
+def backend_check_membership_accuracy(attack, dataset, attack_train_ratio, approx, use_label=True):
     (x_train, y_train), (x_test, y_test) = dataset
     attack_train_size = int(len(x_train) * attack_train_ratio)
     attack_test_size = int(len(x_test) * attack_train_ratio)
     # train attack model using only attack_train_ratio of data
-    attack.fit(
-        x_train[:attack_train_size], y_train[:attack_train_size], x_test[:attack_test_size], y_test[:attack_test_size]
-    )
-
-    # infer attacked feature on remainder of data
-    inferred_train = attack.infer(x_train[attack_train_size:], y_train[attack_train_size:])
-    inferred_test = attack.infer(x_test[attack_test_size:], y_test[attack_test_size:])
+    if use_label:
+        attack.fit(
+            x_train[:attack_train_size],
+            y_train[:attack_train_size],
+            x_test[:attack_test_size],
+            y_test[:attack_test_size],
+        )
+        # infer attacked feature on remainder of data
+        inferred_train = attack.infer(x_train[attack_train_size:], y_train[attack_train_size:])
+        inferred_test = attack.infer(x_test[attack_test_size:], y_test[attack_test_size:])
+    else:
+        attack.fit(x_train[:attack_train_size], test_x=x_test[:attack_test_size])
+        # infer attacked feature on remainder of data
+        inferred_train = attack.infer(x_train[attack_train_size:])
+        inferred_test = attack.infer(x_test[attack_test_size:])
 
     # check accuracy
     backend_check_accuracy(inferred_train, inferred_test, approx)