
Commit

updates tests
antoinedemathelin committed Oct 31, 2024
1 parent 43149e2 commit 1b21fde
Showing 15 changed files with 113 additions and 102 deletions.
5 changes: 1 addition & 4 deletions adapt/feature_based/_deepcoral.py
@@ -190,10 +190,7 @@ def train_step(self, data):
self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))

# Update metrics
-        self.compiled_metrics.update_state(ys, ys_pred)
-        self.compiled_loss(ys, ys_pred)
-        # Return a dict mapping metric names to current value
-        logs = {m.name: m.result() for m in self.metrics}
+        logs = self._update_logs(ys, ys_pred)
logs.update({"disc_loss": disc_loss})
return logs

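Note: the four metric-bookkeeping lines removed here (and in the other `train_step`/`pretrain_step` methods below) are replaced by a shared `_update_logs` helper. Its body is not part of this commit's visible hunks; a plausible sketch, assuming it lives on the common base model and simply factors out the removed lines:

    # Hypothetical sketch of the shared helper (not shown in this diff): it
    # consolidates the metric/loss bookkeeping removed from each train_step.
    def _update_logs(self, ys, ys_pred):
        # Update compiled metrics and the compiled loss tracker
        self.compiled_metrics.update_state(ys, ys_pred)
        self.compiled_loss(ys, ys_pred)
        # Return a dict mapping metric names to their current values
        return {m.name: m.result() for m in self.metrics}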
25 changes: 7 additions & 18 deletions adapt/feature_based/_mcd.py
@@ -122,15 +122,12 @@ def pretrain_step(self, data):
gradients_disc = disc_tape.gradient(disc_loss, trainable_vars_disc)

# Update weights
-        self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
-        self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
-        self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
+        self.pretrain_optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
+        self.pretrain_optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
+        self.pretrain_optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))

# Update metrics
-        self.compiled_metrics.update_state(ys, ys_pred)
-        self.compiled_loss(ys, ys_pred)
-        # Return a dict mapping metric names to current value
-        logs = {m.name: m.result() for m in self.metrics}
+        logs = self._update_logs(ys, ys_pred)
return logs


@@ -162,7 +159,7 @@ def train_step(self, data):
# Compute gradients
trainable_vars_enc = self.encoder_.trainable_variables
gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)
-        self.optimizer.apply_gradients(zip(gradients_enc, trainable_vars_enc))
+        self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))

# loss
with tf.GradientTape() as task_tape, tf.GradientTape() as enc_tape, tf.GradientTape() as disc_tape:
@@ -212,10 +209,7 @@ def train_step(self, data):
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))

# Update metrics
-        self.compiled_metrics.update_state(ys, ys_pred)
-        self.compiled_loss(ys, ys_pred)
-        # Return a dict mapping metric names to current value
-        logs = {m.name: m.result() for m in self.metrics}
+        logs = self._update_logs(ys, ys_pred)
logs.update({"disc_loss": discrepancy})
return logs

@@ -264,12 +258,7 @@ def _initialize_networks(self):


def _initialize_weights(self, shape_X):
-        # Init weights encoder
-        self(np.zeros((1,) + shape_X))
-        X_enc = self.encoder_(np.zeros((1,) + shape_X))
-        self.task_(X_enc)
-        self.discriminator_(X_enc)
-
+        super()._initialize_weights(shape_X)
# Add noise to discriminator in order to
# differentiate from task
weights = self.discriminator_.get_weights()
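Note: the pretrain step now routes updates through dedicated `pretrain_optimizer`, `pretrain_optimizer_enc` and `pretrain_optimizer_disc` objects. Their construction is not shown in this commit; a plausible sketch, assuming they are cloned from the compiled optimizer the same way `optimizer_sigma` and `optimizer_weight` are cloned in the `compile` overrides added to `_iwn.py` and `_wann.py` below:

    # Hypothetical helper (not part of this diff): give pretraining its own
    # optimizers so their slot variables do not interfere with the training ones.
    def _build_pretrain_optimizers(self):
        config = self.optimizer.get_config()
        opt_cls = self.optimizer.__class__
        self.pretrain_optimizer = opt_cls.from_config(config)
        self.pretrain_optimizer_enc = opt_cls.from_config(config)
        self.pretrain_optimizer_disc = opt_cls.from_config(config)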
12 changes: 2 additions & 10 deletions adapt/feature_based/_mdd.py
@@ -157,11 +157,7 @@ def train_step(self, data):
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))

# Update metrics
-        self.compiled_metrics.update_state(ys, ys_pred)
-        self.compiled_loss(ys, ys_pred)
-        # Return a dict mapping metric names to current value
-        logs = {m.name: m.result() for m in self.metrics}
-        # disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
+        logs = self._update_logs(ys, ys_pred)
logs.update({"disc_loss": disc_loss})
return logs

@@ -189,11 +185,7 @@ def _initialize_networks(self):

def _initialize_weights(self, shape_X):
-        # Init weights encoder
-        self(np.zeros((1,) + shape_X))
-        X_enc = self.encoder_(np.zeros((1,) + shape_X))
-        self.task_(X_enc)
-        self.discriminator_(X_enc)

+        super()._initialize_weights(shape_X)
# Add noise to discriminator in order to
# differentiate from task
weights = self.discriminator_.get_weights()
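Note: both `_mcd.py` and `_mdd.py` now delegate weight initialization to `super()._initialize_weights(shape_X)` and keep only the discriminator-perturbation step, whose body is collapsed in this view. A hypothetical sketch of what that elided step typically looks like (the noise scale is an assumption, not taken from the commit):

    # Hypothetical continuation of the collapsed lines above (noise scale is an
    # assumption); `np` is numpy, already imported by the module. Jitter the
    # copied discriminator weights so it does not start identical to the task.
    weights = self.discriminator_.get_weights()
    weights = [w + np.random.normal(scale=0.01, size=w.shape) for w in weights]
    self.discriminator_.set_weights(weights)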
72 changes: 53 additions & 19 deletions adapt/instance_based/_iwn.py
@@ -12,7 +12,7 @@

from adapt.base import BaseAdaptDeep, make_insert_doc
from adapt.utils import (check_arrays, check_network, get_default_task,
-                         set_random_seed, check_estimator, check_sample_weight)
+                         set_random_seed, check_estimator, check_sample_weight, check_if_compiled)

EPS = np.finfo(np.float32).eps

@@ -141,8 +141,21 @@ def _initialize_networks(self):
name="weighter")
self.sigma_ = tf.Variable(self.sigma_init,
trainable=self.update_sigma)



+        if not hasattr(self, "estimator_"):
+            self.estimator_ = check_estimator(self.estimator,
+                                              copy=self.copy,
+                                              force_copy=True)


+    def _initialize_weights(self, shape_X):
+        if hasattr(self, "weighter_"):
+            self.weighter_.build((None,) + shape_X)
+        self.build((None,) + shape_X)
+        if isinstance(self.estimator_, Model):
+            self.estimator_.build((None,) + shape_X)


def pretrain_step(self, data):
# Unpack the data.
Xs, Xt, ys, yt = self._unpack_data(data)
@@ -163,7 +176,7 @@ def pretrain_step(self, data):
gradients = tape.gradient(loss, trainable_vars)

# Update weights
-        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
+        self.pretrain_optimizer.apply_gradients(zip(gradients, trainable_vars))

logs = {"loss": loss}
return logs
@@ -200,7 +213,7 @@ def train_step(self, data):

# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
-        self.optimizer.apply_gradients(zip(gradients_sigma, [self.sigma_]))
+        self.optimizer_sigma.apply_gradients(zip(gradients_sigma, [self.sigma_]))

# Return a dict mapping metric names to current value
logs = {"loss": loss, "sigma": self.sigma_}
@@ -214,6 +227,26 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None,
return self


+    def compile(self,
+                optimizer=None,
+                loss=None,
+                metrics=None,
+                loss_weights=None,
+                weighted_metrics=None,
+                run_eagerly=None,
+                steps_per_execution=None,
+                **kwargs):
+        super().compile(optimizer=optimizer,
+                        loss=loss,
+                        metrics=metrics,
+                        loss_weights=loss_weights,
+                        weighted_metrics=weighted_metrics,
+                        run_eagerly=run_eagerly,
+                        steps_per_execution=steps_per_execution,
+                        **kwargs)
+        self.optimizer_sigma = self.optimizer.__class__.from_config(self.optimizer.get_config())


def fit_weights(self, Xs, Xt, **fit_params):
"""
Fit importance weighting.
@@ -276,22 +309,23 @@ def fit_estimator(self, X, y, sample_weight=None,
X, y = check_arrays(X, y, accept_sparse=True)
set_random_seed(random_state)

-        if (not warm_start) or (not hasattr(self, "estimator_")):
-            estimator = self.estimator
-            self.estimator_ = check_estimator(estimator,
+        if not hasattr(self, "estimator_"):
+            self.estimator_ = check_estimator(self.estimator,
copy=self.copy,
force_copy=True)
-            if isinstance(self.estimator_, Model):
-                compile_params = {}
-                if estimator._is_compiled:
-                    compile_params["loss"] = deepcopy(estimator.loss)
-                    compile_params["optimizer"] = deepcopy(estimator.optimizer)
-                else:
-                    raise ValueError("The given `estimator` argument"
-                                     " is not compiled yet. "
-                                     "Please give a compiled estimator or "
-                                     "give a `loss` and `optimizer` arguments.")
-                self.estimator_.compile(**compile_params)

+        estimator = self.estimator
+        if isinstance(self.estimator_, Model):
+            compile_params = {}
+            if check_if_compiled(estimator):
+                compile_params["loss"] = deepcopy(estimator.loss)
+                compile_params["optimizer"] = deepcopy(estimator.optimizer)
+            else:
+                raise ValueError("The given `estimator` argument"
+                                 " is not compiled yet. "
+                                 "Please give a compiled estimator or "
+                                 "give a `loss` and `optimizer` arguments.")
+            self.estimator_.compile(**compile_params)

fit_args = [
p.name
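Note: the new `compile` override clones the user-supplied optimizer so that `sigma_` is trained with its own optimizer state, separate from the weighter's. The cloning idiom in isolation (a minimal sketch; `Adam` and the learning rate are just examples, not from the commit):

    # Minimal sketch of the cloning idiom used for `optimizer_sigma` above:
    # build a second, independent optimizer with the same class and config.
    from tensorflow.keras.optimizers import Adam

    opt = Adam(learning_rate=0.001)
    opt_sigma = opt.__class__.from_config(opt.get_config())
    assert type(opt_sigma) is type(opt) and opt_sigma is not opt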
42 changes: 35 additions & 7 deletions adapt/instance_based/_wann.py
@@ -116,6 +116,16 @@ def _initialize_networks(self):
name="discriminator")


+    def _initialize_weights(self, shape_X):
+        if hasattr(self, "weighter_"):
+            self.weighter_.build((None,) + shape_X)
+        if hasattr(self, "task_"):
+            self.task_.build((None,) + shape_X)
+        if hasattr(self, "discriminator_"):
+            self.discriminator_.build((None,) + shape_X)
+        self.build((None,) + shape_X)


def _add_regularization(self, weighter):
for i in range(len(weighter.layers)):
if hasattr(weighter.layers[i], "kernel_constraint"):
@@ -149,7 +159,7 @@ def pretrain_step(self, data):
gradients = tape.gradient(loss, trainable_vars)

# Update weights
-        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
+        self.pretrain_optimizer.apply_gradients(zip(gradients, trainable_vars))

logs = {"loss": loss}
return logs
@@ -217,15 +227,33 @@ def train_step(self, data):

# Update weights
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
-        self.optimizer.apply_gradients(zip(gradients_weight, trainable_vars_weight))
-        self.optimizer.apply_gradients(zip(gradients_disc, trainable_vars_disc))
+        self.optimizer_weight.apply_gradients(zip(gradients_weight, trainable_vars_weight))
+        self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))

# Update metrics
-        self.compiled_metrics.update_state(ys, ys_pred)
-        self.compiled_loss(ys, ys_pred)
-        # Return a dict mapping metric names to current value
-        logs = {m.name: m.result() for m in self.metrics}
+        logs = self._update_logs(ys, ys_pred)
return logs


+    def compile(self,
+                optimizer=None,
+                loss=None,
+                metrics=None,
+                loss_weights=None,
+                weighted_metrics=None,
+                run_eagerly=None,
+                steps_per_execution=None,
+                **kwargs):
+        super().compile(optimizer=optimizer,
+                        loss=loss,
+                        metrics=metrics,
+                        loss_weights=loss_weights,
+                        weighted_metrics=weighted_metrics,
+                        run_eagerly=run_eagerly,
+                        steps_per_execution=steps_per_execution,
+                        **kwargs)
+        self.optimizer_weight = self.optimizer.__class__.from_config(self.optimizer.get_config())
+        self.optimizer_disc = self.optimizer.__class__.from_config(self.optimizer.get_config())


def predict_weights(self, X):
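Note: `_initialize_weights` in both IWN and WANN now calls `Model.build` instead of forwarding dummy zero arrays through the sub-networks. A minimal sketch of the difference (the weighter architecture and feature count are illustrative):

    # Minimal sketch (illustrative architecture): build((None, n_features))
    # creates the layer variables without running a forward pass on dummy data.
    import tensorflow as tf

    weighter = tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation="relu"),
        tf.keras.layers.Dense(1, activation="softplus"),
    ])

    # Old style: call the model on np.zeros((1, 5)) to force variable creation.
    weighter.build((None, 5))  # new style: declare the input shape instead
    print([w.shape for w in weighter.get_weights()])  # variables now exist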
13 changes: 4 additions & 9 deletions adapt/parameter_based/_finetuning.py
@@ -146,10 +146,7 @@ def pretrain_step(self, data):
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))

# Update metrics
-        self.compiled_metrics.update_state(ys, ys_pred)
-        self.compiled_loss(ys, ys_pred)
-        # Return a dict mapping metric names to current value
-        logs = {m.name: m.result() for m in self.metrics}
+        logs = self._update_logs(ys, ys_pred)
return logs


@@ -185,13 +182,11 @@ def train_step(self, data):

# Update weights
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
-        self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
+        if len(trainable_vars_enc) > 0:
+            self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))

# Update metrics
-        self.compiled_metrics.update_state(ys, ys_pred)
-        self.compiled_loss(ys, ys_pred)
-        # Return a dict mapping metric names to current value
-        logs = {m.name: m.result() for m in self.metrics}
+        logs = self._update_logs(ys, ys_pred)
return logs


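Note: the new `len(trainable_vars_enc) > 0` guard skips the encoder update entirely when the encoder is fully frozen (as in the `training=[False]` test case below), so the optimizer is never asked to apply gradients over an empty variable list. A small self-contained illustration of the frozen case (layer sizes are arbitrary):

    # Self-contained illustration (arbitrary layer sizes): a fully frozen layer
    # has no trainable variables, so the guarded update is simply skipped.
    import tensorflow as tf

    encoder_stub = tf.keras.layers.Dense(4, trainable=False)
    encoder_stub.build((None, 3))

    trainable_vars_enc = encoder_stub.trainable_variables
    print(len(trainable_vars_enc))  # 0 -> the apply_gradients call is skipped
    if len(trainable_vars_enc) > 0:
        tf.keras.optimizers.Adam().apply_gradients(
            zip([tf.zeros_like(v) for v in trainable_vars_enc], trainable_vars_enc))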
2 changes: 1 addition & 1 deletion adapt/utils.py
@@ -590,7 +590,7 @@ def check_if_compiled(network):
"""
if hasattr(network, "compiled") and network.compiled:
return True
-    elif hasattr(network, "_is_compiled") and networtf._is_compiled:
+    elif hasattr(network, "_is_compiled") and network._is_compiled:
return True
else:
return False
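Note: this hunk fixes a typo (`networtf` → `network`) in `check_if_compiled`. Reassembled from the visible lines, the corrected helper reads roughly as follows (docstring elided):

    def check_if_compiled(network):
        # Check the public `compiled` flag first, then fall back to the private
        # `_is_compiled` attribute used by older Keras versions.
        if hasattr(network, "compiled") and network.compiled:
            return True
        elif hasattr(network, "_is_compiled") and network._is_compiled:
            return True
        else:
            return False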
9 changes: 3 additions & 6 deletions tests/test_finetuning.py
@@ -5,10 +5,7 @@
from adapt.utils import make_classification_da
from adapt.parameter_based import FineTuning
from tensorflow.keras.initializers import GlorotUniform
-try:
-    from tensorflow.keras.optimizers.legacy import Adam
-except:
-    from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam

np.random.seed(0)
tf.random.set_seed(0)
@@ -44,7 +41,7 @@ def test_finetune():
loss="bce", optimizer=Adam(), random_state=0)
fine_tuned.fit(Xt[ind], yt[ind], epochs=100, verbose=0)

-    assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() > 1.
+    assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() > 0.5
assert np.mean((fine_tuned.predict(Xt).ravel()>0.5) == yt) > 0.9

fine_tuned = FineTuning(encoder=model.encoder_, task=model.task_,
@@ -53,7 +50,7 @@ def test_finetune():
fine_tuned.fit(Xt[ind], yt[ind], epochs=100, verbose=0)

assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() == 0.
-    assert np.abs(fine_tuned.encoder_.get_weights()[-1] - model.encoder_.get_weights()[-1]).sum() > 1.
+    assert np.abs(fine_tuned.encoder_.get_weights()[-1] - model.encoder_.get_weights()[-1]).sum() > .5

fine_tuned = FineTuning(encoder=model.encoder_, task=model.task_,
training=[False],
5 changes: 1 addition & 4 deletions tests/test_iwc.py
@@ -7,10 +7,7 @@
from adapt.utils import make_classification_da
from adapt.instance_based import IWC
from adapt.utils import get_default_discriminator
-try:
-    from tensorflow.keras.optimizers.legacy import Adam
-except:
-    from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam

Xs, ys, Xt, yt = make_classification_da()

5 changes: 1 addition & 4 deletions tests/test_iwn.py
@@ -7,10 +7,7 @@
from adapt.instance_based import IWN
from adapt.utils import get_default_task
from sklearn.neighbors import KNeighborsClassifier
-try:
-    from tensorflow.keras.optimizers.legacy import Adam
-except:
-    from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam

Xs, ys, Xt, yt = make_classification_da()

5 changes: 1 addition & 4 deletions tests/test_mcd.py
@@ -6,10 +6,7 @@
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
-try:
-    from tensorflow.keras.optimizers.legacy import Adam
-except:
-    from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform

from adapt.feature_based import MCD
5 changes: 1 addition & 4 deletions tests/test_mdd.py
@@ -6,10 +6,7 @@
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
-try:
-    from tensorflow.keras.optimizers.legacy import Adam
-except:
-    from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform

from adapt.feature_based import MDD
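Note: each test module replaces the `try/except` fallback to the legacy optimizer namespace with a direct import. The likely intent (not stated in the commit message) is that `tensorflow.keras.optimizers.legacy` only exists in Keras 2, so the plain import is the portable one. The removed pattern, for reference (shown with `except ImportError` instead of the original bare `except`):

    # Pattern removed from the test files: prefer the Keras 2 legacy optimizer
    # when the `legacy` namespace is importable, otherwise use the standard one.
    try:
        from tensorflow.keras.optimizers.legacy import Adam
    except ImportError:
        from tensorflow.keras.optimizers import Adam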
(3 more changed files not shown)
