From 06cf993bac9434b7f9e1f51b45471dfa346708f9 Mon Sep 17 00:00:00 2001
From: MatthewMiddlehurst
Date: Sun, 14 Apr 2024 11:07:45 +0100
Subject: [PATCH 01/31] tensorflow bound

---
 pyproject.toml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 2faf66252c..803c677b8b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -78,8 +78,8 @@ all_extras = [
     "statsmodels>=0.12.1",
     "stumpy>=1.5.1",
     "tbats>=1.1.0",
-    "tensorflow>=2.12; python_version < '3.12' and python_version >= '3.9'",
-    "tensorflow-addons; python_version < '3.12'",
+    "tensorflow>=2.12; python_version >= '3.9'",
+    "tensorflow-addons; python_version >= '3.9'",
     "torch>=1.13.1",
     "tsfresh>=0.20.0",
     "tslearn>=0.5.2",
@@ -92,8 +92,8 @@ all_extras = [
 ]
 dl = [
     "keras-self-attention",
-    "tensorflow>=2.12; python_version < '3.12' and python_version >= '3.9'",
-    "tensorflow-addons; python_version < '3.12'",
+    "tensorflow>=2.12; python_version >= '3.9'",
+    "tensorflow-addons; python_version >= '3.9'",
 ]
 unstable_extras = [
     "mrsqm>=0.0.1,<0.1.0 ; platform_system == 'Darwin'",  # requires gcc and fftw to be installed for Windows and some other OS (see http://www.fftw.org/index.html)

From e296be4c6fdd48acba40c56955d67e836feda7a4 Mon Sep 17 00:00:00 2001
From: hadifawaz1999
Date: Fri, 26 Apr 2024 09:00:12 +0200
Subject: [PATCH 02/31] add manually instance normalization

---
 aeon/classification/deep_learning/_encoder.py |   2 +-
 aeon/networks/_encoder.py                     | 336 +++++++++++++++++-
 aeon/regression/deep_learning/_encoder.py     |   2 +-
 aeon/utils/networks/__init__.py               |   1 +
 aeon/utils/networks/tensorflow_addons.py      |  62 ++++
 5 files changed, 397 insertions(+), 6 deletions(-)
 create mode 100644 aeon/utils/networks/__init__.py
 create mode 100644 aeon/utils/networks/tensorflow_addons.py

diff --git a/aeon/classification/deep_learning/_encoder.py b/aeon/classification/deep_learning/_encoder.py
index b35ece0528..bde6b24b8a 100644
--- a/aeon/classification/deep_learning/_encoder.py
+++ b/aeon/classification/deep_learning/_encoder.py
@@ -83,7 +83,7 @@ class EncoderClassifier(BaseDeepClassifier):
     """
 
     _tags = {
-        "python_dependencies": ["tensorflow", "tensorflow_addons"],
+        "python_dependencies": "tensorflow",
     }
 
     def __init__(
diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py
index 459b18112b..6b67a11bf5 100644
--- a/aeon/networks/_encoder.py
+++ b/aeon/networks/_encoder.py
@@ -2,7 +2,336 @@
 __maintainer__ = []
 
+import logging
+
+import tensorflow as tf
+from typeguard import typechecked
+
 from aeon.networks.base import BaseDeepNetwork
+from aeon.utils.networks import tensorflow_addons as types
+
+
+@tf.keras.utils.register_keras_serializable(package="Addons")
+class GroupNormalization(tf.keras.layers.Layer):
+    """Group normalization layer.
+
+    Source: "Group Normalization" (Yuxin Wu & Kaiming He, 2018)
+    https://arxiv.org/abs/1803.08494
+
+    Group Normalization divides the channels into groups and computes
+    within each group the mean and variance for normalization.
+    Empirically, its accuracy is more stable than batch norm in a wide
+    range of small batch sizes, if learning rate is adjusted linearly
+    with batch sizes.
+
+    Relation to Layer Normalization:
+    If the number of groups is set to 1, then this operation becomes identical
+    to Layer Normalization.
+
+    Relation to Instance Normalization:
+    If the number of groups is set to the
+    input dimension (number of groups is equal
+    to number of channels), then this operation becomes
+    identical to Instance Normalization.
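+
+    As a quick illustrative sketch of these relations (the input shape below
+    is an arbitrary assumption; any channels-last tensor whose channel count
+    is divisible by `groups` works the same way):
+
+    >>> import numpy as np
+    >>> x = np.random.rand(2, 10, 6).astype("float32")
+    >>> GroupNormalization(groups=1)(x).shape  # behaves like layer norm
+    TensorShape([2, 10, 6])
+    >>> GroupNormalization(groups=6)(x).shape  # behaves like instance norm
+    TensorShape([2, 10, 6])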
+ + Args: + groups: Integer, the number of groups for Group Normalization. + Can be in the range [1, N] where N is the input dimension. + The input dimension must be divisible by the number of groups. + Defaults to 32. + axis: Integer, the axis that should be normalized. + epsilon: Small float added to variance to avoid dividing by zero. + center: If True, add offset of `beta` to normalized tensor. + If False, `beta` is ignored. + scale: If True, multiply by `gamma`. + If False, `gamma` is not used. + beta_initializer: Initializer for the beta weight. + gamma_initializer: Initializer for the gamma weight. + beta_regularizer: Optional regularizer for the beta weight. + gamma_regularizer: Optional regularizer for the gamma weight. + beta_constraint: Optional constraint for the beta weight. + gamma_constraint: Optional constraint for the gamma weight. + + Input shape: + Arbitrary. Use the keyword argument `input_shape` + (tuple of integers, does not include the samples axis) + when using this layer as the first layer in a model. + + Output shape: + Same shape as input. + + Notes + ----- + This code was taken from the soon to be deprecated project + tensorflow_addons: + https://github.com/tensorflow/addons/tree/v0.20.0 + """ + + @typechecked + def __init__( + self, + groups: int = 32, + axis: int = -1, + epsilon: float = 1e-3, + center: bool = True, + scale: bool = True, + beta_initializer: types.Initializer = "zeros", + gamma_initializer: types.Initializer = "ones", + beta_regularizer: types.Regularizer = None, + gamma_regularizer: types.Regularizer = None, + beta_constraint: types.Constraint = None, + gamma_constraint: types.Constraint = None, + **kwargs, + ): + super().__init__(**kwargs) + self.supports_masking = True + self.groups = groups + self.axis = axis + self.epsilon = epsilon + self.center = center + self.scale = scale + self.beta_initializer = tf.keras.initializers.get(beta_initializer) + self.gamma_initializer = tf.keras.initializers.get(gamma_initializer) + self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer) + self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer) + self.beta_constraint = tf.keras.constraints.get(beta_constraint) + self.gamma_constraint = tf.keras.constraints.get(gamma_constraint) + self._check_axis() + + def build(self, input_shape): + + self._check_if_input_shape_is_none(input_shape) + self._set_number_of_groups_for_instance_norm(input_shape) + self._check_size_of_dimensions(input_shape) + self._create_input_spec(input_shape) + + self._add_gamma_weight(input_shape) + self._add_beta_weight(input_shape) + self.built = True + super().build(input_shape) + + def call(self, inputs): + + input_shape = tf.keras.backend.int_shape(inputs) + tensor_input_shape = tf.shape(inputs) + + reshaped_inputs, group_shape = self._reshape_into_groups( + inputs, input_shape, tensor_input_shape + ) + + normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) + + is_instance_norm = (input_shape[self.axis] // self.groups) == 1 + if not is_instance_norm: + outputs = tf.reshape(normalized_inputs, tensor_input_shape) + else: + outputs = normalized_inputs + + return outputs + + def get_config(self): + config = { + "groups": self.groups, + "axis": self.axis, + "epsilon": self.epsilon, + "center": self.center, + "scale": self.scale, + "beta_initializer": tf.keras.initializers.serialize(self.beta_initializer), + "gamma_initializer": tf.keras.initializers.serialize( + self.gamma_initializer + ), + "beta_regularizer": 
tf.keras.regularizers.serialize(self.beta_regularizer), + "gamma_regularizer": tf.keras.regularizers.serialize( + self.gamma_regularizer + ), + "beta_constraint": tf.keras.constraints.serialize(self.beta_constraint), + "gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint), + } + base_config = super().get_config() + return {**base_config, **config} + + def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): + + group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] + is_instance_norm = (input_shape[self.axis] // self.groups) == 1 + if not is_instance_norm: + group_shape[self.axis] = input_shape[self.axis] // self.groups + group_shape.insert(self.axis, self.groups) + group_shape = tf.stack(group_shape) + reshaped_inputs = tf.reshape(inputs, group_shape) + return reshaped_inputs, group_shape + else: + return inputs, group_shape + + def _apply_normalization(self, reshaped_inputs, input_shape): + + group_shape = tf.keras.backend.int_shape(reshaped_inputs) + group_reduction_axes = list(range(1, len(group_shape))) + is_instance_norm = (input_shape[self.axis] // self.groups) == 1 + if not is_instance_norm: + axis = -2 if self.axis == -1 else self.axis - 1 + else: + axis = -1 if self.axis == -1 else self.axis - 1 + group_reduction_axes.pop(axis) + + mean, variance = tf.nn.moments( + reshaped_inputs, group_reduction_axes, keepdims=True + ) + + gamma, beta = self._get_reshaped_weights(input_shape) + normalized_inputs = tf.nn.batch_normalization( + reshaped_inputs, + mean=mean, + variance=variance, + scale=gamma, + offset=beta, + variance_epsilon=self.epsilon, + ) + return normalized_inputs + + def _get_reshaped_weights(self, input_shape): + broadcast_shape = self._create_broadcast_shape(input_shape) + gamma = None + beta = None + if self.scale: + gamma = tf.reshape(self.gamma, broadcast_shape) + + if self.center: + beta = tf.reshape(self.beta, broadcast_shape) + return gamma, beta + + def _check_if_input_shape_is_none(self, input_shape): + dim = input_shape[self.axis] + if dim is None: + raise ValueError( + "Axis " + str(self.axis) + " of " + "input tensor should have a defined dimension " + "but the layer received an input with shape " + str(input_shape) + "." + ) + + def _set_number_of_groups_for_instance_norm(self, input_shape): + dim = input_shape[self.axis] + + if self.groups == -1: + self.groups = dim + + def _check_size_of_dimensions(self, input_shape): + + dim = input_shape[self.axis] + if dim < self.groups: + raise ValueError( + "Number of groups (" + str(self.groups) + ") cannot be " + "more than the number of channels (" + str(dim) + ")." + ) + + if dim % self.groups != 0: + raise ValueError( + "Number of groups (" + str(self.groups) + ") must be a " + "multiple of the number of channels (" + str(dim) + ")." + ) + + def _check_axis(self): + + if self.axis == 0: + raise ValueError( + "You are trying to normalize your batch axis. 
Do you want to "
+                "use tf.layer.batch_normalization instead"
+            )
+
+    def _create_input_spec(self, input_shape):
+
+        dim = input_shape[self.axis]
+        self.input_spec = tf.keras.layers.InputSpec(
+            ndim=len(input_shape), axes={self.axis: dim}
+        )
+
+    def _add_gamma_weight(self, input_shape):
+
+        dim = input_shape[self.axis]
+        shape = (dim,)
+
+        if self.scale:
+            self.gamma = self.add_weight(
+                shape=shape,
+                name="gamma",
+                initializer=self.gamma_initializer,
+                regularizer=self.gamma_regularizer,
+                constraint=self.gamma_constraint,
+            )
+        else:
+            self.gamma = None
+
+    def _add_beta_weight(self, input_shape):
+
+        dim = input_shape[self.axis]
+        shape = (dim,)
+
+        if self.center:
+            self.beta = self.add_weight(
+                shape=shape,
+                name="beta",
+                initializer=self.beta_initializer,
+                regularizer=self.beta_regularizer,
+                constraint=self.beta_constraint,
+            )
+        else:
+            self.beta = None
+
+    def _create_broadcast_shape(self, input_shape):
+        broadcast_shape = [1] * len(input_shape)
+        is_instance_norm = (input_shape[self.axis] // self.groups) == 1
+        if not is_instance_norm:
+            broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
+            broadcast_shape.insert(self.axis, self.groups)
+        else:
+            broadcast_shape[self.axis] = self.groups
+        return broadcast_shape
+
+
+@tf.keras.utils.register_keras_serializable(package="Addons")
+class InstanceNormalization(GroupNormalization):
+    """Instance normalization layer.
+
+    Instance Normalization is a specific case of `GroupNormalization` since
+    it normalizes all features of one channel. The group size is equal to the
+    channel size. Empirically, its accuracy is more stable than batch norm in a
+    wide range of small batch sizes, if learning rate is adjusted linearly
+    with batch sizes.
+
+    Arguments
+    axis: Integer, the axis that should be normalized.
+    epsilon: Small float added to variance to avoid dividing by zero.
+    center: If True, add offset of `beta` to normalized tensor.
+        If False, `beta` is ignored.
+    scale: If True, multiply by `gamma`.
+        If False, `gamma` is not used.
+    beta_initializer: Initializer for the beta weight.
+    gamma_initializer: Initializer for the gamma weight.
+    beta_regularizer: Optional regularizer for the beta weight.
+    gamma_regularizer: Optional regularizer for the gamma weight.
+    beta_constraint: Optional constraint for the beta weight.
+    gamma_constraint: Optional constraint for the gamma weight.
+
+    Input shape
+        Arbitrary. Use the keyword argument `input_shape`
+        (tuple of integers, does not include the samples axis)
+        when using this layer as the first layer in a model.
+
+    Output shape
+        Same shape as input.
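+
+    A minimal usage sketch (the input shape is assumed only for
+    illustration; any input with a defined channel axis works):
+
+    >>> import numpy as np
+    >>> x = np.random.rand(2, 50, 3).astype("float32")
+    >>> InstanceNormalization(axis=-1)(x).shape
+    TensorShape([2, 50, 3])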
+ + References + ---------- + - [Instance Normalization: The Missing Ingredient for Fast Stylization] + (https://arxiv.org/abs/1607.08022) + """ + + def __init__(self, **kwargs): + if "groups" in kwargs: + logging.warning("The given value for groups will be overwritten.") + + kwargs["groups"] = -1 + super().__init__(**kwargs) class EncoderNetwork(BaseDeepNetwork): @@ -45,7 +374,7 @@ class EncoderNetwork(BaseDeepNetwork): """ - _tags = {"python_dependencies": ["tensorflow", "tensorflow_addons"]} + _tags = {"python_dependencies": "tensorflow"} def __init__( self, @@ -84,7 +413,6 @@ def build_network(self, input_shape, **kwargs): output_layer : a keras layer """ import tensorflow as tf - import tensorflow_addons as tfa self._kernel_size = ( [5, 11, 21] if self.kernel_size is None else self.kernel_size @@ -103,7 +431,7 @@ def build_network(self, input_shape, **kwargs): strides=self.strides, )(x) - conv = tfa.layers.InstanceNormalization()(conv) + conv = InstanceNormalization()(conv) conv = tf.keras.layers.PReLU(shared_axes=[1])(conv) conv = tf.keras.layers.Dropout(self.dropout_proba)(conv) @@ -134,7 +462,7 @@ def build_network(self, input_shape, **kwargs): hidden_fc_layer = tf.keras.layers.Dense( units=self.fc_units, activation=self.activation )(attention) - hidden_fc_layer = tfa.layers.InstanceNormalization()(hidden_fc_layer) + hidden_fc_layer = InstanceNormalization()(hidden_fc_layer) # output layer before classification layer diff --git a/aeon/regression/deep_learning/_encoder.py b/aeon/regression/deep_learning/_encoder.py index cc96982cda..1ec0299acc 100644 --- a/aeon/regression/deep_learning/_encoder.py +++ b/aeon/regression/deep_learning/_encoder.py @@ -101,7 +101,7 @@ class EncoderRegressor(BaseDeepRegressor): """ _tags = { - "python_dependencies": ["tensorflow", "tensorflow_addons"], + "python_dependencies": "tensorflow", } def __init__( diff --git a/aeon/utils/networks/__init__.py b/aeon/utils/networks/__init__.py new file mode 100644 index 0000000000..e9fd066e3f --- /dev/null +++ b/aeon/utils/networks/__init__.py @@ -0,0 +1 @@ +"""Utils for tensorflow_addons.""" diff --git a/aeon/utils/networks/tensorflow_addons.py b/aeon/utils/networks/tensorflow_addons.py new file mode 100644 index 0000000000..baf738b5cb --- /dev/null +++ b/aeon/utils/networks/tensorflow_addons.py @@ -0,0 +1,62 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Types for typing functions signatures.""" + +import importlib +from typing import Callable, List, Union + +import numpy as np +import tensorflow as tf +from tensorflow.python.keras.engine import keras_tensor + +Number = Union[ + float, + int, + np.float16, + np.float32, + np.float64, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, +] + +Initializer = Union[None, dict, str, Callable, tf.keras.initializers.Initializer] +Regularizer = Union[None, dict, str, Callable, tf.keras.regularizers.Regularizer] +Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint] +Activation = Union[None, str, Callable] +if importlib.util.find_spec("tensorflow.keras.optimizers.legacy") is not None: + Optimizer = Union[ + tf.keras.optimizers.Optimizer, tf.keras.optimizers.legacy.Optimizer, str + ] +else: + Optimizer = Union[tf.keras.optimizers.Optimizer, str] + +TensorLike = Union[ + List[Union[Number, list]], + tuple, + Number, + np.ndarray, + tf.Tensor, + tf.SparseTensor, + tf.Variable, + keras_tensor.KerasTensor, +] +FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64] +AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None] From 535376d154bd2fa2a6d841b790d6a77ca4d8d88f Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Fri, 26 Apr 2024 09:06:32 +0200 Subject: [PATCH 03/31] empty commit From 36bc0671727dd4cd1ac26783a9cd3b552045ce5f Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Fri, 26 Apr 2024 09:09:38 +0200 Subject: [PATCH 04/31] fix doxs --- aeon/networks/_encoder.py | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index 6b67a11bf5..a991057314 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -34,7 +34,8 @@ class GroupNormalization(tf.keras.layers.Layer): to number of channels), then this operation becomes identical to Instance Normalization. - Args: + Parameters + ---------- groups: Integer, the number of groups for Group Normalization. Can be in the range [1, N] where N is the input dimension. The input dimension must be divisible by the number of groups. @@ -52,14 +53,6 @@ class GroupNormalization(tf.keras.layers.Layer): beta_constraint: Optional constraint for the beta weight. gamma_constraint: Optional constraint for the gamma weight. - Input shape: - Arbitrary. Use the keyword argument `input_shape` - (tuple of integers, does not include the samples axis) - when using this layer as the first layer in a model. - - Output shape: - Same shape as input. - Notes ----- This code was taken from the soon to be deprecated project @@ -298,7 +291,8 @@ class InstanceNormalization(GroupNormalization): wide range of small batch sizes, if learning rate is adjusted linearly with batch sizes. - Arguments + Parameters + ---------- axis: Integer, the axis that should be normalized. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. @@ -312,14 +306,6 @@ class InstanceNormalization(GroupNormalization): beta_constraint: Optional constraint for the beta weight. gamma_constraint: Optional constraint for the gamma weight. - Input shape - Arbitrary. Use the keyword argument `input_shape` - (tuple of integers, does not include the samples axis) - when using this layer as the first layer in a model. - - Output shape - Same shape as input. 
- References ---------- - [Instance Normalization: The Missing Ingredient for Fast Stylization] From 973f14ffa222a2fe8260a65fc7988f8d3ddcd52a Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 17:51:38 +0200 Subject: [PATCH 05/31] fix bug --- aeon/classification/deep_learning/_encoder.py | 4 +- aeon/networks/_encoder.py | 600 +++++++++--------- 2 files changed, 308 insertions(+), 296 deletions(-) diff --git a/aeon/classification/deep_learning/_encoder.py b/aeon/classification/deep_learning/_encoder.py index bde6b24b8a..b4bcde1416 100644 --- a/aeon/classification/deep_learning/_encoder.py +++ b/aeon/classification/deep_learning/_encoder.py @@ -255,7 +255,9 @@ def _fit(self, X, y): try: self.model_ = tf.keras.models.load_model( - self.file_path + self.file_name_ + ".keras", compile=False + self.file_path + self.file_name_ + ".keras", + compile=False, + # custom_objects={"InstanceNormalization" : InstanceNormalization} ) if not self.save_best_model: os.remove(self.file_path + self.file_name_ + ".keras") diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index a991057314..edcd3d24f8 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -2,322 +2,330 @@ __maintainer__ = [] -import logging - -import tensorflow as tf -from typeguard import typechecked - from aeon.networks.base import BaseDeepNetwork from aeon.utils.networks import tensorflow_addons as types +from aeon.utils.validation._dependencies import _check_soft_dependencies +if _check_soft_dependencies("tensorflow", severity="none"): + import logging -@tf.keras.utils.register_keras_serializable(package="Addons") -class GroupNormalization(tf.keras.layers.Layer): - """Group normalization layer. - - Source: "Group Normalization" (Yuxin Wu & Kaiming He, 2018) - https://arxiv.org/abs/1803.08494 - - Group Normalization divides the channels into groups and computes - within each group the mean and variance for normalization. - Empirically, its accuracy is more stable than batch norm in a wide - range of small batch sizes, if learning rate is adjusted linearly - with batch sizes. - - Relation to Layer Normalization: - If the number of groups is set to 1, then this operation becomes identical - to Layer Normalization. - - Relation to Instance Normalization: - If the number of groups is set to the - input dimension (number of groups is equal - to number of channels), then this operation becomes - identical to Instance Normalization. - - Parameters - ---------- - groups: Integer, the number of groups for Group Normalization. - Can be in the range [1, N] where N is the input dimension. - The input dimension must be divisible by the number of groups. - Defaults to 32. - axis: Integer, the axis that should be normalized. - epsilon: Small float added to variance to avoid dividing by zero. - center: If True, add offset of `beta` to normalized tensor. - If False, `beta` is ignored. - scale: If True, multiply by `gamma`. - If False, `gamma` is not used. - beta_initializer: Initializer for the beta weight. - gamma_initializer: Initializer for the gamma weight. - beta_regularizer: Optional regularizer for the beta weight. - gamma_regularizer: Optional regularizer for the gamma weight. - beta_constraint: Optional constraint for the beta weight. - gamma_constraint: Optional constraint for the gamma weight. 
- - Notes - ----- - This code was taken from the soon to be deprecated project - tensorflow_addons: - https://github.com/tensorflow/addons/tree/v0.20.0 - """ - - @typechecked - def __init__( - self, - groups: int = 32, - axis: int = -1, - epsilon: float = 1e-3, - center: bool = True, - scale: bool = True, - beta_initializer: types.Initializer = "zeros", - gamma_initializer: types.Initializer = "ones", - beta_regularizer: types.Regularizer = None, - gamma_regularizer: types.Regularizer = None, - beta_constraint: types.Constraint = None, - gamma_constraint: types.Constraint = None, - **kwargs, - ): - super().__init__(**kwargs) - self.supports_masking = True - self.groups = groups - self.axis = axis - self.epsilon = epsilon - self.center = center - self.scale = scale - self.beta_initializer = tf.keras.initializers.get(beta_initializer) - self.gamma_initializer = tf.keras.initializers.get(gamma_initializer) - self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer) - self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer) - self.beta_constraint = tf.keras.constraints.get(beta_constraint) - self.gamma_constraint = tf.keras.constraints.get(gamma_constraint) - self._check_axis() - - def build(self, input_shape): - - self._check_if_input_shape_is_none(input_shape) - self._set_number_of_groups_for_instance_norm(input_shape) - self._check_size_of_dimensions(input_shape) - self._create_input_spec(input_shape) - - self._add_gamma_weight(input_shape) - self._add_beta_weight(input_shape) - self.built = True - super().build(input_shape) - - def call(self, inputs): - - input_shape = tf.keras.backend.int_shape(inputs) - tensor_input_shape = tf.shape(inputs) - - reshaped_inputs, group_shape = self._reshape_into_groups( - inputs, input_shape, tensor_input_shape - ) + import tensorflow as tf + from typeguard import typechecked - normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) - - is_instance_norm = (input_shape[self.axis] // self.groups) == 1 - if not is_instance_norm: - outputs = tf.reshape(normalized_inputs, tensor_input_shape) - else: - outputs = normalized_inputs - - return outputs - - def get_config(self): - config = { - "groups": self.groups, - "axis": self.axis, - "epsilon": self.epsilon, - "center": self.center, - "scale": self.scale, - "beta_initializer": tf.keras.initializers.serialize(self.beta_initializer), - "gamma_initializer": tf.keras.initializers.serialize( - self.gamma_initializer - ), - "beta_regularizer": tf.keras.regularizers.serialize(self.beta_regularizer), - "gamma_regularizer": tf.keras.regularizers.serialize( - self.gamma_regularizer - ), - "beta_constraint": tf.keras.constraints.serialize(self.beta_constraint), - "gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint), - } - base_config = super().get_config() - return {**base_config, **config} - - def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): - - group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] - is_instance_norm = (input_shape[self.axis] // self.groups) == 1 - if not is_instance_norm: - group_shape[self.axis] = input_shape[self.axis] // self.groups - group_shape.insert(self.axis, self.groups) - group_shape = tf.stack(group_shape) - reshaped_inputs = tf.reshape(inputs, group_shape) - return reshaped_inputs, group_shape - else: - return inputs, group_shape - - def _apply_normalization(self, reshaped_inputs, input_shape): - - group_shape = tf.keras.backend.int_shape(reshaped_inputs) - group_reduction_axes = 
list(range(1, len(group_shape))) - is_instance_norm = (input_shape[self.axis] // self.groups) == 1 - if not is_instance_norm: - axis = -2 if self.axis == -1 else self.axis - 1 - else: - axis = -1 if self.axis == -1 else self.axis - 1 - group_reduction_axes.pop(axis) - - mean, variance = tf.nn.moments( - reshaped_inputs, group_reduction_axes, keepdims=True - ) + @tf.keras.utils.register_keras_serializable(package="Addons") + class GroupNormalization(tf.keras.layers.Layer): + """Group normalization layer. - gamma, beta = self._get_reshaped_weights(input_shape) - normalized_inputs = tf.nn.batch_normalization( - reshaped_inputs, - mean=mean, - variance=variance, - scale=gamma, - offset=beta, - variance_epsilon=self.epsilon, - ) - return normalized_inputs - - def _get_reshaped_weights(self, input_shape): - broadcast_shape = self._create_broadcast_shape(input_shape) - gamma = None - beta = None - if self.scale: - gamma = tf.reshape(self.gamma, broadcast_shape) - - if self.center: - beta = tf.reshape(self.beta, broadcast_shape) - return gamma, beta - - def _check_if_input_shape_is_none(self, input_shape): - dim = input_shape[self.axis] - if dim is None: - raise ValueError( - "Axis " + str(self.axis) + " of " - "input tensor should have a defined dimension " - "but the layer received an input with shape " + str(input_shape) + "." - ) + Source: "Group Normalization" (Yuxin Wu & Kaiming He, 2018) + https://arxiv.org/abs/1803.08494 - def _set_number_of_groups_for_instance_norm(self, input_shape): - dim = input_shape[self.axis] + Group Normalization divides the channels into groups and computes + within each group the mean and variance for normalization. + Empirically, its accuracy is more stable than batch norm in a wide + range of small batch sizes, if learning rate is adjusted linearly + with batch sizes. - if self.groups == -1: - self.groups = dim + Relation to Layer Normalization: + If the number of groups is set to 1, then this operation becomes identical + to Layer Normalization. - def _check_size_of_dimensions(self, input_shape): + Relation to Instance Normalization: + If the number of groups is set to the + input dimension (number of groups is equal + to number of channels), then this operation becomes + identical to Instance Normalization. - dim = input_shape[self.axis] - if dim < self.groups: - raise ValueError( - "Number of groups (" + str(self.groups) + ") cannot be " - "more than the number of channels (" + str(dim) + ")." - ) + Parameters + ---------- + groups: Integer, the number of groups for Group Normalization. + Can be in the range [1, N] where N is the input dimension. + The input dimension must be divisible by the number of groups. + Defaults to 32. + axis: Integer, the axis that should be normalized. + epsilon: Small float added to variance to avoid dividing by zero. + center: If True, add offset of `beta` to normalized tensor. + If False, `beta` is ignored. + scale: If True, multiply by `gamma`. + If False, `gamma` is not used. + beta_initializer: Initializer for the beta weight. + gamma_initializer: Initializer for the gamma weight. + beta_regularizer: Optional regularizer for the beta weight. + gamma_regularizer: Optional regularizer for the gamma weight. + beta_constraint: Optional constraint for the beta weight. + gamma_constraint: Optional constraint for the gamma weight. 
+ + Notes + ----- + This code was taken from the soon to be deprecated project + tensorflow_addons: + https://github.com/tensorflow/addons/tree/v0.20.0 + """ - if dim % self.groups != 0: - raise ValueError( - "Number of groups (" + str(self.groups) + ") must be a " - "multiple of the number of channels (" + str(dim) + ")." + @typechecked + def __init__( + self, + groups: int = 32, + axis: int = -1, + epsilon: float = 1e-3, + center: bool = True, + scale: bool = True, + beta_initializer: types.Initializer = "zeros", + gamma_initializer: types.Initializer = "ones", + beta_regularizer: types.Regularizer = None, + gamma_regularizer: types.Regularizer = None, + beta_constraint: types.Constraint = None, + gamma_constraint: types.Constraint = None, + **kwargs, + ): + super().__init__(**kwargs) + self.supports_masking = True + self.groups = groups + self.axis = axis + self.epsilon = epsilon + self.center = center + self.scale = scale + self.beta_initializer = tf.keras.initializers.get(beta_initializer) + self.gamma_initializer = tf.keras.initializers.get(gamma_initializer) + self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer) + self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer) + self.beta_constraint = tf.keras.constraints.get(beta_constraint) + self.gamma_constraint = tf.keras.constraints.get(gamma_constraint) + self._check_axis() + + def build(self, input_shape): + + self._check_if_input_shape_is_none(input_shape) + self._set_number_of_groups_for_instance_norm(input_shape) + self._check_size_of_dimensions(input_shape) + self._create_input_spec(input_shape) + + self._add_gamma_weight(input_shape) + self._add_beta_weight(input_shape) + self.built = True + super().build(input_shape) + + def call(self, inputs): + + input_shape = tf.keras.backend.int_shape(inputs) + tensor_input_shape = tf.shape(inputs) + + reshaped_inputs, group_shape = self._reshape_into_groups( + inputs, input_shape, tensor_input_shape ) - def _check_axis(self): - - if self.axis == 0: - raise ValueError( - "You are trying to normalize your batch axis. 
Do you want to " - "use tf.layer.batch_normalization instead" + normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) + + is_instance_norm = (input_shape[self.axis] // self.groups) == 1 + if not is_instance_norm: + outputs = tf.reshape(normalized_inputs, tensor_input_shape) + else: + outputs = normalized_inputs + + return outputs + + def get_config(self): + config = { + "groups": self.groups, + "axis": self.axis, + "epsilon": self.epsilon, + "center": self.center, + "scale": self.scale, + "beta_initializer": tf.keras.initializers.serialize( + self.beta_initializer + ), + "gamma_initializer": tf.keras.initializers.serialize( + self.gamma_initializer + ), + "beta_regularizer": tf.keras.regularizers.serialize( + self.beta_regularizer + ), + "gamma_regularizer": tf.keras.regularizers.serialize( + self.gamma_regularizer + ), + "beta_constraint": tf.keras.constraints.serialize(self.beta_constraint), + "gamma_constraint": tf.keras.constraints.serialize( + self.gamma_constraint + ), + } + base_config = super().get_config() + return {**base_config, **config} + + def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): + + group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] + is_instance_norm = (input_shape[self.axis] // self.groups) == 1 + if not is_instance_norm: + group_shape[self.axis] = input_shape[self.axis] // self.groups + group_shape.insert(self.axis, self.groups) + group_shape = tf.stack(group_shape) + reshaped_inputs = tf.reshape(inputs, group_shape) + return reshaped_inputs, group_shape + else: + return inputs, group_shape + + def _apply_normalization(self, reshaped_inputs, input_shape): + + group_shape = tf.keras.backend.int_shape(reshaped_inputs) + group_reduction_axes = list(range(1, len(group_shape))) + is_instance_norm = (input_shape[self.axis] // self.groups) == 1 + if not is_instance_norm: + axis = -2 if self.axis == -1 else self.axis - 1 + else: + axis = -1 if self.axis == -1 else self.axis - 1 + group_reduction_axes.pop(axis) + + mean, variance = tf.nn.moments( + reshaped_inputs, group_reduction_axes, keepdims=True ) - def _create_input_spec(self, input_shape): - - dim = input_shape[self.axis] - self.input_spec = tf.keras.layers.InputSpec( - ndim=len(input_shape), axes={self.axis: dim} - ) - - def _add_gamma_weight(self, input_shape): - - dim = input_shape[self.axis] - shape = (dim,) - - if self.scale: - self.gamma = self.add_weight( - shape=shape, - name="gamma", - initializer=self.gamma_initializer, - regularizer=self.gamma_regularizer, - constraint=self.gamma_constraint, + gamma, beta = self._get_reshaped_weights(input_shape) + normalized_inputs = tf.nn.batch_normalization( + reshaped_inputs, + mean=mean, + variance=variance, + scale=gamma, + offset=beta, + variance_epsilon=self.epsilon, ) - else: - self.gamma = None - - def _add_beta_weight(self, input_shape): - - dim = input_shape[self.axis] - shape = (dim,) - - if self.center: - self.beta = self.add_weight( - shape=shape, - name="beta", - initializer=self.beta_initializer, - regularizer=self.beta_regularizer, - constraint=self.beta_constraint, + return normalized_inputs + + def _get_reshaped_weights(self, input_shape): + broadcast_shape = self._create_broadcast_shape(input_shape) + gamma = None + beta = None + if self.scale: + gamma = tf.reshape(self.gamma, broadcast_shape) + + if self.center: + beta = tf.reshape(self.beta, broadcast_shape) + return gamma, beta + + def _check_if_input_shape_is_none(self, input_shape): + dim = input_shape[self.axis] + if dim is None: 
+ raise ValueError( + "Axis " + str(self.axis) + " of " + "input tensor should have a defined dimension " + "but the layer received an input with shape " + + str(input_shape) + + "." + ) + + def _set_number_of_groups_for_instance_norm(self, input_shape): + dim = input_shape[self.axis] + + if self.groups == -1: + self.groups = dim + + def _check_size_of_dimensions(self, input_shape): + + dim = input_shape[self.axis] + if dim < self.groups: + raise ValueError( + "Number of groups (" + str(self.groups) + ") cannot be " + "more than the number of channels (" + str(dim) + ")." + ) + + if dim % self.groups != 0: + raise ValueError( + "Number of groups (" + str(self.groups) + ") must be a " + "multiple of the number of channels (" + str(dim) + ")." + ) + + def _check_axis(self): + + if self.axis == 0: + raise ValueError( + "You are trying to normalize your batch axis. Do you want to " + "use tf.layer.batch_normalization instead" + ) + + def _create_input_spec(self, input_shape): + + dim = input_shape[self.axis] + self.input_spec = tf.keras.layers.InputSpec( + ndim=len(input_shape), axes={self.axis: dim} ) - else: - self.beta = None - - def _create_broadcast_shape(self, input_shape): - broadcast_shape = [1] * len(input_shape) - is_instance_norm = (input_shape[self.axis] // self.groups) == 1 - if not is_instance_norm: - broadcast_shape[self.axis] = input_shape[self.axis] // self.groups - broadcast_shape.insert(self.axis, self.groups) - else: - broadcast_shape[self.axis] = self.groups - return broadcast_shape - - -@tf.keras.utils.register_keras_serializable(package="Addons") -class InstanceNormalization(GroupNormalization): - """Instance normalization layer. - - Instance Normalization is an specific case of ```GroupNormalization```since - it normalizes all features of one channel. The Groupsize is equal to the - channel size. Empirically, its accuracy is more stable than batch norm in a - wide range of small batch sizes, if learning rate is adjusted linearly - with batch sizes. - Parameters - ---------- - axis: Integer, the axis that should be normalized. - epsilon: Small float added to variance to avoid dividing by zero. - center: If True, add offset of `beta` to normalized tensor. - If False, `beta` is ignored. - scale: If True, multiply by `gamma`. - If False, `gamma` is not used. - beta_initializer: Initializer for the beta weight. - gamma_initializer: Initializer for the gamma weight. - beta_regularizer: Optional regularizer for the beta weight. - gamma_regularizer: Optional regularizer for the gamma weight. - beta_constraint: Optional constraint for the beta weight. - gamma_constraint: Optional constraint for the gamma weight. 
+ def _add_gamma_weight(self, input_shape): + + dim = input_shape[self.axis] + shape = (dim,) + + if self.scale: + self.gamma = self.add_weight( + shape=shape, + name="gamma", + initializer=self.gamma_initializer, + regularizer=self.gamma_regularizer, + constraint=self.gamma_constraint, + ) + else: + self.gamma = None + + def _add_beta_weight(self, input_shape): + + dim = input_shape[self.axis] + shape = (dim,) + + if self.center: + self.beta = self.add_weight( + shape=shape, + name="beta", + initializer=self.beta_initializer, + regularizer=self.beta_regularizer, + constraint=self.beta_constraint, + ) + else: + self.beta = None + + def _create_broadcast_shape(self, input_shape): + broadcast_shape = [1] * len(input_shape) + is_instance_norm = (input_shape[self.axis] // self.groups) == 1 + if not is_instance_norm: + broadcast_shape[self.axis] = input_shape[self.axis] // self.groups + broadcast_shape.insert(self.axis, self.groups) + else: + broadcast_shape[self.axis] = self.groups + return broadcast_shape + + @tf.keras.utils.register_keras_serializable(package="Addons") + class InstanceNormalization(GroupNormalization): + """Instance normalization layer. + + Instance Normalization is an specific case of ```GroupNormalization```since + it normalizes all features of one channel. The Groupsize is equal to the + channel size. Empirically, its accuracy is more stable than batch norm in a + wide range of small batch sizes, if learning rate is adjusted linearly + with batch sizes. - References - ---------- - - [Instance Normalization: The Missing Ingredient for Fast Stylization] - (https://arxiv.org/abs/1607.08022) - """ + Parameters + ---------- + axis: Integer, the axis that should be normalized. + epsilon: Small float added to variance to avoid dividing by zero. + center: If True, add offset of `beta` to normalized tensor. + If False, `beta` is ignored. + scale: If True, multiply by `gamma`. + If False, `gamma` is not used. + beta_initializer: Initializer for the beta weight. + gamma_initializer: Initializer for the gamma weight. + beta_regularizer: Optional regularizer for the beta weight. + gamma_regularizer: Optional regularizer for the gamma weight. + beta_constraint: Optional constraint for the beta weight. + gamma_constraint: Optional constraint for the gamma weight. 
+ + References + ---------- + - [Instance Normalization: The Missing Ingredient for Fast Stylization] + (https://arxiv.org/abs/1607.08022) + """ - def __init__(self, **kwargs): - if "groups" in kwargs: - logging.warning("The given value for groups will be overwritten.") + def __init__(self, **kwargs): + if "groups" in kwargs: + logging.warning("The given value for groups will be overwritten.") - kwargs["groups"] = -1 - super().__init__(**kwargs) + kwargs["groups"] = -1 + super().__init__(**kwargs) class EncoderNetwork(BaseDeepNetwork): @@ -400,6 +408,8 @@ def build_network(self, input_shape, **kwargs): """ import tensorflow as tf + tf.keras.config.enable_unsafe_deserialization() + self._kernel_size = ( [5, 11, 21] if self.kernel_size is None else self.kernel_size ) From b67eda7ea0948b809686886c2af3453a2fe9cb9a Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 17:52:42 +0200 Subject: [PATCH 06/31] add maintainer --- aeon/classification/deep_learning/_encoder.py | 2 +- aeon/networks/_encoder.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aeon/classification/deep_learning/_encoder.py b/aeon/classification/deep_learning/_encoder.py index b4bcde1416..bf7fa71987 100644 --- a/aeon/classification/deep_learning/_encoder.py +++ b/aeon/classification/deep_learning/_encoder.py @@ -1,6 +1,6 @@ """Encoder Classifier.""" -__maintainer__ = [] +__maintainer__ = ["hadifawaz1999"] __all__ = ["EncoderClassifier"] import gc diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index edcd3d24f8..2462f4246d 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -1,6 +1,6 @@ """Encoder Classifier.""" -__maintainer__ = [] +__maintainer__ = ["hadifawaz1999"] from aeon.networks.base import BaseDeepNetwork from aeon.utils.networks import tensorflow_addons as types From a1c78e6994a6ccbbb8b709891a010b564ea33389 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 17:57:55 +0200 Subject: [PATCH 07/31] re arrange --- aeon/networks/_encoder.py | 279 +++++++++++++++++++------------------- 1 file changed, 140 insertions(+), 139 deletions(-) diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index 2462f4246d..4733394d18 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -6,6 +6,146 @@ from aeon.utils.networks import tensorflow_addons as types from aeon.utils.validation._dependencies import _check_soft_dependencies + +class EncoderNetwork(BaseDeepNetwork): + """Establish the network structure for an Encoder. + + Adapted from the implementation used in [1] + + Parameters + ---------- + kernel_size : array of int, default = [5, 11, 21] + Specifies the length of the 1D convolution windows. + n_filters : array of int, default = [128, 256, 512] + Specifying the number of 1D convolution filters used for each layer, + the shape of this array should be the same as kernel_size. + max_pool_size : int, default = 2 + Size of the max pooling windows. + activation : string, default = sigmoid + Keras activation function. + dropout_proba : float, default = 0.2 + specifying the dropout layer probability. + padding : string, default = "same" + Specifying the type of padding used for the 1D convolution. + strides : int, default = 1 + Specifying the sliding rate of the 1D convolution filter. + fc_units : int, default = 256 + Specifying the number of units in the hiddent fully connected layer used in + the EncoderNetwork. 
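+
+    A minimal construction sketch (the (100, 2) input shape, i.e. 100 time
+    points and 2 channels, is assumed purely for illustration):
+
+    >>> network = EncoderNetwork()
+    >>> input_layer, output_layer = network.build_network((100, 2))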
+ + Notes + ----- + Adapted from source code + https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/encoder.py + + References + ---------- + .. [1] SerrĂ  et al. Towards a Universal Neural Network Encoder for Time Series + In proceedings International Conference of the Catalan Association + for Artificial Intelligence, 120--129 2018. + + + """ + + _tags = {"python_dependencies": "tensorflow"} + + def __init__( + self, + kernel_size=None, + n_filters=None, + dropout_proba=0.2, + max_pool_size=2, + activation="sigmoid", + padding="same", + strides=1, + fc_units=256, + ): + self.kernel_size = kernel_size + self.n_filters = n_filters + self.padding = padding + self.strides = strides + self.max_pool_size = max_pool_size + self.activation = activation + self.dropout_proba = dropout_proba + self.fc_units = fc_units + + super().__init__() + + def build_network(self, input_shape, **kwargs): + """ + Construct a network and return its input and output layers. + + Parameters + ---------- + input_shape : tuple + The shape of the data fed into the input layer. + + Returns + ------- + input_layer : a keras layer + output_layer : a keras layer + """ + import tensorflow as tf + + tf.keras.config.enable_unsafe_deserialization() + + self._kernel_size = ( + [5, 11, 21] if self.kernel_size is None else self.kernel_size + ) + self._n_filters = [128, 256, 512] if self.n_filters is None else self.n_filters + + input_layer = tf.keras.layers.Input(input_shape) + + x = input_layer + + for i in range(len(self._kernel_size)): + conv = tf.keras.layers.Conv1D( + filters=self._n_filters[i], + kernel_size=self._kernel_size[i], + padding=self.padding, + strides=self.strides, + )(x) + + conv = InstanceNormalization()(conv) + conv = tf.keras.layers.PReLU(shared_axes=[1])(conv) + conv = tf.keras.layers.Dropout(self.dropout_proba)(conv) + + if i < len(self._kernel_size) - 1: + conv = tf.keras.layers.MaxPool1D(pool_size=self.max_pool_size)(conv) + + x = conv + + # split attention + + split_index = self._n_filters[-1] // 2 + + attention_multiplier_1 = tf.keras.layers.Softmax()( + tf.keras.layers.Lambda(lambda x: x[:, :, :split_index])(conv) + ) + attention_multiplier_2 = tf.keras.layers.Lambda( + lambda x: x[:, :, split_index:] + )(conv) + + # attention mechanism + + attention = tf.keras.layers.Multiply()( + [attention_multiplier_1, attention_multiplier_2] + ) + + # add fully connected hidden layer + + hidden_fc_layer = tf.keras.layers.Dense( + units=self.fc_units, activation=self.activation + )(attention) + hidden_fc_layer = InstanceNormalization()(hidden_fc_layer) + + # output layer before classification layer + + flatten_layer = tf.keras.layers.Flatten()(hidden_fc_layer) + + return input_layer, flatten_layer + + if _check_soft_dependencies("tensorflow", severity="none"): import logging @@ -326,142 +466,3 @@ def __init__(self, **kwargs): kwargs["groups"] = -1 super().__init__(**kwargs) - - -class EncoderNetwork(BaseDeepNetwork): - """Establish the network structure for an Encoder. - - Adapted from the implementation used in [1] - - Parameters - ---------- - kernel_size : array of int, default = [5, 11, 21] - Specifies the length of the 1D convolution windows. - n_filters : array of int, default = [128, 256, 512] - Specifying the number of 1D convolution filters used for each layer, - the shape of this array should be the same as kernel_size. - max_pool_size : int, default = 2 - Size of the max pooling windows. - activation : string, default = sigmoid - Keras activation function. 
- dropout_proba : float, default = 0.2 - specifying the dropout layer probability. - padding : string, default = "same" - Specifying the type of padding used for the 1D convolution. - strides : int, default = 1 - Specifying the sliding rate of the 1D convolution filter. - fc_units : int, default = 256 - Specifying the number of units in the hiddent fully connected layer used in - the EncoderNetwork. - - Notes - ----- - Adapted from source code - https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/encoder.py - - References - ---------- - .. [1] SerrĂ  et al. Towards a Universal Neural Network Encoder for Time Series - In proceedings International Conference of the Catalan Association - for Artificial Intelligence, 120--129 2018. - - - """ - - _tags = {"python_dependencies": "tensorflow"} - - def __init__( - self, - kernel_size=None, - n_filters=None, - dropout_proba=0.2, - max_pool_size=2, - activation="sigmoid", - padding="same", - strides=1, - fc_units=256, - ): - self.kernel_size = kernel_size - self.n_filters = n_filters - self.padding = padding - self.strides = strides - self.max_pool_size = max_pool_size - self.activation = activation - self.dropout_proba = dropout_proba - self.fc_units = fc_units - - super().__init__() - - def build_network(self, input_shape, **kwargs): - """ - Construct a network and return its input and output layers. - - Parameters - ---------- - input_shape : tuple - The shape of the data fed into the input layer. - - Returns - ------- - input_layer : a keras layer - output_layer : a keras layer - """ - import tensorflow as tf - - tf.keras.config.enable_unsafe_deserialization() - - self._kernel_size = ( - [5, 11, 21] if self.kernel_size is None else self.kernel_size - ) - self._n_filters = [128, 256, 512] if self.n_filters is None else self.n_filters - - input_layer = tf.keras.layers.Input(input_shape) - - x = input_layer - - for i in range(len(self._kernel_size)): - conv = tf.keras.layers.Conv1D( - filters=self._n_filters[i], - kernel_size=self._kernel_size[i], - padding=self.padding, - strides=self.strides, - )(x) - - conv = InstanceNormalization()(conv) - conv = tf.keras.layers.PReLU(shared_axes=[1])(conv) - conv = tf.keras.layers.Dropout(self.dropout_proba)(conv) - - if i < len(self._kernel_size) - 1: - conv = tf.keras.layers.MaxPool1D(pool_size=self.max_pool_size)(conv) - - x = conv - - # split attention - - split_index = self._n_filters[-1] // 2 - - attention_multiplier_1 = tf.keras.layers.Softmax()( - tf.keras.layers.Lambda(lambda x: x[:, :, :split_index])(conv) - ) - attention_multiplier_2 = tf.keras.layers.Lambda( - lambda x: x[:, :, split_index:] - )(conv) - - # attention mechanism - - attention = tf.keras.layers.Multiply()( - [attention_multiplier_1, attention_multiplier_2] - ) - - # add fully connected hidden layer - - hidden_fc_layer = tf.keras.layers.Dense( - units=self.fc_units, activation=self.activation - )(attention) - hidden_fc_layer = InstanceNormalization()(hidden_fc_layer) - - # output layer before classification layer - - flatten_layer = tf.keras.layers.Flatten()(hidden_fc_layer) - - return input_layer, flatten_layer From c29cc5d75ee70ecdf131ba2966c5b08550423d1e Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 18:03:28 +0200 Subject: [PATCH 08/31] dep on utils --- aeon/utils/networks/tensorflow_addons.py | 86 +++++++++++++----------- 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/aeon/utils/networks/tensorflow_addons.py b/aeon/utils/networks/tensorflow_addons.py index baf738b5cb..152cbf5236 100644 
--- a/aeon/utils/networks/tensorflow_addons.py +++ b/aeon/utils/networks/tensorflow_addons.py @@ -14,49 +14,53 @@ # ============================================================================== """Types for typing functions signatures.""" -import importlib -from typing import Callable, List, Union +from aeon.utils.validation._dependencies import _check_soft_dependencies -import numpy as np -import tensorflow as tf -from tensorflow.python.keras.engine import keras_tensor +if _check_soft_dependencies("tensorflow", severity="none"): -Number = Union[ - float, - int, - np.float16, - np.float32, - np.float64, - np.int8, - np.int16, - np.int32, - np.int64, - np.uint8, - np.uint16, - np.uint32, - np.uint64, -] + import importlib + from typing import Callable, List, Union -Initializer = Union[None, dict, str, Callable, tf.keras.initializers.Initializer] -Regularizer = Union[None, dict, str, Callable, tf.keras.regularizers.Regularizer] -Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint] -Activation = Union[None, str, Callable] -if importlib.util.find_spec("tensorflow.keras.optimizers.legacy") is not None: - Optimizer = Union[ - tf.keras.optimizers.Optimizer, tf.keras.optimizers.legacy.Optimizer, str + import numpy as np + import tensorflow as tf + from tensorflow.python.keras.engine import keras_tensor + + Number = Union[ + float, + int, + np.float16, + np.float32, + np.float64, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, ] -else: - Optimizer = Union[tf.keras.optimizers.Optimizer, str] -TensorLike = Union[ - List[Union[Number, list]], - tuple, - Number, - np.ndarray, - tf.Tensor, - tf.SparseTensor, - tf.Variable, - keras_tensor.KerasTensor, -] -FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64] -AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None] + Initializer = Union[None, dict, str, Callable, tf.keras.initializers.Initializer] + Regularizer = Union[None, dict, str, Callable, tf.keras.regularizers.Regularizer] + Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint] + Activation = Union[None, str, Callable] + if importlib.util.find_spec("tensorflow.keras.optimizers.legacy") is not None: + Optimizer = Union[ + tf.keras.optimizers.Optimizer, tf.keras.optimizers.legacy.Optimizer, str + ] + else: + Optimizer = Union[tf.keras.optimizers.Optimizer, str] + + TensorLike = Union[ + List[Union[Number, list]], + tuple, + Number, + np.ndarray, + tf.Tensor, + tf.SparseTensor, + tf.Variable, + keras_tensor.KerasTensor, + ] + FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64] + AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None] From 17c40aa35f7b830797eeaa4b36f8b34104f846e6 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 18:42:14 +0200 Subject: [PATCH 09/31] fix comments --- aeon/classification/deep_learning/_encoder.py | 1 - aeon/utils/networks/tensorflow_addons.py | 16 ++-------------- pyproject.toml | 2 -- 3 files changed, 2 insertions(+), 17 deletions(-) diff --git a/aeon/classification/deep_learning/_encoder.py b/aeon/classification/deep_learning/_encoder.py index bf7fa71987..5d78e8f21a 100644 --- a/aeon/classification/deep_learning/_encoder.py +++ b/aeon/classification/deep_learning/_encoder.py @@ -257,7 +257,6 @@ def _fit(self, X, y): self.model_ = tf.keras.models.load_model( self.file_path + self.file_name_ + ".keras", compile=False, - # custom_objects={"InstanceNormalization" : 
InstanceNormalization} ) if not self.save_best_model: os.remove(self.file_path + self.file_name_ + ".keras") diff --git a/aeon/utils/networks/tensorflow_addons.py b/aeon/utils/networks/tensorflow_addons.py index 152cbf5236..8102873599 100644 --- a/aeon/utils/networks/tensorflow_addons.py +++ b/aeon/utils/networks/tensorflow_addons.py @@ -1,19 +1,7 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== """Types for typing functions signatures.""" +# The following was taken from the tensorflow_addons deprecated package + from aeon.utils.validation._dependencies import _check_soft_dependencies if _check_soft_dependencies("tensorflow", severity="none"): diff --git a/pyproject.toml b/pyproject.toml index 803c677b8b..da1db1511c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,7 +79,6 @@ all_extras = [ "stumpy>=1.5.1", "tbats>=1.1.0", "tensorflow>=2.12; python_version >= '3.9'", - "tensorflow-addons; python_version >= '3.9'", "torch>=1.13.1", "tsfresh>=0.20.0", "tslearn>=0.5.2", @@ -93,7 +92,6 @@ all_extras = [ dl = [ "keras-self-attention", "tensorflow>=2.12; python_version >= '3.9'", - "tensorflow-addons; python_version >= '3.9'", ] unstable_extras = [ "mrsqm>=0.0.1,<0.1.0 ; platform_system == 'Darwin'", # requires gcc and fftw to be installed for Windows and some other OS (see http://www.fftw.org/index.html) From f23abc098c7f7cb92a67b0175007d6b3280d7644 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 19:54:57 +0200 Subject: [PATCH 10/31] add version and file to header --- aeon/utils/networks/tensorflow_addons.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/aeon/utils/networks/tensorflow_addons.py b/aeon/utils/networks/tensorflow_addons.py index 8102873599..efced7aa73 100644 --- a/aeon/utils/networks/tensorflow_addons.py +++ b/aeon/utils/networks/tensorflow_addons.py @@ -1,6 +1,11 @@ -"""Types for typing functions signatures.""" +"""Types for typing functions signatures. -# The following was taken from the tensorflow_addons deprecated package +The following was taken from the tensorflow_addons deprecated package. 
+ +package: https://www.tensorflow.org/addons +version: 0.23.0 +file: https://github.com/tensorflow/addons/blob/master/tensorflow_addons/utils/types.py +""" from aeon.utils.validation._dependencies import _check_soft_dependencies From d5dbedb92d90bb6e6756331f7b0b88957d7abd99 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 19:56:04 +0200 Subject: [PATCH 11/31] re add conflict --- pyproject.toml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index da1db1511c..71cf806a9b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,7 +78,8 @@ all_extras = [ "statsmodels>=0.12.1", "stumpy>=1.5.1", "tbats>=1.1.0", - "tensorflow>=2.12; python_version >= '3.9'", + "tensorflow>=2.12; python_version < '3.12' and python_version >= '3.9'", + "tensorflow-addons; python_version < '3.12' and python_version >= '3.9'", "torch>=1.13.1", "tsfresh>=0.20.0", "tslearn>=0.5.2", @@ -91,7 +92,8 @@ all_extras = [ ] dl = [ "keras-self-attention", - "tensorflow>=2.12; python_version >= '3.9'", + "tensorflow>=2.12; python_version < '3.12' and python_version >= '3.9'", + "tensorflow-addons; python_version < '3.12' and python_version >= '3.9'", ] unstable_extras = [ "mrsqm>=0.0.1,<0.1.0 ; platform_system == 'Darwin'", # requires gcc and fftw to be installed for Windows and some other OS (see http://www.fftw.org/index.html) From fc9af0159faff873a0ffab0abbb74bdf79f854cd Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 19:56:55 +0200 Subject: [PATCH 12/31] remove addon --- pyproject.toml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 71cf806a9b..da1db1511c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,8 +78,7 @@ all_extras = [ "statsmodels>=0.12.1", "stumpy>=1.5.1", "tbats>=1.1.0", - "tensorflow>=2.12; python_version < '3.12' and python_version >= '3.9'", - "tensorflow-addons; python_version < '3.12' and python_version >= '3.9'", + "tensorflow>=2.12; python_version >= '3.9'", "torch>=1.13.1", "tsfresh>=0.20.0", "tslearn>=0.5.2", @@ -92,8 +91,7 @@ all_extras = [ ] dl = [ "keras-self-attention", - "tensorflow>=2.12; python_version < '3.12' and python_version >= '3.9'", - "tensorflow-addons; python_version < '3.12' and python_version >= '3.9'", + "tensorflow>=2.12; python_version >= '3.9'", ] unstable_extras = [ "mrsqm>=0.0.1,<0.1.0 ; platform_system == 'Darwin'", # requires gcc and fftw to be installed for Windows and some other OS (see http://www.fftw.org/index.html) From bb83016047aacf3651bda0c8553256bef58db915 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 29 Apr 2024 19:57:58 +0200 Subject: [PATCH 13/31] only adodn remove --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index da1db1511c..0b53038fb8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,7 +78,7 @@ all_extras = [ "statsmodels>=0.12.1", "stumpy>=1.5.1", "tbats>=1.1.0", - "tensorflow>=2.12; python_version >= '3.9'", + "tensorflow>=2.12; python_version < '3.12' and python_version >= '3.9'", "torch>=1.13.1", "tsfresh>=0.20.0", "tslearn>=0.5.2", @@ -91,7 +91,7 @@ all_extras = [ ] dl = [ "keras-self-attention", - "tensorflow>=2.12; python_version >= '3.9'", + "tensorflow>=2.12; python_version < '3.12' and python_version >= '3.9'", ] unstable_extras = [ "mrsqm>=0.0.1,<0.1.0 ; platform_system == 'Darwin'", # requires gcc and fftw to be installed for Windows and some other OS (see http://www.fftw.org/index.html) From 
c394d4a0942458b6a82114de163855597ded9d41 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Tue, 30 Apr 2024 14:17:01 +0200 Subject: [PATCH 14/31] add typeguard dep --- aeon/classification/deep_learning/_encoder.py | 2 +- aeon/networks/_encoder.py | 4 ++-- aeon/regression/deep_learning/_encoder.py | 2 +- pyproject.toml | 2 ++ 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/aeon/classification/deep_learning/_encoder.py b/aeon/classification/deep_learning/_encoder.py index 5d78e8f21a..5d6b936270 100644 --- a/aeon/classification/deep_learning/_encoder.py +++ b/aeon/classification/deep_learning/_encoder.py @@ -83,7 +83,7 @@ class EncoderClassifier(BaseDeepClassifier): """ _tags = { - "python_dependencies": "tensorflow", + "python_dependencies": ["tensorflow", "typeguard"], } def __init__( diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index 4733394d18..33959367fb 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -47,7 +47,7 @@ class EncoderNetwork(BaseDeepNetwork): """ - _tags = {"python_dependencies": "tensorflow"} + _tags = {"python_dependencies": ["tensorflow", "typeguard"]} def __init__( self, @@ -146,7 +146,7 @@ def build_network(self, input_shape, **kwargs): return input_layer, flatten_layer -if _check_soft_dependencies("tensorflow", severity="none"): +if _check_soft_dependencies(["tensorflow", "typeguard"], severity="none"): import logging import tensorflow as tf diff --git a/aeon/regression/deep_learning/_encoder.py b/aeon/regression/deep_learning/_encoder.py index 1ec0299acc..c7088c66dd 100644 --- a/aeon/regression/deep_learning/_encoder.py +++ b/aeon/regression/deep_learning/_encoder.py @@ -101,7 +101,7 @@ class EncoderRegressor(BaseDeepRegressor): """ _tags = { - "python_dependencies": "tensorflow", + "python_dependencies": ["tensorflow", "typeguard"], } def __init__( diff --git a/pyproject.toml b/pyproject.toml index 1d060f87f4..de227511aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -81,6 +81,7 @@ all_extras = [ "torch>=1.13.1", "tsfresh>=0.20.0", "tslearn>=0.5.2", + "typeguard", "xarray", # for some reason, a heirarchical dask test fails on some Python versions with the @@ -92,6 +93,7 @@ all_extras = [ dl = [ "keras-self-attention", "tensorflow>=2.12; python_version >= '3.9'", + "typeguard", ] unstable_extras = [ "mrsqm>=0.0.1,<0.1.0 ; platform_system == 'Darwin' and python_version < '3.12'", # requires gcc and fftw to be installed for Windows and some other OS (see http://www.fftw.org/index.html) From 9f57a0f111d6f85f2c9056a9b33c250761a982a2 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Tue, 30 Apr 2024 14:44:53 +0200 Subject: [PATCH 15/31] remove addons from test --- .../deep_learning/tests/test_random_state_deep_learning.py | 2 +- .../tests/test_random_state_deep_learning_cluster.py | 2 +- .../deep_learning/tests/test_random_state_deep_regressor.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aeon/classification/deep_learning/tests/test_random_state_deep_learning.py b/aeon/classification/deep_learning/tests/test_random_state_deep_learning.py index 10d26be4a6..d05a11bf22 100644 --- a/aeon/classification/deep_learning/tests/test_random_state_deep_learning.py +++ b/aeon/classification/deep_learning/tests/test_random_state_deep_learning.py @@ -13,7 +13,7 @@ @pytest.mark.skipif( - not _check_soft_dependencies(["tensorflow", "tensorflow_addons"], severity="none"), + not _check_soft_dependencies(["tensorflow", "typeguard"], severity="none"), reason="skip test if required soft dependency not 
available",
 )
 def test_random_state_deep_learning_cls():
diff --git a/aeon/clustering/deep_learning/tests/test_random_state_deep_learning_cluster.py b/aeon/clustering/deep_learning/tests/test_random_state_deep_learning_cluster.py
index 08ba140143..9c2a55e1f9 100644
--- a/aeon/clustering/deep_learning/tests/test_random_state_deep_learning_cluster.py
+++ b/aeon/clustering/deep_learning/tests/test_random_state_deep_learning_cluster.py
@@ -13,7 +13,7 @@
 @pytest.mark.skipif(
-    not _check_soft_dependencies(["tensorflow", "tensorflow_addons"], severity="none"),
+    not _check_soft_dependencies("tensorflow", severity="none"),
     reason="skip test if required soft dependency not available",
 )
 def test_random_state_deep_learning_clr():
diff --git a/aeon/regression/deep_learning/tests/test_random_state_deep_regressor.py b/aeon/regression/deep_learning/tests/test_random_state_deep_regressor.py
index 535c89370d..3356619e1f 100644
--- a/aeon/regression/deep_learning/tests/test_random_state_deep_regressor.py
+++ b/aeon/regression/deep_learning/tests/test_random_state_deep_regressor.py
@@ -13,7 +13,7 @@
 @pytest.mark.skipif(
-    not _check_soft_dependencies(["tensorflow", "tensorflow_addons"], severity="none"),
+    not _check_soft_dependencies(["tensorflow", "typeguard"], severity="none"),
     reason="skip test if required soft dependency not available",
 )
 def test_random_state_deep_learning_rgs():

From 0e99adb8679f9ef867a267f975b929eb1d90ae52 Mon Sep 17 00:00:00 2001
From: hadifawaz1999
Date: Tue, 30 Apr 2024 14:49:41 +0200
Subject: [PATCH 16/31] remove tag of python<3.12 from base class

---
 aeon/classification/deep_learning/base.py | 1 -
 aeon/clustering/deep_learning/base.py     | 1 -
 aeon/networks/base.py                     | 1 -
 aeon/regression/deep_learning/base.py     | 1 -
 4 files changed, 4 deletions(-)

diff --git a/aeon/classification/deep_learning/base.py b/aeon/classification/deep_learning/base.py
index ed90e65c35..b9e805ae89 100644
--- a/aeon/classification/deep_learning/base.py
+++ b/aeon/classification/deep_learning/base.py
@@ -45,7 +45,6 @@ class BaseDeepClassifier(BaseClassifier, ABC):
         "non-deterministic": True,
         "cant-pickle": True,
         "python_dependencies": "tensorflow",
-        "python_version": "<3.12",
     }
 
     def __init__(
diff --git a/aeon/clustering/deep_learning/base.py b/aeon/clustering/deep_learning/base.py
index 9d60df4f4d..02224c9b83 100644
--- a/aeon/clustering/deep_learning/base.py
+++ b/aeon/clustering/deep_learning/base.py
@@ -41,7 +41,6 @@ class BaseDeepClusterer(BaseClusterer, ABC):
         "non-deterministic": True,
         "cant-pickle": True,
         "python_dependencies": "tensorflow",
-        "python_version": "<3.12",
     }
 
     def __init__(
diff --git a/aeon/networks/base.py b/aeon/networks/base.py
index 86aaaa8043..1af368eb17 100644
--- a/aeon/networks/base.py
+++ b/aeon/networks/base.py
@@ -17,7 +17,6 @@ def __init__(self):
 
     _tags = {
         "python_dependencies": "tensorflow",
-        "python_version": "<3.12",
     }
 
     @abstractmethod
diff --git a/aeon/regression/deep_learning/base.py b/aeon/regression/deep_learning/base.py
index ab174e1d2e..941fe7b99c 100644
--- a/aeon/regression/deep_learning/base.py
+++ b/aeon/regression/deep_learning/base.py
@@ -38,7 +38,6 @@ class BaseDeepRegressor(BaseRegressor, ABC):
         "non-deterministic": True,
         "cant-pickle": True,
         "python_dependencies": "tensorflow",
-        "python_version": "<3.12",
     }
 
     def __init__(self, batch_size=40, last_file_name="last_model"):

From 949e6bb32ef4c448637aafc0ff54517ff96537f6 Mon Sep 17 00:00:00 2001
From: hadifawaz1999
Date: Mon, 3 Jun 2024 20:06:32 +0200
Subject: [PATCH 17/31] edit pyproject for test details

---
pyproject.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index de227511aa..f335149d50 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -182,9 +182,6 @@ addopts = ''' --doctest-modules --durations 20 --timeout 600 - --showlocals - --numprocesses logical - --dist worksteal --reruns 2 --only-rerun "crashed while running" ''' From ebbfa99585b25d34c30350778b2284ddcc0c6276 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Mon, 3 Jun 2024 20:37:37 +0200 Subject: [PATCH 18/31] remove typeguard --- aeon/networks/_encoder.py | 4 +--- pyproject.toml | 2 -- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index 33959367fb..ab9c820359 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -146,11 +146,10 @@ def build_network(self, input_shape, **kwargs): return input_layer, flatten_layer -if _check_soft_dependencies(["tensorflow", "typeguard"], severity="none"): +if _check_soft_dependencies(["tensorflow"], severity="none"): import logging import tensorflow as tf - from typeguard import typechecked @tf.keras.utils.register_keras_serializable(package="Addons") class GroupNormalization(tf.keras.layers.Layer): @@ -201,7 +200,6 @@ class GroupNormalization(tf.keras.layers.Layer): https://github.com/tensorflow/addons/tree/v0.20.0 """ - @typechecked def __init__( self, groups: int = 32, diff --git a/pyproject.toml b/pyproject.toml index f335149d50..0dbd6ab8dd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -81,7 +81,6 @@ all_extras = [ "torch>=1.13.1", "tsfresh>=0.20.0", "tslearn>=0.5.2", - "typeguard", "xarray", # for some reason, a heirarchical dask test fails on some Python versions with the @@ -93,7 +92,6 @@ all_extras = [ dl = [ "keras-self-attention", "tensorflow>=2.12; python_version >= '3.9'", - "typeguard", ] unstable_extras = [ "mrsqm>=0.0.1,<0.1.0 ; platform_system == 'Darwin' and python_version < '3.12'", # requires gcc and fftw to be installed for Windows and some other OS (see http://www.fftw.org/index.html) From 3c2fe6bd49bf72716fc161efd3aa4068f1c685f1 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Wed, 3 Jul 2024 10:39:57 +0200 Subject: [PATCH 19/31] remove addons --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ec5d48c89b..8514ca5fb5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,7 +93,6 @@ all_extras = [ dl = [ "keras-self-attention", "tensorflow>=2.12; python_version < '3.12'", - "tensorflow-addons; python_version < '3.12'", # dependency of tensorflow, see issue #1724 "keras<3.4", "tensorflow>=2.12; python_version >= '3.9'", From aaa16ab5597a1218a8d012aa9c9ef9886fd3c8db Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Wed, 3 Jul 2024 10:44:07 +0200 Subject: [PATCH 20/31] remove typeguard --- aeon/classification/deep_learning/_encoder.py | 2 +- .../deep_learning/tests/test_random_state_deep_learning.py | 2 +- aeon/networks/_encoder.py | 2 +- aeon/regression/deep_learning/_encoder.py | 2 +- .../deep_learning/tests/test_random_state_deep_regressor.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aeon/classification/deep_learning/_encoder.py b/aeon/classification/deep_learning/_encoder.py index 5d6b936270..24633ddff8 100644 --- a/aeon/classification/deep_learning/_encoder.py +++ b/aeon/classification/deep_learning/_encoder.py @@ -83,7 +83,7 @@ class EncoderClassifier(BaseDeepClassifier): """ _tags = { - "python_dependencies": ["tensorflow", "typeguard"], + 
"python_dependencies": ["tensorflow"], } def __init__( diff --git a/aeon/classification/deep_learning/tests/test_random_state_deep_learning.py b/aeon/classification/deep_learning/tests/test_random_state_deep_learning.py index 498648d173..e7c1f4c962 100644 --- a/aeon/classification/deep_learning/tests/test_random_state_deep_learning.py +++ b/aeon/classification/deep_learning/tests/test_random_state_deep_learning.py @@ -13,7 +13,7 @@ @pytest.mark.skipif( - not _check_soft_dependencies(["tensorflow", "typeguard"], severity="none"), + not _check_soft_dependencies(["tensorflow"], severity="none"), reason="skip test if required soft dependency not available", ) def test_random_state_deep_learning_cls(): diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index dc6a19b069..8f30b712c7 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -47,7 +47,7 @@ class EncoderNetwork(BaseDeepLearningNetwork): """ - _tags = {"python_dependencies": ["tensorflow", "typeguard"]} + _tags = {"python_dependencies": ["tensorflow"]} _config = { "python_dependencies": ["tensorflow", "tensorflow-addons"], "python_version": "<3.12", diff --git a/aeon/regression/deep_learning/_encoder.py b/aeon/regression/deep_learning/_encoder.py index c7088c66dd..5a9d0b7e0e 100644 --- a/aeon/regression/deep_learning/_encoder.py +++ b/aeon/regression/deep_learning/_encoder.py @@ -101,7 +101,7 @@ class EncoderRegressor(BaseDeepRegressor): """ _tags = { - "python_dependencies": ["tensorflow", "typeguard"], + "python_dependencies": ["tensorflow"], } def __init__( diff --git a/aeon/regression/deep_learning/tests/test_random_state_deep_regressor.py b/aeon/regression/deep_learning/tests/test_random_state_deep_regressor.py index 01ac9d7309..73616c1f53 100644 --- a/aeon/regression/deep_learning/tests/test_random_state_deep_regressor.py +++ b/aeon/regression/deep_learning/tests/test_random_state_deep_regressor.py @@ -13,7 +13,7 @@ @pytest.mark.skipif( - not _check_soft_dependencies(["tensorflow", "typeguard"], severity="none"), + not _check_soft_dependencies(["tensorflow"], severity="none"), reason="skip test if required soft dependency not available", ) def test_random_state_deep_learning_rgs(): From 03030bbeadc51be527df954d5b0227e8580192a8 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Wed, 3 Jul 2024 11:10:29 +0200 Subject: [PATCH 21/31] fix bug in python version --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 8514ca5fb5..324c1511cc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,7 +92,6 @@ all_extras = [ ] dl = [ "keras-self-attention", - "tensorflow>=2.12; python_version < '3.12'", # dependency of tensorflow, see issue #1724 "keras<3.4", "tensorflow>=2.12; python_version >= '3.9'", From 5538f89f78bbfb8abf7dc98e5ae6e722f1027be8 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Wed, 3 Jul 2024 11:32:36 +0200 Subject: [PATCH 22/31] remove config on python < 3.12 --- aeon/networks/_ae_fcn.py | 1 - aeon/networks/_ae_resnet.py | 1 - aeon/networks/base.py | 2 -- 3 files changed, 4 deletions(-) diff --git a/aeon/networks/_ae_fcn.py b/aeon/networks/_ae_fcn.py index 37bbdf1aa5..4c6c4b1b74 100644 --- a/aeon/networks/_ae_fcn.py +++ b/aeon/networks/_ae_fcn.py @@ -58,7 +58,6 @@ class AEFCNNetwork(BaseDeepLearningNetwork): _config = { "python_dependencies": ["tensorflow"], - "python_version": "<3.12", "structure": "auto-encoder", } diff --git a/aeon/networks/_ae_resnet.py b/aeon/networks/_ae_resnet.py index 3b817efe8e..8d8820ecf0 100644 --- 
a/aeon/networks/_ae_resnet.py +++ b/aeon/networks/_ae_resnet.py @@ -67,7 +67,6 @@ class AEResNetNetwork(BaseDeepLearningNetwork): _config = { "python_dependencies": ["tensorflow"], - "python_version": "<3.12", "structure": "auto-encoder", } diff --git a/aeon/networks/base.py b/aeon/networks/base.py index aed3760b87..41ec8d138a 100644 --- a/aeon/networks/base.py +++ b/aeon/networks/base.py @@ -30,7 +30,6 @@ def __init__(self): _config = { "python_dependencies": ["tensorflow"], - "python_version": "<3.12", "structure": "encoder", } @@ -61,7 +60,6 @@ def __init__(self, soft_dependencies="tensorflow", python_version="<3.12"): _config = { "python_dependencies": ["tensorflow"], - "python_version": "<3.12", "structure": "encoder", } From d4b7c427f2de7bbf6d05a015332c212b110125b8 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Wed, 3 Jul 2024 13:57:29 +0200 Subject: [PATCH 23/31] remove python version from test --- aeon/networks/tests/test_all_networks.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/aeon/networks/tests/test_all_networks.py b/aeon/networks/tests/test_all_networks.py index 2100e488d4..948bd6f373 100644 --- a/aeon/networks/tests/test_all_networks.py +++ b/aeon/networks/tests/test_all_networks.py @@ -5,10 +5,7 @@ import pytest from aeon import networks -from aeon.utils.validation._dependencies import ( - _check_python_version, - _check_soft_dependencies, -) +from aeon.utils.validation._dependencies import _check_soft_dependencies __maintainer__ = [] @@ -21,12 +18,10 @@ def test_network_config(network): """Tests if the config dictionary of classes is correctly configured.""" assert "python_dependencies" in network._config.keys() - assert "python_version" in network._config.keys() assert "structure" in network._config.keys() assert isinstance(network._config["python_dependencies"], list) and ( "tensorflow" in network._config["python_dependencies"] ) - assert isinstance(network._config["python_version"], str) assert isinstance(network._config["structure"], str) @@ -41,7 +36,7 @@ def test_all_networks_functionality(network): ): if _check_soft_dependencies( network._config["python_dependencies"], severity="none" - ) and _check_python_version(network._config["python_version"], severity="none"): + ): my_network = network() if network._config["structure"] == "auto-encoder": From 13449babe7e46d2a6d4d337dfe0d26175589da00 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Wed, 3 Jul 2024 14:20:44 +0200 Subject: [PATCH 24/31] remove python version from base --- aeon/networks/base.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/aeon/networks/base.py b/aeon/networks/base.py index 41ec8d138a..f280bf7ac6 100644 --- a/aeon/networks/base.py +++ b/aeon/networks/base.py @@ -9,7 +9,6 @@ from aeon.base import BaseObject from aeon.utils.validation._dependencies import ( _check_estimator_deps, - _check_python_version, _check_soft_dependencies, ) @@ -53,9 +52,8 @@ def build_network(self, input_shape, **kwargs): class BaseDeepLearningNetwork(ABC): """Abstract base class for deep learning networks.""" - def __init__(self, soft_dependencies="tensorflow", python_version="<3.12"): + def __init__(self, soft_dependencies="tensorflow"): _check_soft_dependencies(soft_dependencies) - _check_python_version(python_version) super().__init__() _config = { From ac1ae61581ddf9601cb1f1ef5a447bba7a461ef2 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Wed, 3 Jul 2024 15:16:08 +0200 Subject: [PATCH 25/31] use group norm --- aeon/networks/_encoder.py | 326 
+------------------------------------- 1 file changed, 2 insertions(+), 324 deletions(-) diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index 8f30b712c7..4b1a6bda1c 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -3,8 +3,6 @@ __maintainer__ = ["hadifawaz1999"] from aeon.networks.base import BaseDeepLearningNetwork -from aeon.utils.networks import tensorflow_addons as types -from aeon.utils.validation._dependencies import _check_soft_dependencies class EncoderNetwork(BaseDeepLearningNetwork): @@ -111,7 +109,7 @@ def build_network(self, input_shape, **kwargs): strides=self.strides, )(x) - conv = InstanceNormalization()(conv) + conv = tf.keras.layers.GroupNormalization(groups=-1)(conv) conv = tf.keras.layers.PReLU(shared_axes=[1])(conv) conv = tf.keras.layers.Dropout(self.dropout_proba)(conv) @@ -142,330 +140,10 @@ def build_network(self, input_shape, **kwargs): hidden_fc_layer = tf.keras.layers.Dense( units=self.fc_units, activation=self.activation )(attention) - hidden_fc_layer = InstanceNormalization()(hidden_fc_layer) + hidden_fc_layer = tf.keras.layers.GroupNormalization(groups=-1)(hidden_fc_layer) # output layer before classification layer flatten_layer = tf.keras.layers.Flatten()(hidden_fc_layer) return input_layer, flatten_layer - - -if _check_soft_dependencies(["tensorflow"], severity="none"): - import logging - - import tensorflow as tf - - @tf.keras.utils.register_keras_serializable(package="Addons") - class GroupNormalization(tf.keras.layers.Layer): - """Group normalization layer. - - Source: "Group Normalization" (Yuxin Wu & Kaiming He, 2018) - https://arxiv.org/abs/1803.08494 - - Group Normalization divides the channels into groups and computes - within each group the mean and variance for normalization. - Empirically, its accuracy is more stable than batch norm in a wide - range of small batch sizes, if learning rate is adjusted linearly - with batch sizes. - - Relation to Layer Normalization: - If the number of groups is set to 1, then this operation becomes identical - to Layer Normalization. - - Relation to Instance Normalization: - If the number of groups is set to the - input dimension (number of groups is equal - to number of channels), then this operation becomes - identical to Instance Normalization. - - Parameters - ---------- - groups: Integer, the number of groups for Group Normalization. - Can be in the range [1, N] where N is the input dimension. - The input dimension must be divisible by the number of groups. - Defaults to 32. - axis: Integer, the axis that should be normalized. - epsilon: Small float added to variance to avoid dividing by zero. - center: If True, add offset of `beta` to normalized tensor. - If False, `beta` is ignored. - scale: If True, multiply by `gamma`. - If False, `gamma` is not used. - beta_initializer: Initializer for the beta weight. - gamma_initializer: Initializer for the gamma weight. - beta_regularizer: Optional regularizer for the beta weight. - gamma_regularizer: Optional regularizer for the gamma weight. - beta_constraint: Optional constraint for the beta weight. - gamma_constraint: Optional constraint for the gamma weight. 
- - Notes - ----- - This code was taken from the soon to be deprecated project - tensorflow_addons: - https://github.com/tensorflow/addons/tree/v0.20.0 - """ - - def __init__( - self, - groups: int = 32, - axis: int = -1, - epsilon: float = 1e-3, - center: bool = True, - scale: bool = True, - beta_initializer: types.Initializer = "zeros", - gamma_initializer: types.Initializer = "ones", - beta_regularizer: types.Regularizer = None, - gamma_regularizer: types.Regularizer = None, - beta_constraint: types.Constraint = None, - gamma_constraint: types.Constraint = None, - **kwargs, - ): - super().__init__(**kwargs) - self.supports_masking = True - self.groups = groups - self.axis = axis - self.epsilon = epsilon - self.center = center - self.scale = scale - self.beta_initializer = tf.keras.initializers.get(beta_initializer) - self.gamma_initializer = tf.keras.initializers.get(gamma_initializer) - self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer) - self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer) - self.beta_constraint = tf.keras.constraints.get(beta_constraint) - self.gamma_constraint = tf.keras.constraints.get(gamma_constraint) - self._check_axis() - - def build(self, input_shape): - - self._check_if_input_shape_is_none(input_shape) - self._set_number_of_groups_for_instance_norm(input_shape) - self._check_size_of_dimensions(input_shape) - self._create_input_spec(input_shape) - - self._add_gamma_weight(input_shape) - self._add_beta_weight(input_shape) - self.built = True - super().build(input_shape) - - def call(self, inputs): - - input_shape = tf.keras.backend.int_shape(inputs) - tensor_input_shape = tf.shape(inputs) - - reshaped_inputs, group_shape = self._reshape_into_groups( - inputs, input_shape, tensor_input_shape - ) - - normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) - - is_instance_norm = (input_shape[self.axis] // self.groups) == 1 - if not is_instance_norm: - outputs = tf.reshape(normalized_inputs, tensor_input_shape) - else: - outputs = normalized_inputs - - return outputs - - def get_config(self): - config = { - "groups": self.groups, - "axis": self.axis, - "epsilon": self.epsilon, - "center": self.center, - "scale": self.scale, - "beta_initializer": tf.keras.initializers.serialize( - self.beta_initializer - ), - "gamma_initializer": tf.keras.initializers.serialize( - self.gamma_initializer - ), - "beta_regularizer": tf.keras.regularizers.serialize( - self.beta_regularizer - ), - "gamma_regularizer": tf.keras.regularizers.serialize( - self.gamma_regularizer - ), - "beta_constraint": tf.keras.constraints.serialize(self.beta_constraint), - "gamma_constraint": tf.keras.constraints.serialize( - self.gamma_constraint - ), - } - base_config = super().get_config() - return {**base_config, **config} - - def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): - - group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] - is_instance_norm = (input_shape[self.axis] // self.groups) == 1 - if not is_instance_norm: - group_shape[self.axis] = input_shape[self.axis] // self.groups - group_shape.insert(self.axis, self.groups) - group_shape = tf.stack(group_shape) - reshaped_inputs = tf.reshape(inputs, group_shape) - return reshaped_inputs, group_shape - else: - return inputs, group_shape - - def _apply_normalization(self, reshaped_inputs, input_shape): - - group_shape = tf.keras.backend.int_shape(reshaped_inputs) - group_reduction_axes = list(range(1, len(group_shape))) - is_instance_norm = 
(input_shape[self.axis] // self.groups) == 1 - if not is_instance_norm: - axis = -2 if self.axis == -1 else self.axis - 1 - else: - axis = -1 if self.axis == -1 else self.axis - 1 - group_reduction_axes.pop(axis) - - mean, variance = tf.nn.moments( - reshaped_inputs, group_reduction_axes, keepdims=True - ) - - gamma, beta = self._get_reshaped_weights(input_shape) - normalized_inputs = tf.nn.batch_normalization( - reshaped_inputs, - mean=mean, - variance=variance, - scale=gamma, - offset=beta, - variance_epsilon=self.epsilon, - ) - return normalized_inputs - - def _get_reshaped_weights(self, input_shape): - broadcast_shape = self._create_broadcast_shape(input_shape) - gamma = None - beta = None - if self.scale: - gamma = tf.reshape(self.gamma, broadcast_shape) - - if self.center: - beta = tf.reshape(self.beta, broadcast_shape) - return gamma, beta - - def _check_if_input_shape_is_none(self, input_shape): - dim = input_shape[self.axis] - if dim is None: - raise ValueError( - "Axis " + str(self.axis) + " of " - "input tensor should have a defined dimension " - "but the layer received an input with shape " - + str(input_shape) - + "." - ) - - def _set_number_of_groups_for_instance_norm(self, input_shape): - dim = input_shape[self.axis] - - if self.groups == -1: - self.groups = dim - - def _check_size_of_dimensions(self, input_shape): - - dim = input_shape[self.axis] - if dim < self.groups: - raise ValueError( - "Number of groups (" + str(self.groups) + ") cannot be " - "more than the number of channels (" + str(dim) + ")." - ) - - if dim % self.groups != 0: - raise ValueError( - "Number of groups (" + str(self.groups) + ") must be a " - "multiple of the number of channels (" + str(dim) + ")." - ) - - def _check_axis(self): - - if self.axis == 0: - raise ValueError( - "You are trying to normalize your batch axis. Do you want to " - "use tf.layer.batch_normalization instead" - ) - - def _create_input_spec(self, input_shape): - - dim = input_shape[self.axis] - self.input_spec = tf.keras.layers.InputSpec( - ndim=len(input_shape), axes={self.axis: dim} - ) - - def _add_gamma_weight(self, input_shape): - - dim = input_shape[self.axis] - shape = (dim,) - - if self.scale: - self.gamma = self.add_weight( - shape=shape, - name="gamma", - initializer=self.gamma_initializer, - regularizer=self.gamma_regularizer, - constraint=self.gamma_constraint, - ) - else: - self.gamma = None - - def _add_beta_weight(self, input_shape): - - dim = input_shape[self.axis] - shape = (dim,) - - if self.center: - self.beta = self.add_weight( - shape=shape, - name="beta", - initializer=self.beta_initializer, - regularizer=self.beta_regularizer, - constraint=self.beta_constraint, - ) - else: - self.beta = None - - def _create_broadcast_shape(self, input_shape): - broadcast_shape = [1] * len(input_shape) - is_instance_norm = (input_shape[self.axis] // self.groups) == 1 - if not is_instance_norm: - broadcast_shape[self.axis] = input_shape[self.axis] // self.groups - broadcast_shape.insert(self.axis, self.groups) - else: - broadcast_shape[self.axis] = self.groups - return broadcast_shape - - @tf.keras.utils.register_keras_serializable(package="Addons") - class InstanceNormalization(GroupNormalization): - """Instance normalization layer. - - Instance Normalization is an specific case of ```GroupNormalization```since - it normalizes all features of one channel. The Groupsize is equal to the - channel size. 
Empirically, its accuracy is more stable than batch norm in a - wide range of small batch sizes, if learning rate is adjusted linearly - with batch sizes. - - Parameters - ---------- - axis: Integer, the axis that should be normalized. - epsilon: Small float added to variance to avoid dividing by zero. - center: If True, add offset of `beta` to normalized tensor. - If False, `beta` is ignored. - scale: If True, multiply by `gamma`. - If False, `gamma` is not used. - beta_initializer: Initializer for the beta weight. - gamma_initializer: Initializer for the gamma weight. - beta_regularizer: Optional regularizer for the beta weight. - gamma_regularizer: Optional regularizer for the gamma weight. - beta_constraint: Optional constraint for the beta weight. - gamma_constraint: Optional constraint for the gamma weight. - - References - ---------- - - [Instance Normalization: The Missing Ingredient for Fast Stylization] - (https://arxiv.org/abs/1607.08022) - """ - - def __init__(self, **kwargs): - if "groups" in kwargs: - logging.warning("The given value for groups will be overwritten.") - - kwargs["groups"] = -1 - super().__init__(**kwargs) From 6e9217c15a131ebc203722e86a2b4ccf7c56914d Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Wed, 3 Jul 2024 15:53:11 +0200 Subject: [PATCH 26/31] re-add 3.12 limit --- aeon/utils/networks/tensorflow_addons.py | 59 ------------------------ pyproject.toml | 4 +- 2 files changed, 2 insertions(+), 61 deletions(-) delete mode 100644 aeon/utils/networks/tensorflow_addons.py diff --git a/aeon/utils/networks/tensorflow_addons.py b/aeon/utils/networks/tensorflow_addons.py deleted file mode 100644 index efced7aa73..0000000000 --- a/aeon/utils/networks/tensorflow_addons.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Types for typing functions signatures. - -The following was taken from the tensorflow_addons deprecated package. 
- -package: https://www.tensorflow.org/addons -version: 0.23.0 -file: https://github.com/tensorflow/addons/blob/master/tensorflow_addons/utils/types.py -""" - -from aeon.utils.validation._dependencies import _check_soft_dependencies - -if _check_soft_dependencies("tensorflow", severity="none"): - - import importlib - from typing import Callable, List, Union - - import numpy as np - import tensorflow as tf - from tensorflow.python.keras.engine import keras_tensor - - Number = Union[ - float, - int, - np.float16, - np.float32, - np.float64, - np.int8, - np.int16, - np.int32, - np.int64, - np.uint8, - np.uint16, - np.uint32, - np.uint64, - ] - - Initializer = Union[None, dict, str, Callable, tf.keras.initializers.Initializer] - Regularizer = Union[None, dict, str, Callable, tf.keras.regularizers.Regularizer] - Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint] - Activation = Union[None, str, Callable] - if importlib.util.find_spec("tensorflow.keras.optimizers.legacy") is not None: - Optimizer = Union[ - tf.keras.optimizers.Optimizer, tf.keras.optimizers.legacy.Optimizer, str - ] - else: - Optimizer = Union[tf.keras.optimizers.Optimizer, str] - - TensorLike = Union[ - List[Union[Number, list]], - tuple, - Number, - np.ndarray, - tf.Tensor, - tf.SparseTensor, - tf.Variable, - keras_tensor.KerasTensor, - ] - FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64] - AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None] diff --git a/pyproject.toml b/pyproject.toml index 324c1511cc..aa0c5f0793 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,7 +75,7 @@ all_extras = [ "stumpy>=1.5.1", "ruptures>=1.1.9", "tbats>=1.1.0", - "tensorflow>=2.12; python_version >= '3.9'", + "tensorflow>=2.12; python_version < '3.12'", "torch>=1.13.1", "tsfresh>=0.20.0", "tslearn>=0.5.2", @@ -94,7 +94,7 @@ dl = [ "keras-self-attention", # dependency of tensorflow, see issue #1724 "keras<3.4", - "tensorflow>=2.12; python_version >= '3.9'", + "tensorflow>=2.12; python_version < '3.12'", ] unstable_extras = [ "mrsqm>=0.0.1,<0.1.0; platform_system == 'Darwin' and python_version < '3.12'", # requires gcc and fftw to be installed for Windows and some other OS (see http://www.fftw.org/index.html) From e47e3d80804d047813876205ab2da3e212dec488 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Thu, 4 Jul 2024 11:23:02 +0200 Subject: [PATCH 27/31] remove tags and add python version --- aeon/networks/_ae_fcn.py | 1 + aeon/networks/_ae_resnet.py | 1 + aeon/networks/_encoder.py | 1 - aeon/networks/base.py | 6 +++++- aeon/networks/tests/test_all_networks.py | 9 +++++++-- 5 files changed, 14 insertions(+), 4 deletions(-) diff --git a/aeon/networks/_ae_fcn.py b/aeon/networks/_ae_fcn.py index 4c6c4b1b74..37bbdf1aa5 100644 --- a/aeon/networks/_ae_fcn.py +++ b/aeon/networks/_ae_fcn.py @@ -58,6 +58,7 @@ class AEFCNNetwork(BaseDeepLearningNetwork): _config = { "python_dependencies": ["tensorflow"], + "python_version": "<3.12", "structure": "auto-encoder", } diff --git a/aeon/networks/_ae_resnet.py b/aeon/networks/_ae_resnet.py index 8d8820ecf0..3b817efe8e 100644 --- a/aeon/networks/_ae_resnet.py +++ b/aeon/networks/_ae_resnet.py @@ -67,6 +67,7 @@ class AEResNetNetwork(BaseDeepLearningNetwork): _config = { "python_dependencies": ["tensorflow"], + "python_version": "<3.12", "structure": "auto-encoder", } diff --git a/aeon/networks/_encoder.py b/aeon/networks/_encoder.py index 4b1a6bda1c..cc600aa932 100644 --- a/aeon/networks/_encoder.py +++ b/aeon/networks/_encoder.py @@ -45,7 
+45,6 @@ class EncoderNetwork(BaseDeepLearningNetwork): """ - _tags = {"python_dependencies": ["tensorflow"]} _config = { "python_dependencies": ["tensorflow", "tensorflow-addons"], "python_version": "<3.12", diff --git a/aeon/networks/base.py b/aeon/networks/base.py index f280bf7ac6..aed3760b87 100644 --- a/aeon/networks/base.py +++ b/aeon/networks/base.py @@ -9,6 +9,7 @@ from aeon.base import BaseObject from aeon.utils.validation._dependencies import ( _check_estimator_deps, + _check_python_version, _check_soft_dependencies, ) @@ -29,6 +30,7 @@ def __init__(self): _config = { "python_dependencies": ["tensorflow"], + "python_version": "<3.12", "structure": "encoder", } @@ -52,12 +54,14 @@ def build_network(self, input_shape, **kwargs): class BaseDeepLearningNetwork(ABC): """Abstract base class for deep learning networks.""" - def __init__(self, soft_dependencies="tensorflow"): + def __init__(self, soft_dependencies="tensorflow", python_version="<3.12"): _check_soft_dependencies(soft_dependencies) + _check_python_version(python_version) super().__init__() _config = { "python_dependencies": ["tensorflow"], + "python_version": "<3.12", "structure": "encoder", } diff --git a/aeon/networks/tests/test_all_networks.py b/aeon/networks/tests/test_all_networks.py index 948bd6f373..353695b7b7 100644 --- a/aeon/networks/tests/test_all_networks.py +++ b/aeon/networks/tests/test_all_networks.py @@ -5,7 +5,10 @@ import pytest from aeon import networks -from aeon.utils.validation._dependencies import _check_soft_dependencies +from aeon.utils.validation._dependencies import ( + _check_python_version, + _check_soft_dependencies, +) __maintainer__ = [] @@ -18,10 +21,12 @@ def test_network_config(network): """Tests if the config dictionary of classes is correctly configured.""" assert "python_dependencies" in network._config.keys() + assert "python_version" in network._config.keys() assert "structure" in network._config.keys() assert isinstance(network._config["python_dependencies"], list) and ( "tensorflow" in network._config["python_dependencies"] ) + assert isinstance(network._config["python_version"], str) assert isinstance(network._config["structure"], str) @@ -33,7 +38,7 @@ def test_all_networks_functionality(network): if not ( network.__name__ in ["BaseDeepNetwork", "BaseDeepLearningNetwork", "EncoderNetwork"] - ): + ) and _check_python_version(network._config["python_version"], severity="none"): if _check_soft_dependencies( network._config["python_dependencies"], severity="none" ): From 09c35a5db5919dcec273b93e3bff718e7721a3ea Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Thu, 4 Jul 2024 13:38:26 +0200 Subject: [PATCH 28/31] skip random state clr test --- .../tests/test_random_state_deep_learning_cluster.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aeon/clustering/deep_learning/tests/test_random_state_deep_learning_cluster.py b/aeon/clustering/deep_learning/tests/test_random_state_deep_learning_cluster.py index d695310d1d..1b5766bd9f 100644 --- a/aeon/clustering/deep_learning/tests/test_random_state_deep_learning_cluster.py +++ b/aeon/clustering/deep_learning/tests/test_random_state_deep_learning_cluster.py @@ -7,13 +7,14 @@ from aeon.clustering import deep_learning from aeon.testing.data_generation import make_example_3d_numpy -from aeon.utils.validation._dependencies import _check_soft_dependencies __maintainer__ = ["hadifawaz1999"] @pytest.mark.skipif( - not _check_soft_dependencies("tensorflow", severity="none"), + # not _check_soft_dependencies("tensorflow", 
severity="none"), + # See Issue #1761 + True, reason="skip test if required soft dependency not available", ) def test_random_state_deep_learning_clr(): From 2bef940730770223a69441b3aa6d300c2b9f6375 Mon Sep 17 00:00:00 2001 From: hadifawaz1999 Date: Thu, 4 Jul 2024 13:52:52 +0200 Subject: [PATCH 29/31] fix test all networks --- aeon/networks/tests/test_all_networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aeon/networks/tests/test_all_networks.py b/aeon/networks/tests/test_all_networks.py index 353695b7b7..2100e488d4 100644 --- a/aeon/networks/tests/test_all_networks.py +++ b/aeon/networks/tests/test_all_networks.py @@ -38,10 +38,10 @@ def test_all_networks_functionality(network): if not ( network.__name__ in ["BaseDeepNetwork", "BaseDeepLearningNetwork", "EncoderNetwork"] - ) and _check_python_version(network._config["python_version"], severity="none"): + ): if _check_soft_dependencies( network._config["python_dependencies"], severity="none" - ): + ) and _check_python_version(network._config["python_version"], severity="none"): my_network = network() if network._config["structure"] == "auto-encoder": From da619e40e58ddec577290c6b491b157c60dc8a96 Mon Sep 17 00:00:00 2001 From: Tony Bagnall Date: Thu, 4 Jul 2024 13:25:47 +0100 Subject: [PATCH 30/31] set y for channel selection test --- .../collection/tests/test_all_collection_transformers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aeon/transformations/collection/tests/test_all_collection_transformers.py b/aeon/transformations/collection/tests/test_all_collection_transformers.py index fcd3399e4e..703d63212b 100644 --- a/aeon/transformations/collection/tests/test_all_collection_transformers.py +++ b/aeon/transformations/collection/tests/test_all_collection_transformers.py @@ -24,7 +24,8 @@ def test_channel_selectors(trans): if issubclass(trans, BaseChannelSelector): # Need fit for channel selection # Must select at least one channel - X, y = make_example_3d_numpy(n_cases=10, n_channels=6, n_timepoints=30) + X, _ = make_example_3d_numpy(n_cases=10, n_channels=6, n_timepoints=30) + y = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1]) cs = trans() assert not cs.get_tag("fit_is_empty") cs.fit(X, y) From fff94883192bb05bda897ba2e909004caf47d684 Mon Sep 17 00:00:00 2001 From: Tony Bagnall Date: Thu, 4 Jul 2024 13:28:19 +0100 Subject: [PATCH 31/31] set y for channel selection test --- .../collection/tests/test_all_collection_transformers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aeon/transformations/collection/tests/test_all_collection_transformers.py b/aeon/transformations/collection/tests/test_all_collection_transformers.py index 703d63212b..db47461d43 100644 --- a/aeon/transformations/collection/tests/test_all_collection_transformers.py +++ b/aeon/transformations/collection/tests/test_all_collection_transformers.py @@ -24,8 +24,8 @@ def test_channel_selectors(trans): if issubclass(trans, BaseChannelSelector): # Need fit for channel selection # Must select at least one channel - X, _ = make_example_3d_numpy(n_cases=10, n_channels=6, n_timepoints=30) - y = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1]) + X, _ = make_example_3d_numpy(n_cases=20, n_channels=6, n_timepoints=30) + y = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]) cs = trans() assert not cs.get_tag("fit_is_empty") cs.fit(X, y)
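
A note on the central substitution in PATCH 25: the series swaps the vendored
tensorflow-addons InstanceNormalization layer for
tf.keras.layers.GroupNormalization(groups=-1), relying on the equivalence
stated in the deleted docstring (one group per channel is instance
normalization). Below is a minimal sketch of that check; it is not part of
any patch, it assumes only TensorFlow >= 2.12 as pinned in pyproject.toml,
and the input shape and tolerance are illustrative:

import numpy as np
import tensorflow as tf

# Toy channels-last batch: (cases, timepoints, channels).
rng = np.random.default_rng(0)
x = rng.normal(size=(4, 30, 6)).astype(np.float32)

# groups=-1 makes one group per channel, i.e. instance normalization.
gn = tf.keras.layers.GroupNormalization(groups=-1, epsilon=1e-3)
y = gn(x).numpy()

# Reference: standardize each channel of each case over the time axis.
# At initialization gamma = 1 and beta = 0, so the layer reduces to this.
mean = x.mean(axis=1, keepdims=True)
var = x.var(axis=1, keepdims=True)
expected = (x - mean) / np.sqrt(var + 1e-3)

np.testing.assert_allclose(y, expected, atol=1e-4)

If this assertion holds, replacing InstanceNormalization()(conv) with
tf.keras.layers.GroupNormalization(groups=-1)(conv) in EncoderNetwork is
behaviour-preserving at initialization, which is the design choice PATCH 25
depends on.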
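
PATCHES 27 to 29 converge on a single gating convention for optional
dependencies: networks declare them in a _config dict, and tests consult the
checker helpers before running. A condensed sketch of that convention
follows; the config constant and the test name are hypothetical, while the
_config keys and both checker calls appear verbatim in the patches:

import pytest

from aeon.utils.validation._dependencies import (
    _check_python_version,
    _check_soft_dependencies,
)

# Hypothetical network config, mirroring the keys test_network_config asserts.
MY_NETWORK_CONFIG = {
    "python_dependencies": ["tensorflow"],
    "python_version": "<3.12",
    "structure": "encoder",
}


@pytest.mark.skipif(
    not (
        _check_soft_dependencies(
            MY_NETWORK_CONFIG["python_dependencies"], severity="none"
        )
        and _check_python_version(
            MY_NETWORK_CONFIG["python_version"], severity="none"
        )
    ),
    reason="skip test if required soft dependency not available",
)
def test_hypothetical_network_builds():
    # The gating above is the point of the sketch; a real test would
    # instantiate the network and call build_network here.
    assert True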