diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
index 95435746d4..3c57528fc5 100644
--- a/.github/workflows/scorecard.yml
+++ b/.github/workflows/scorecard.yml
@@ -27,7 +27,7 @@ jobs:
           persist-credentials: false

       - name: Run analysis
-        uses: ossf/scorecard-action@v2.4.0
+        uses: ossf/scorecard-action@v2.4.1
         with:
           results_file: results.sarif
           results_format: sarif
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3e359cf34a..4349512761 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -29,7 +29,7 @@ repos:
         args: [ "--create", "--python-folders", "aeon" ]

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.9.6
+    rev: v0.9.7
     hooks:
       - id: ruff
         args: [ "--fix"]
diff --git a/aeon/base/_base_collection.py b/aeon/base/_base_collection.py
index ea3b21ed32..4d7f4b4564 100644
--- a/aeon/base/_base_collection.py
+++ b/aeon/base/_base_collection.py
@@ -1,4 +1,24 @@
-"""Base class for estimators that fit collections of time series."""
+"""
+Base class for estimators that fit collections of time series.
+
+    class name: BaseCollectionEstimator
+
+Defining methods:
+    preprocessing    - _preprocess_collection(self, X, store_metadata=True)
+    input checking   - _check_X(self, X)
+    input conversion - _convert_X(self, X)
+    shape checking   - _check_shape(self, X)
+
+Inherited inspection methods:
+    hyper-parameter inspection  - get_params()
+    fitted parameter inspection - get_fitted_params()
+
+State:
+    fitted model/strategy   - by convention, any attributes ending in "_"
+    fitted state flag       - is_fitted (property)
+    fitted state inspection - check_is_fitted()
+
+"""

 from abc import abstractmethod
diff --git a/aeon/classification/deep_learning/base.py b/aeon/classification/deep_learning/base.py
index 2ed56bc0bc..61ddeb3a72 100644
--- a/aeon/classification/deep_learning/base.py
+++ b/aeon/classification/deep_learning/base.py
@@ -1,6 +1,23 @@
 """
 Abstract base class for the Keras neural network classifiers.

+    class name: BaseDeepClassifier
+
+Defining methods:
+    fitting        - fit(self, X, y)
+    predicting     - predict(self, X)
+                   - predict_proba(self, X)
+    model building - build_model(self, input_shape, n_classes) (abstract method)
+
+Inherited inspection methods:
+    hyper-parameter inspection  - get_params()
+    fitted parameter inspection - get_fitted_params()
+
+State:
+    fitted model/strategy   - by convention, any attributes ending in "_"
+    fitted state flag       - is_fitted (property)
+    fitted state inspection - check_is_fitted()
+
 The reason for this class between BaseClassifier and deep_learning
 classifiers is because we can generalise tags, _predict and _predict_proba
 """
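The docstring added above names `build_model(self, input_shape, n_classes)` as the abstract hook each concrete deep classifier fills in. A minimal sketch of that contract, assuming tensorflow is available; `TinyDeepClassifier`, its architecture, and the compile settings are illustrative only, and a real aeon subclass also implements `_fit`:

```python
import tensorflow as tf

from aeon.classification.deep_learning.base import BaseDeepClassifier


class TinyDeepClassifier(BaseDeepClassifier):
    """Hypothetical subclass, only to illustrate the build_model contract."""

    def build_model(self, input_shape, n_classes):
        # Map a (n_timepoints, n_channels) input to n_classes softmax outputs.
        inputs = tf.keras.layers.Input(shape=input_shape)
        x = tf.keras.layers.Flatten()(inputs)
        outputs = tf.keras.layers.Dense(n_classes, activation="softmax")(x)
        model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer="adam", loss="categorical_crossentropy")
        return model
```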
diff --git a/aeon/classification/feature_based/_catch22.py b/aeon/classification/feature_based/_catch22.py
index bfad28dd44..bec5deff88 100644
--- a/aeon/classification/feature_based/_catch22.py
+++ b/aeon/classification/feature_based/_catch22.py
@@ -67,6 +67,17 @@ class Catch22Classifier(BaseClassifier):
         if None a 'prefer' value of "threads" is used by default.
         Valid options are "loky", "multiprocessing", "threading" or a custom backend.
         See the joblib Parallel documentation for more details.
+    class_weight : {“balanced”, “balanced_subsample”}, dict or list of dicts, default=None
+        From sklearn documentation:
+        If not given, all classes are supposed to have weight one.
+        The “balanced” mode uses the values of y to automatically adjust weights
+        inversely proportional to class frequencies in the input data as
+        n_samples / (n_classes * np.bincount(y)).
+        The “balanced_subsample” mode is the same as “balanced” except that weights
+        are computed based on the bootstrap sample for every tree grown.
+        For multi-output, the weights of each column of y will be multiplied.
+        Note that these weights will be multiplied with sample_weight (passed through
+        the fit method) if sample_weight is specified.

     Attributes
     ----------
@@ -132,6 +143,7 @@ def __init__(
         random_state=None,
         n_jobs=1,
         parallel_backend=None,
+        class_weight=None,
     ):
         self.features = features
         self.catch24 = catch24
@@ -142,6 +154,7 @@
         self.random_state = random_state
         self.n_jobs = n_jobs
         self.parallel_backend = parallel_backend
+        self.class_weight = class_weight

         super().__init__()
@@ -175,7 +188,7 @@ def _fit(self, X, y):
         self.estimator_ = _clone_estimator(
             (
-                RandomForestClassifier(n_estimators=200)
+                RandomForestClassifier(n_estimators=200, class_weight=self.class_weight)
                 if self.estimator is None
                 else self.estimator
             ),
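Since this `class_weight` docstring block recurs verbatim in the three classifiers below, here is a quick worked example of the quoted "balanced" formula, in plain numpy:

```python
import numpy as np

# Six samples in two classes: class 0 occurs four times, class 1 twice.
y = np.array([0, 0, 0, 0, 1, 1])
n_samples = len(y)
n_classes = len(np.unique(y))

# weights = n_samples / (n_classes * np.bincount(y))
weights = n_samples / (n_classes * np.bincount(y))
print(weights)  # [0.75 1.5 ] -> the minority class is up-weighted
```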
diff --git a/aeon/classification/feature_based/_signature_classifier.py b/aeon/classification/feature_based/_signature_classifier.py
index 88308436f5..a3f659efcf 100644
--- a/aeon/classification/feature_based/_signature_classifier.py
+++ b/aeon/classification/feature_based/_signature_classifier.py
@@ -61,6 +61,17 @@ class SignatureClassifier(BaseClassifier):
         Signature truncation depth.
     random_state : int, default=None
         If `int`, random_state is the seed used by the random number generator;
+    class_weight : {“balanced”, “balanced_subsample”}, dict or list of dicts, default=None
+        From sklearn documentation:
+        If not given, all classes are supposed to have weight one.
+        The “balanced” mode uses the values of y to automatically adjust weights
+        inversely proportional to class frequencies in the input data as
+        n_samples / (n_classes * np.bincount(y)).
+        The “balanced_subsample” mode is the same as “balanced” except that weights
+        are computed based on the bootstrap sample for every tree grown.
+        For multi-output, the weights of each column of y will be multiplied.
+        Note that these weights will be multiplied with sample_weight (passed through
+        the fit method) if sample_weight is specified.

     Attributes
     ----------
@@ -105,6 +116,7 @@ def __init__(
         sig_tfm="signature",
         depth=4,
         random_state=None,
+        class_weight=None,
     ):
         self.estimator = estimator
         self.augmentation_list = augmentation_list
@@ -116,7 +128,7 @@
         self.sig_tfm = sig_tfm
         self.depth = depth
         self.random_state = random_state
-
+        self.class_weight = class_weight
         super().__init__()

         self.signature_method = SignatureTransformer(
@@ -135,7 +147,9 @@ def _setup_classification_pipeline(self):
         """Set up the full signature method pipeline."""
         # Use rf if no classifier is set
         if self.estimator is None:
-            classifier = RandomForestClassifier(random_state=self.random_state)
+            classifier = RandomForestClassifier(
+                random_state=self.random_state, class_weight=self.class_weight
+            )
         else:
             classifier = _clone_estimator(self.estimator, self.random_state)
diff --git a/aeon/classification/feature_based/_summary.py b/aeon/classification/feature_based/_summary.py
index a4f34ff688..b6e0056392 100644
--- a/aeon/classification/feature_based/_summary.py
+++ b/aeon/classification/feature_based/_summary.py
@@ -43,6 +43,17 @@ class SummaryClassifier(BaseClassifier):
         If `RandomState` instance, random_state is the random number generator;
         If `None`, the random number generator is the `RandomState` instance used
         by `np.random`.
+    class_weight : {“balanced”, “balanced_subsample”}, dict or list of dicts, default=None
+        From sklearn documentation:
+        If not given, all classes are supposed to have weight one.
+        The “balanced” mode uses the values of y to automatically adjust weights
+        inversely proportional to class frequencies in the input data as
+        n_samples / (n_classes * np.bincount(y)).
+        The “balanced_subsample” mode is the same as “balanced” except that weights
+        are computed based on the bootstrap sample for every tree grown.
+        For multi-output, the weights of each column of y will be multiplied.
+        Note that these weights will be multiplied with sample_weight (passed through
+        the fit method) if sample_weight is specified.

     Attributes
     ----------
@@ -85,6 +96,7 @@ def __init__(
         estimator=None,
         n_jobs=1,
         random_state=None,
+        class_weight=None,
     ):
         self.summary_stats = summary_stats
         self.estimator = estimator
@@ -92,6 +104,8 @@
         self.n_jobs = n_jobs
         self.random_state = random_state
+        self.class_weight = class_weight
+
         super().__init__()

     def _fit(self, X, y):
@@ -120,7 +134,7 @@
         self.estimator_ = _clone_estimator(
             (
-                RandomForestClassifier(n_estimators=200)
+                RandomForestClassifier(n_estimators=200, class_weight=self.class_weight)
                 if self.estimator is None
                 else self.estimator
             ),
diff --git a/aeon/classification/feature_based/_tsfresh.py b/aeon/classification/feature_based/_tsfresh.py
index 28dc2dac11..00021da5d8 100644
--- a/aeon/classification/feature_based/_tsfresh.py
+++ b/aeon/classification/feature_based/_tsfresh.py
@@ -46,6 +46,17 @@ class TSFreshClassifier(BaseClassifier):
         If `RandomState` instance, random_state is the random number generator;
         If `None`, the random number generator is the `RandomState` instance used
         by `np.random`.
+    class_weight : {“balanced”, “balanced_subsample”}, dict or list of dicts, default=None
+        From sklearn documentation:
+        If not given, all classes are supposed to have weight one.
+        The “balanced” mode uses the values of y to automatically adjust weights
+        inversely proportional to class frequencies in the input data as
+        n_samples / (n_classes * np.bincount(y)).
+        The “balanced_subsample” mode is the same as “balanced” except that weights
+        are computed based on the bootstrap sample for every tree grown.
+        For multi-output, the weights of each column of y will be multiplied.
+        Note that these weights will be multiplied with sample_weight (passed through
+        the fit method) if sample_weight is specified.

     Attributes
     ----------
@@ -86,6 +97,7 @@ def __init__(
         n_jobs=1,
         chunksize=None,
         random_state=None,
+        class_weight=None,
     ):
         self.default_fc_parameters = default_fc_parameters
         self.relevant_feature_extractor = relevant_feature_extractor
@@ -99,6 +111,7 @@
         self._transformer = None
         self._return_majority_class = False
         self._majority_class = 0
+        self.class_weight = class_weight

         super().__init__()
@@ -137,7 +150,7 @@ def _fit(self, X, y):
         )
         self.estimator_ = _clone_estimator(
             (
-                RandomForestClassifier(n_estimators=200)
+                RandomForestClassifier(n_estimators=200, class_weight=self.class_weight)
                 if self.estimator is None
                 else self.estimator
             ),
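One consequence of each `_fit` change above: `class_weight` is only forwarded to the default `RandomForestClassifier`; a user-supplied `estimator` is cloned as-is, so the weighting must be configured on that estimator directly. A hedged usage sketch:

```python
from sklearn.ensemble import RandomForestClassifier

from aeon.classification.feature_based import Catch22Classifier

# Here class_weight takes effect: the default RandomForestClassifier is
# constructed with class_weight="balanced".
clf_default = Catch22Classifier(class_weight="balanced")

# Here class_weight would not reach the model; set it on the estimator itself.
clf_custom = Catch22Classifier(
    estimator=RandomForestClassifier(class_weight="balanced")
)
```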
diff --git a/aeon/classification/feature_based/tests/test_catch22.py b/aeon/classification/feature_based/tests/test_catch22.py
index 5a709fe4ea..c8067ee57b 100644
--- a/aeon/classification/feature_based/tests/test_catch22.py
+++ b/aeon/classification/feature_based/tests/test_catch22.py
@@ -1,6 +1,7 @@
 """Test catch 22 classifier."""

 import numpy as np
+import pytest
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.linear_model import RidgeClassifier

@@ -19,3 +20,21 @@ def test_catch22():
     c22.fit(X, y)
     p = c22.predict_proba(X)
     assert np.all(np.isin(p, [0, 1]))
+
+
+@pytest.mark.parametrize("class_weight", ["balanced", "balanced_subsample"])
+def test_catch22_classifier_with_class_weight(class_weight):
+    """Test catch22 classifier with class weight."""
+    X, y = make_example_3d_numpy(
+        n_cases=10, n_channels=1, n_timepoints=12, return_y=True, random_state=0
+    )
+    clf = Catch22Classifier(
+        estimator=RandomForestClassifier(n_estimators=5),
+        outlier_norm=True,
+        random_state=0,
+        class_weight=class_weight,
+    )
+    clf.fit(X, y)
+    predictions = clf.predict(X)
+    assert len(predictions) == len(y)
+    assert set(predictions).issubset(set(y))
diff --git a/aeon/classification/feature_based/tests/test_signature.py b/aeon/classification/feature_based/tests/test_signature.py
index b5c29df2d3..2d3d2972d0 100644
--- a/aeon/classification/feature_based/tests/test_signature.py
+++ b/aeon/classification/feature_based/tests/test_signature.py
@@ -18,3 +18,24 @@ def test_signature_classifier():
     cls = SignatureClassifier(estimator=None)
     cls._fit(X, y)
     assert isinstance(cls.pipeline.named_steps["classifier"], RandomForestClassifier)
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies("esig", severity="none"),
+    reason="skip test if required soft dependency esig not available",
+)
+@pytest.mark.parametrize("class_weight", ["balanced", "balanced_subsample"])
+def test_signature_classifier_with_class_weight(class_weight):
+    """Test signature classifier with class weight."""
+    X, y = make_example_3d_numpy(
+        n_cases=10, n_channels=1, n_timepoints=12, return_y=True, random_state=0
+    )
+    clf = SignatureClassifier(
+        estimator=RandomForestClassifier(n_estimators=5),
+        random_state=0,
+        class_weight=class_weight,
+    )
+    clf.fit(X, y)
+    predictions = clf.predict(X)
+    assert len(predictions) == len(y)
+    assert set(predictions).issubset(set(y))
diff --git a/aeon/classification/feature_based/tests/test_summary.py b/aeon/classification/feature_based/tests/test_summary.py
index de698e61cc..0f53130ce0 100644
--- a/aeon/classification/feature_based/tests/test_summary.py
+++ b/aeon/classification/feature_based/tests/test_summary.py
@@ -1,6 +1,7 @@
 """Test summary classifier."""

 import numpy as np
+import pytest
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.linear_model import RidgeClassifier

@@ -19,3 +20,20 @@ def test_summary_classifier():
     cls.fit(X, y)
     p = cls.predict_proba(X)
     assert np.all(np.isin(p, [0, 1]))
+
+
+@pytest.mark.parametrize("class_weight", ["balanced", "balanced_subsample"])
+def test_summary_classifier_with_class_weight(class_weight):
+    """Test summary classifier with class weight."""
+    X, y = make_example_3d_numpy(
+        n_cases=10, n_channels=1, n_timepoints=12, return_y=True, random_state=0
+    )
+    clf = SummaryClassifier(
+        estimator=RandomForestClassifier(n_estimators=5),
+        random_state=0,
+        class_weight=class_weight,
+    )
+    clf.fit(X, y)
+    predictions = clf.predict(X)
+    assert len(predictions) == len(y)
+    assert set(predictions).issubset(set(y))
diff --git a/aeon/classification/feature_based/tests/test_tsfresh.py b/aeon/classification/feature_based/tests/test_tsfresh.py
index 3ab965e6a3..92583e6662 100644
--- a/aeon/classification/feature_based/tests/test_tsfresh.py
+++ b/aeon/classification/feature_based/tests/test_tsfresh.py
@@ -37,3 +37,24 @@
     assert cls._majority_class in [0, 1]
     cls.verbose = 1
     cls.fit(X, y)
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies("tsfresh", severity="none"),
+    reason="skip test if required soft dependency tsfresh not available",
+)
+@pytest.mark.parametrize("class_weight", ["balanced", "balanced_subsample"])
+def test_tsfresh_classifier_with_class_weight(class_weight):
+    """Test tsfresh classifier with class weight."""
+    X, y = make_example_3d_numpy(
+        n_cases=10, n_channels=1, n_timepoints=12, return_y=True, random_state=0
+    )
+    clf = TSFreshClassifier(
+        estimator=RandomForestClassifier(n_estimators=5),
+        random_state=0,
+        class_weight=class_weight,
+    )
+    clf.fit(X, y)
+    predictions = clf.predict(X)
+    assert len(predictions) == len(y)
+    assert set(predictions).issubset(set(y))
diff --git a/aeon/networks/tests/test_fcn.py b/aeon/networks/tests/test_fcn.py
new file mode 100644
index 0000000000..60c1cd42f5
--- /dev/null
+++ b/aeon/networks/tests/test_fcn.py
@@ -0,0 +1,196 @@
+"""Tests for the FCNNetwork class."""
+
+import pytest
+
+from aeon.networks import FCNNetwork
+from aeon.utils.validation._dependencies import _check_soft_dependencies
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+def test_fcnnetwork_valid():
+    """Test FCNNetwork with valid configurations."""
+    input_shape = (100, 5)
+    model = FCNNetwork(n_layers=3)
+    input_layer, output_layer = model.build_network(input_shape)
+
+    assert hasattr(input_layer, "shape")
+    assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "activation, should_raise",
+    [
+        (["relu", "sigmoid", "tanh"], False),
+        (["relu", "sigmoid"], True),
+        (["relu", "sigmoid", "tanh", "softmax"], True),
+        ("relu", False),
+        ("sigmoid", False),
+        ("tanh", False),
+        ("softmax", False),
+    ],
+)
+def test_fcnnetwork_activation(activation, should_raise):
+    """Test FCNNetwork with valid and invalid activation configurations."""
+    input_shape = (100, 5)
+    if should_raise:
+        with pytest.raises(ValueError):
+            model = FCNNetwork(activation=activation)
+            model.build_network(input_shape)
+    else:
+        model = FCNNetwork(activation=activation)
+        input_layer, output_layer = model.build_network(input_shape)
+
+        assert hasattr(input_layer, "shape")
+        assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "kernel_size, should_raise",
+    [
+        ([3, 1, 2], False),
+        ([1, 3], True),
+        ([3, 1, 1, 3], True),
+        (3, False),
+    ],
+)
+def test_fcnnetwork_kernel_size(kernel_size, should_raise):
+    """Test FCNNetwork with valid and invalid kernel_size configurations."""
+    input_shape = (100, 5)
+    if should_raise:
+        with pytest.raises(ValueError):
+            model = FCNNetwork(kernel_size=kernel_size, n_layers=3)
+            model.build_network(input_shape)
+    else:
+        model = FCNNetwork(kernel_size=kernel_size, n_layers=3)
+        input_layer, output_layer = model.build_network(input_shape)
+
+        assert hasattr(input_layer, "shape")
+        assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "dilation_rate, should_raise",
+    [
+        ([1, 2, 1], False),
+        ([1, 4], True),
+        ([1, 2, 4, 1], True),
+        (1, False),
+    ],
+)
+def test_fcnnetwork_dilation_rate(dilation_rate, should_raise):
+    """Test FCNNetwork with valid and invalid dilation_rate configurations."""
+    input_shape = (100, 5)
+    if should_raise:
+        with pytest.raises(ValueError):
+            model = FCNNetwork(dilation_rate=dilation_rate, n_layers=3)
+            model.build_network(input_shape)
+    else:
+        model = FCNNetwork(dilation_rate=dilation_rate, n_layers=3)
+        input_layer, output_layer = model.build_network(input_shape)
+
+        assert hasattr(input_layer, "shape")
+        assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "strides, should_raise",
+    [
+        ([1, 2, 3], False),
+        ([1, 1], True),
+        ([1, 2, 2, 1], True),
+        (1, False),
+    ],
+)
+def test_fcnnetwork_strides(strides, should_raise):
+    """Test FCNNetwork with valid and invalid strides configurations."""
+    input_shape = (100, 5)
+    if should_raise:
+        with pytest.raises(ValueError):
+            model = FCNNetwork(strides=strides, n_layers=3)
+            model.build_network(input_shape)
+    else:
+        model = FCNNetwork(strides=strides, n_layers=3)
+        input_layer, output_layer = model.build_network(input_shape)
+
+        assert hasattr(input_layer, "shape")
+        assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "padding, should_raise",
+    [
+        (["same", "same", "valid"], False),
+        (["valid", "same"], True),
+        (["same", "valid", "same", "valid"], True),
+        ("same", False),
+        ("valid", False),
+    ],
+)
+def test_fcnnetwork_padding(padding, should_raise):
+    """Test FCNNetwork with valid and invalid padding configurations."""
+    input_shape = (100, 5)
+    if should_raise:
+        with pytest.raises(ValueError):
+            model = FCNNetwork(padding=padding, n_layers=3)
+            model.build_network(input_shape)
+    else:
+        model = FCNNetwork(padding=padding, n_layers=3)
+        input_layer, output_layer = model.build_network(input_shape)
+
+        assert hasattr(input_layer, "shape")
+        assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "n_filters, should_raise",
+    [
+        ([32, 64, 128], False),  # Valid case with a list of filters
+        ([32, 64], True),  # Invalid case with fewer filters than layers
+        ([32, 64, 128, 256], True),  # Invalid case with more filters than layers
+        (32, False),  # Valid case with a single filter value
+    ],
+)
+def test_fcnnetwork_n_filters(n_filters, should_raise):
+    """Test FCNNetwork with valid and invalid n_filters configurations."""
+    input_shape = (100, 5)
+    if should_raise:
+        with pytest.raises(ValueError):
+            model = FCNNetwork(n_filters=n_filters, n_layers=3)
+            model.build_network(input_shape)
+    else:
+        model = FCNNetwork(n_filters=n_filters, n_layers=3)
+        input_layer, output_layer = model.build_network(input_shape)
+
+        assert hasattr(input_layer, "shape")
+        assert hasattr(output_layer, "shape")
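The parametrized cases above all enforce one rule: a list-valued argument must supply exactly `n_layers` entries, while a scalar is applied to every layer. A minimal valid construction mirroring the tests, assuming tensorflow is installed:

```python
from aeon.networks import FCNNetwork

# Three conv blocks: per-layer filter counts, one shared kernel size.
network = FCNNetwork(n_layers=3, n_filters=[32, 64, 128], kernel_size=3)
input_layer, output_layer = network.build_network((100, 5))
print(input_layer.shape, output_layer.shape)
```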
diff --git a/aeon/networks/tests/test_mlp.py b/aeon/networks/tests/test_mlp.py
new file mode 100644
index 0000000000..421a4f2841
--- /dev/null
+++ b/aeon/networks/tests/test_mlp.py
@@ -0,0 +1,179 @@
+"""Tests for the MLPNetwork Model."""
+
+import pytest
+
+from aeon.networks import MLPNetwork
+from aeon.utils.validation._dependencies import _check_soft_dependencies
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "n_layers, n_units, activation",
+    [
+        (3, 500, "relu"),
+        (5, [256, 128, 128, 64, 32], "sigmoid"),
+        (2, 128, ["tanh", "relu"]),
+    ],
+)
+def test_mlp_initialization(n_layers, n_units, activation):
+    """Test whether MLPNetwork initializes correctly with different configurations."""
+    from tensorflow.keras.layers import Dense, Dropout, Flatten, InputLayer
+    from tensorflow.keras.models import Model
+
+    mlp = MLPNetwork(n_layers=n_layers, n_units=n_units, activation=activation)
+    input_layer, output_layer = mlp.build_network((1000, 5))
+
+    # Wrap in a Model to access internal layers
+    model = Model(inputs=input_layer, outputs=output_layer)
+    layers = model.layers
+
+    assert isinstance(layers[0], InputLayer), "Expected first layer to be InputLayer"
+    assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten"
+
+    # Check dropout and dense layers ordering
+    for i in range(n_layers):
+        dropout_layer = layers[2 + 2 * i]  # Dropout before Dense
+        dense_layer = layers[3 + 2 * i]  # Dense comes after Dropout
+
+        assert isinstance(
+            dropout_layer, Dropout
+        ), f"Expected Dropout at index {2 + 2 * i}"
+        assert isinstance(dense_layer, Dense), f"Expected Dense at index {3 + 2 * i}"
+
+        # Assert activation function
+        expected_activation = (
+            activation[i] if isinstance(activation, list) else activation
+        )
+        assert dense_layer.activation.__name__ == expected_activation, (
+            f"Expected activation {expected_activation}, "
+            f"got {dense_layer.activation.__name__}"
+        )
+
+        # Assert number of units
+        expected_units = n_units[i] if isinstance(n_units, list) else n_units
+        assert (
+            dense_layer.units == expected_units
+        ), f"Expected {expected_units} units, got {dense_layer.units}"
+
+    # Check last layer is Dropout
+    assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout"
+
+    # Assert stored constructor parameters
+    assert mlp.n_layers == n_layers
+    assert mlp.n_units == n_units
+    assert mlp.activation == activation
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "dropout_rate, n_layers",
+    [
+        (0.2, 3),
+        ([0.1, 0.2, 0.3], 3),
+        pytest.param([0.1, 0.2], 3, marks=pytest.mark.xfail(raises=AssertionError)),
+    ],
+)
+def test_mlp_dropout_rate(dropout_rate, n_layers):
+    """Test MLPNetwork dropout_rate configurations."""
+    from tensorflow.keras.layers import Dense, Dropout, Flatten, InputLayer
+    from tensorflow.keras.models import Model
+
+    mlp = MLPNetwork(n_layers=n_layers, dropout_rate=dropout_rate)
+    input_layer, output_layer = mlp.build_network((1000, 5))
+
+    # Wrap in a Model to access internal layers
+    model = Model(inputs=input_layer, outputs=output_layer)
+    layers = model.layers
+
+    # Check first two layers
+    assert isinstance(layers[0], InputLayer), "Expected first layer to be InputLayer"
+    assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten"
+
+    # Check dropout and dense layers ordering
+    for i in range(n_layers):
+        dropout_layer = layers[2 + 2 * i]
+        dense_layer = layers[3 + 2 * i]
+
+        assert isinstance(
+            dropout_layer, Dropout
+        ), f"Expected Dropout at index {2 + 2 * i}"
+        assert isinstance(dense_layer, Dense), f"Expected Dense at index {3 + 2 * i}"
+
+        # Assert dropout rates match expected values
+        expected_dropout = (
+            dropout_rate[i] if isinstance(dropout_rate, list) else dropout_rate
+        )
+        assert (
+            dropout_layer.rate == expected_dropout
+        ), f"Expected {expected_dropout}, got {dropout_layer.rate}"
+
+    assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout"
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+    "dropout_last",
+    [0.3, 0.5, pytest.param(1.2, marks=pytest.mark.xfail(raises=AssertionError))],
+)
+def test_mlp_dropout_last(dropout_last):
+    """Test MLPNetwork dropout_last configurations."""
+    from tensorflow.keras.layers import Dropout, Flatten, InputLayer
+    from tensorflow.keras.models import Model
+
+    mlp = MLPNetwork(dropout_last=dropout_last)
+    input_layer, output_layer = mlp.build_network((1000, 5))
+
+    # Wrap in a Model to access internal layers
+    model = Model(inputs=input_layer, outputs=output_layer)
+    layers = model.layers
+
+    assert isinstance(layers[0], InputLayer), "Expected first layer to be InputLayer"
+    assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten"
+    assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout"
+
+    assert (
+        layers[-1].rate == dropout_last
+    ), f"Expected {dropout_last}, got {layers[-1].rate}"
+
+
+@pytest.mark.skipif(
+    not _check_soft_dependencies(["tensorflow"], severity="none"),
+    reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize("use_bias", [True, False])
+def test_mlp_use_bias(use_bias):
+    """Test MLPNetwork use_bias configurations."""
+    from tensorflow.keras.layers import Dense, Dropout, Flatten, InputLayer
+    from tensorflow.keras.models import Model
+
+    mlp = MLPNetwork(use_bias=use_bias)
+    input_layer, output_layer = mlp.build_network((1000, 5))
+
+    # Wrap in a Model to access internal layers
+    model = Model(inputs=input_layer, outputs=output_layer)
+    layers = model.layers
+
+    assert isinstance(layers[0], InputLayer), "Expected first layer to be InputLayer"
+    assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten"
+    assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout"
+
+    # Find the last Dense layer before the final Dropout layer
+    last_dense_layer = next(
+        (layer for layer in reversed(layers) if isinstance(layer, Dense)), None
+    )
+
+    assert last_dense_layer is not None, "No Dense layer found before final Dropout"
+    assert isinstance(last_dense_layer, Dense), "Expected last layer to be Dense"
+
+    assert (
+        last_dense_layer.use_bias == use_bias
+    ), f"Expected use_bias {use_bias}, got {last_dense_layer.use_bias}"
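The same scalar-or-list convention holds for `MLPNetwork`. A sketch of a configuration these tests accept, wrapped in a Keras `Model` the same way the assertions do; the layer sizes here are arbitrary:

```python
from tensorflow.keras.models import Model

from aeon.networks import MLPNetwork

mlp = MLPNetwork(
    n_layers=3, n_units=[256, 128, 64], activation="relu", dropout_rate=0.2
)
input_layer, output_layer = mlp.build_network((1000, 5))

model = Model(inputs=input_layer, outputs=output_layer)
model.summary()  # InputLayer, Flatten, alternating Dropout/Dense, final Dropout
```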
diff --git a/aeon/transformations/series/__init__.py b/aeon/transformations/series/__init__.py
index 031073b2e6..8b71ba9fc8 100644
--- a/aeon/transformations/series/__init__.py
+++ b/aeon/transformations/series/__init__.py
@@ -6,8 +6,10 @@
     "ClaSPTransformer",
     "DFTSeriesTransformer",
     "Dobin",
+    "ExpSmoothingSeriesTransformer",
     "GaussSeriesTransformer",
     "MatrixProfileSeriesTransformer",
+    "MovingAverageSeriesTransformer",
     "PLASeriesTransformer",
     "SGSeriesTransformer",
     "StatsModelsACF",
@@ -31,8 +33,10 @@
 from aeon.transformations.series._clasp import ClaSPTransformer
 from aeon.transformations.series._dft import DFTSeriesTransformer
 from aeon.transformations.series._dobin import Dobin
+from aeon.transformations.series._exp_smoothing import ExpSmoothingSeriesTransformer
 from aeon.transformations.series._gauss import GaussSeriesTransformer
 from aeon.transformations.series._matrix_profile import MatrixProfileSeriesTransformer
+from aeon.transformations.series._moving_average import MovingAverageSeriesTransformer
 from aeon.transformations.series._pca import PCASeriesTransformer
 from aeon.transformations.series._pla import PLASeriesTransformer
 from aeon.transformations.series._scaled_logit import ScaledLogitSeriesTransformer
layer to be InputLayer" + assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten" + assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout" + + # Find the last Dense layer before the final Dropout layer + last_dense_layer = next( + (layer for layer in reversed(layers) if isinstance(layer, Dense)), None + ) + + assert last_dense_layer is not None, "No Dense layer found before final Dropout" + assert isinstance(last_dense_layer, Dense), "Expected last layer to be Dense" + + assert ( + last_dense_layer.use_bias == use_bias + ), f"Expected use_bias {use_bias}, got {last_dense_layer.use_bias}" diff --git a/aeon/transformations/series/__init__.py b/aeon/transformations/series/__init__.py index 031073b2e6..8b71ba9fc8 100644 --- a/aeon/transformations/series/__init__.py +++ b/aeon/transformations/series/__init__.py @@ -6,8 +6,10 @@ "ClaSPTransformer", "DFTSeriesTransformer", "Dobin", + "ExpSmoothingSeriesTransformer", "GaussSeriesTransformer", "MatrixProfileSeriesTransformer", + "MovingAverageSeriesTransformer", "PLASeriesTransformer", "SGSeriesTransformer", "StatsModelsACF", @@ -31,8 +33,10 @@ from aeon.transformations.series._clasp import ClaSPTransformer from aeon.transformations.series._dft import DFTSeriesTransformer from aeon.transformations.series._dobin import Dobin +from aeon.transformations.series._exp_smoothing import ExpSmoothingSeriesTransformer from aeon.transformations.series._gauss import GaussSeriesTransformer from aeon.transformations.series._matrix_profile import MatrixProfileSeriesTransformer +from aeon.transformations.series._moving_average import MovingAverageSeriesTransformer from aeon.transformations.series._pca import PCASeriesTransformer from aeon.transformations.series._pla import PLASeriesTransformer from aeon.transformations.series._scaled_logit import ScaledLogitSeriesTransformer diff --git a/docs/api_reference/transformations.rst b/docs/api_reference/transformations.rst index fa3184af7b..2a56fd847f 100644 --- a/docs/api_reference/transformations.rst +++ b/docs/api_reference/transformations.rst @@ -165,8 +165,10 @@ Series transforms ClaSPTransformer DFTSeriesTransformer Dobin + ExpSmoothingSeriesTransformer GaussSeriesTransformer MatrixProfileSeriesTransformer + MovingAverageSeriesTransformer PLASeriesTransformer SGSeriesTransformer StatsModelsACF