diff --git a/.all-contributorsrc b/.all-contributorsrc
index 859fb1e5e8..10fb4bd7ed 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -2656,6 +2656,24 @@
"contributions": [
"doc"
]
+ },
+ {
+ "login": "shinymack",
+ "name": "Akash Kawle",
+ "avatar_url": "https://avatars.githubusercontent.com/u/128881349?v=4",
+ "profile": "https://github.com/shinymack",
+ "contributions": [
+ "code"
+ ]
+ },
+ {
+ "login": "kevinzb56",
+ "name": "Kevin Shah",
+ "avatar_url": "https://avatars.githubusercontent.com/u/161136814?v=4",
+ "profile": "https://github.com/kevinzb56",
+ "contributions": [
+ "doc"
+ ]
}
],
"commitType": "docs"
diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
index 95435746d4..3c57528fc5 100644
--- a/.github/workflows/scorecard.yml
+++ b/.github/workflows/scorecard.yml
@@ -27,7 +27,7 @@ jobs:
persist-credentials: false
- name: Run analysis
- uses: ossf/scorecard-action@v2.4.0
+ uses: ossf/scorecard-action@v2.4.1
with:
results_file: results.sarif
results_format: sarif
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index aece71f7c1..4349512761 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -29,7 +29,7 @@ repos:
args: [ "--create", "--python-folders", "aeon" ]
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.9.5
+ rev: v0.9.7
hooks:
- id: ruff
args: [ "--fix"]
@@ -48,7 +48,7 @@ repos:
args: [ "--profile=black", "--multi-line=3" ]
- repo: https://github.com/pycqa/flake8
- rev: 7.1.1
+ rev: 7.1.2
hooks:
- id: flake8
additional_dependencies: [ flake8-bugbear, flake8-print, Flake8-pyproject ]
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 91c7468d42..1c9b7a4d12 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -1,7 +1,7 @@
# Contributors
-[![All Contributors](…)](#contributors)
+[![All Contributors](…)](#contributors)
This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
@@ -28,12 +28,13 @@
+  Akash Kawle 💻 |
@@ -161,179 +162,180 @@
+  Kevin Shah 📖 |

[The remaining hunks of this file re-render each existing contributor row with an updated avatar image link; the avatar images and per-contributor emoji keys are not reproduced here.]
diff --git a/aeon/base/_base_collection.py b/aeon/base/_base_collection.py
index ea3b21ed32..4d7f4b4564 100644
--- a/aeon/base/_base_collection.py
+++ b/aeon/base/_base_collection.py
@@ -1,4 +1,24 @@
-"""Base class for estimators that fit collections of time series."""
+"""
+Base class for estimators that fit collections of time series.
+
+ class name: BaseCollectionEstimator
+
+Defining methods:
+ preprocessing - _preprocess_collection(self, X, store_metadata=True)
+ input checking - _check_X(self, X)
+ input conversion - _convert_X(self, X)
+ shape checking - _check_shape(self, X)
+
+Inherited inspection methods:
+ hyper-parameter inspection - get_params()
+ fitted parameter inspection - get_fitted_params()
+
+State:
+ fitted model/strategy - by convention, any attributes ending in "_"
+ fitted state flag - is_fitted (property)
+ fitted state inspection - check_is_fitted()
+
+"""
from abc import abstractmethod
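The expanded module docstring doubles as a quick reference for the collection-estimator workflow. Below is a minimal sketch of that lifecycle from the caller's side, using `Catch22Classifier` (modified later in this patch) as the concrete estimator; it assumes only the methods and flags the docstring names.

```python
# Sketch of the documented lifecycle: fit() drives the preprocessing chain
# (_preprocess_collection -> _check_X -> _convert_X) internally, trailing
# underscores mark fitted attributes, and is_fitted / check_is_fitted()
# expose fitted state.
import numpy as np

from aeon.classification.feature_based import Catch22Classifier

X = np.random.default_rng(0).normal(size=(10, 1, 12))  # (n_cases, n_channels, n_timepoints)
y = np.array([0, 1] * 5)

clf = Catch22Classifier()
clf.fit(X, y)            # input checking and conversion happen here
print(clf.is_fitted)     # True once fit has run
clf.check_is_fitted()    # would raise if called before fit
```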
diff --git a/aeon/classification/deep_learning/base.py b/aeon/classification/deep_learning/base.py
index 2ed56bc0bc..61ddeb3a72 100644
--- a/aeon/classification/deep_learning/base.py
+++ b/aeon/classification/deep_learning/base.py
@@ -1,6 +1,23 @@
"""
Abstract base class for the Keras neural network classifiers.
+ class name: BaseDeepClassifier
+
+Defining methods:
+ fitting - fit(self, X, y)
+ predicting - predict(self, X)
+ - predict_proba(self, X)
+ model building - build_model(self, input_shape, n_classes) (abstract method)
+
+Inherited inspection methods:
+ hyper-parameter inspection - get_params()
+ fitted parameter inspection - get_fitted_params()
+
+State:
+ fitted model/strategy - by convention, any attributes ending in "_"
+ fitted state flag - is_fitted (property)
+ fitted state inspection - check_is_fitted()
+
The reason for this class between BaseClassifier and deep_learning classifiers is
because we can generalise tags, _predict and _predict_proba
"""
diff --git a/aeon/classification/feature_based/_catch22.py b/aeon/classification/feature_based/_catch22.py
index bfad28dd44..bec5deff88 100644
--- a/aeon/classification/feature_based/_catch22.py
+++ b/aeon/classification/feature_based/_catch22.py
@@ -67,6 +67,17 @@ class Catch22Classifier(BaseClassifier):
if None a 'prefer' value of "threads" is used by default.
Valid options are "loky", "multiprocessing", "threading" or a custom backend.
See the joblib Parallel documentation for more details.
+ class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, default=None
+ From sklearn documentation:
+ If not given, all classes are supposed to have weight one.
+ The "balanced" mode uses the values of y to automatically adjust weights
+ inversely proportional to class frequencies in the input data as
+ n_samples / (n_classes * np.bincount(y)).
+ The "balanced_subsample" mode is the same as "balanced" except that weights
+ are computed based on the bootstrap sample for every tree grown.
+ For multi-output, the weights of each column of y will be multiplied.
+ Note that these weights will be multiplied with sample_weight (passed through
+ the fit method) if sample_weight is specified.
Attributes
----------
@@ -132,6 +143,7 @@ def __init__(
random_state=None,
n_jobs=1,
parallel_backend=None,
+ class_weight=None,
):
self.features = features
self.catch24 = catch24
@@ -142,6 +154,7 @@ def __init__(
self.random_state = random_state
self.n_jobs = n_jobs
self.parallel_backend = parallel_backend
+ self.class_weight = class_weight
super().__init__()
@@ -175,7 +188,7 @@ def _fit(self, X, y):
self.estimator_ = _clone_estimator(
(
- RandomForestClassifier(n_estimators=200)
+ RandomForestClassifier(n_estimators=200, class_weight=self.class_weight)
if self.estimator is None
else self.estimator
),
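With this change, `class_weight` is forwarded to the default `RandomForestClassifier`; the same forwarding is repeated for the signature, summary, and TSFresh classifiers below. A quick numeric check of the `"balanced"` formula quoted in the docstring, plus hypothetical usage; the `make_example_3d_numpy` import path is assumed from the tests later in this patch.

```python
import numpy as np

from aeon.classification.feature_based import Catch22Classifier
from aeon.testing.data_generation import make_example_3d_numpy

# n_samples / (n_classes * np.bincount(y)) from the docstring, by hand:
y = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])             # 8-vs-2 imbalance
weights = len(y) / (len(np.unique(y)) * np.bincount(y))
print(weights)                                            # [0.625 2.5]

X, _ = make_example_3d_numpy(n_cases=10, n_channels=1, n_timepoints=12,
                             return_y=True, random_state=0)
clf = Catch22Classifier(class_weight="balanced", random_state=0)
clf.fit(X, y)  # the default forest now trains with balanced class weights
```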
diff --git a/aeon/classification/feature_based/_signature_classifier.py b/aeon/classification/feature_based/_signature_classifier.py
index 88308436f5..a3f659efcf 100644
--- a/aeon/classification/feature_based/_signature_classifier.py
+++ b/aeon/classification/feature_based/_signature_classifier.py
@@ -61,6 +61,17 @@ class SignatureClassifier(BaseClassifier):
Signature truncation depth.
random_state : int, default=None
If `int`, random_state is the seed used by the random number generator;
+ class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, default=None
+ From sklearn documentation:
+ If not given, all classes are supposed to have weight one.
+ The "balanced" mode uses the values of y to automatically adjust weights
+ inversely proportional to class frequencies in the input data as
+ n_samples / (n_classes * np.bincount(y)).
+ The "balanced_subsample" mode is the same as "balanced" except that weights
+ are computed based on the bootstrap sample for every tree grown.
+ For multi-output, the weights of each column of y will be multiplied.
+ Note that these weights will be multiplied with sample_weight (passed through
+ the fit method) if sample_weight is specified.
Attributes
----------
@@ -105,6 +116,7 @@ def __init__(
sig_tfm="signature",
depth=4,
random_state=None,
+ class_weight=None,
):
self.estimator = estimator
self.augmentation_list = augmentation_list
@@ -116,7 +128,7 @@ def __init__(
self.sig_tfm = sig_tfm
self.depth = depth
self.random_state = random_state
-
+ self.class_weight = class_weight
super().__init__()
self.signature_method = SignatureTransformer(
@@ -135,7 +147,9 @@ def _setup_classification_pipeline(self):
"""Set up the full signature method pipeline."""
# Use rf if no classifier is set
if self.estimator is None:
- classifier = RandomForestClassifier(random_state=self.random_state)
+ classifier = RandomForestClassifier(
+ random_state=self.random_state, class_weight=self.class_weight
+ )
else:
classifier = _clone_estimator(self.estimator, self.random_state)
diff --git a/aeon/classification/feature_based/_summary.py b/aeon/classification/feature_based/_summary.py
index a4f34ff688..b6e0056392 100644
--- a/aeon/classification/feature_based/_summary.py
+++ b/aeon/classification/feature_based/_summary.py
@@ -43,6 +43,17 @@ class SummaryClassifier(BaseClassifier):
If `RandomState` instance, random_state is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
+ class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, default=None
+ From sklearn documentation:
+ If not given, all classes are supposed to have weight one.
+ The "balanced" mode uses the values of y to automatically adjust weights
+ inversely proportional to class frequencies in the input data as
+ n_samples / (n_classes * np.bincount(y)).
+ The "balanced_subsample" mode is the same as "balanced" except that weights
+ are computed based on the bootstrap sample for every tree grown.
+ For multi-output, the weights of each column of y will be multiplied.
+ Note that these weights will be multiplied with sample_weight (passed through
+ the fit method) if sample_weight is specified.
Attributes
----------
@@ -85,6 +96,7 @@ def __init__(
estimator=None,
n_jobs=1,
random_state=None,
+ class_weight=None,
):
self.summary_stats = summary_stats
self.estimator = estimator
@@ -92,6 +104,8 @@ def __init__(
self.n_jobs = n_jobs
self.random_state = random_state
+ self.class_weight = class_weight
+
super().__init__()
def _fit(self, X, y):
@@ -120,7 +134,7 @@ def _fit(self, X, y):
self.estimator_ = _clone_estimator(
(
- RandomForestClassifier(n_estimators=200)
+ RandomForestClassifier(n_estimators=200, class_weight=self.class_weight)
if self.estimator is None
else self.estimator
),
diff --git a/aeon/classification/feature_based/_tsfresh.py b/aeon/classification/feature_based/_tsfresh.py
index 28dc2dac11..00021da5d8 100644
--- a/aeon/classification/feature_based/_tsfresh.py
+++ b/aeon/classification/feature_based/_tsfresh.py
@@ -46,6 +46,17 @@ class TSFreshClassifier(BaseClassifier):
If `RandomState` instance, random_state is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
+ class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, default=None
+ From sklearn documentation:
+ If not given, all classes are supposed to have weight one.
+ The "balanced" mode uses the values of y to automatically adjust weights
+ inversely proportional to class frequencies in the input data as
+ n_samples / (n_classes * np.bincount(y)).
+ The "balanced_subsample" mode is the same as "balanced" except that weights
+ are computed based on the bootstrap sample for every tree grown.
+ For multi-output, the weights of each column of y will be multiplied.
+ Note that these weights will be multiplied with sample_weight (passed through
+ the fit method) if sample_weight is specified.
Attributes
----------
@@ -86,6 +97,7 @@ def __init__(
n_jobs=1,
chunksize=None,
random_state=None,
+ class_weight=None,
):
self.default_fc_parameters = default_fc_parameters
self.relevant_feature_extractor = relevant_feature_extractor
@@ -99,6 +111,7 @@ def __init__(
self._transformer = None
self._return_majority_class = False
self._majority_class = 0
+ self.class_weight = class_weight
super().__init__()
@@ -137,7 +150,7 @@ def _fit(self, X, y):
)
self.estimator_ = _clone_estimator(
(
- RandomForestClassifier(n_estimators=200)
+ RandomForestClassifier(n_estimators=200, class_weight=self.class_weight)
if self.estimator is None
else self.estimator
),
diff --git a/aeon/classification/feature_based/tests/test_catch22.py b/aeon/classification/feature_based/tests/test_catch22.py
index 5a709fe4ea..c8067ee57b 100644
--- a/aeon/classification/feature_based/tests/test_catch22.py
+++ b/aeon/classification/feature_based/tests/test_catch22.py
@@ -1,6 +1,7 @@
"""Test catch 22 classifier."""
import numpy as np
+import pytest
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RidgeClassifier
@@ -19,3 +20,21 @@ def test_catch22():
c22.fit(X, y)
p = c22.predict_proba(X)
assert np.all(np.isin(p, [0, 1]))
+
+
+@pytest.mark.parametrize("class_weight", ["balanced", "balanced_subsample"])
+def test_catch22_classifier_with_class_weight(class_weight):
+ """Test catch22 classifier with class weight."""
+ X, y = make_example_3d_numpy(
+ n_cases=10, n_channels=1, n_timepoints=12, return_y=True, random_state=0
+ )
+ clf = Catch22Classifier(
+ estimator=RandomForestClassifier(n_estimators=5),
+ outlier_norm=True,
+ random_state=0,
+ class_weight=class_weight,
+ )
+ clf.fit(X, y)
+ predictions = clf.predict(X)
+ assert len(predictions) == len(y)
+ assert set(predictions).issubset(set(y))
diff --git a/aeon/classification/feature_based/tests/test_signature.py b/aeon/classification/feature_based/tests/test_signature.py
index b5c29df2d3..2d3d2972d0 100644
--- a/aeon/classification/feature_based/tests/test_signature.py
+++ b/aeon/classification/feature_based/tests/test_signature.py
@@ -18,3 +18,24 @@ def test_signature_classifier():
cls = SignatureClassifier(estimator=None)
cls._fit(X, y)
assert isinstance(cls.pipeline.named_steps["classifier"], RandomForestClassifier)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies("esig", severity="none"),
+ reason="skip test if required soft dependency esig not available",
+)
+@pytest.mark.parametrize("class_weight", ["balanced", "balanced_subsample"])
+def test_signature_classifier_with_class_weight(class_weight):
+ """Test signature classifier with class weight."""
+ X, y = make_example_3d_numpy(
+ n_cases=10, n_channels=1, n_timepoints=12, return_y=True, random_state=0
+ )
+ clf = SignatureClassifier(
+ estimator=RandomForestClassifier(n_estimators=5),
+ random_state=0,
+ class_weight=class_weight,
+ )
+ clf.fit(X, y)
+ predictions = clf.predict(X)
+ assert len(predictions) == len(y)
+ assert set(predictions).issubset(set(y))
diff --git a/aeon/classification/feature_based/tests/test_summary.py b/aeon/classification/feature_based/tests/test_summary.py
index de698e61cc..0f53130ce0 100644
--- a/aeon/classification/feature_based/tests/test_summary.py
+++ b/aeon/classification/feature_based/tests/test_summary.py
@@ -1,6 +1,7 @@
"""Test summary classifier."""
import numpy as np
+import pytest
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RidgeClassifier
@@ -19,3 +20,20 @@ def test_summary_classifier():
cls.fit(X, y)
p = cls.predict_proba(X)
assert np.all(np.isin(p, [0, 1]))
+
+
+@pytest.mark.parametrize("class_weight", ["balanced", "balanced_subsample"])
+def test_summary_classifier_with_class_weight(class_weight):
+ """Test summary classifier with class weight."""
+ X, y = make_example_3d_numpy(
+ n_cases=10, n_channels=1, n_timepoints=12, return_y=True, random_state=0
+ )
+ clf = SummaryClassifier(
+ estimator=RandomForestClassifier(n_estimators=5),
+ random_state=0,
+ class_weight=class_weight,
+ )
+ clf.fit(X, y)
+ predictions = clf.predict(X)
+ assert len(predictions) == len(y)
+ assert set(predictions).issubset(set(y))
diff --git a/aeon/classification/feature_based/tests/test_tsfresh.py b/aeon/classification/feature_based/tests/test_tsfresh.py
index 3ab965e6a3..92583e6662 100644
--- a/aeon/classification/feature_based/tests/test_tsfresh.py
+++ b/aeon/classification/feature_based/tests/test_tsfresh.py
@@ -37,3 +37,24 @@ def test_tsfresh_classifier():
assert cls._majority_class in [0, 1]
cls.verbose = 1
cls.fit(X, y)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies("tsfresh", severity="none"),
+ reason="skip test if required soft dependency tsfresh not available",
+)
+@pytest.mark.parametrize("class_weight", ["balanced", "balanced_subsample"])
+def test_tsfresh_classifier_with_class_weight(class_weight):
+ """Test tsfresh classifier with class weight."""
+ X, y = make_example_3d_numpy(
+ n_cases=10, n_channels=1, n_timepoints=12, return_y=True, random_state=0
+ )
+ clf = TSFreshClassifier(
+ estimator=RandomForestClassifier(n_estimators=5),
+ random_state=0,
+ class_weight=class_weight,
+ )
+ clf.fit(X, y)
+ predictions = clf.predict(X)
+ assert len(predictions) == len(y)
+ assert set(predictions).issubset(set(y))
diff --git a/aeon/clustering/deep_learning/_ae_fcn.py b/aeon/clustering/deep_learning/_ae_fcn.py
index a37a7d40a1..48c35f3dab 100644
--- a/aeon/clustering/deep_learning/_ae_fcn.py
+++ b/aeon/clustering/deep_learning/_ae_fcn.py
@@ -317,6 +317,7 @@ def _fit(self, X):
outputs=X,
batch_size=mini_batch_size,
epochs=self.n_epochs,
+ verbose=self.verbose,
)
try:
@@ -345,6 +346,7 @@ def _fit_multi_rec_model(
outputs,
batch_size,
epochs,
+ verbose,
):
import tensorflow as tf
@@ -451,9 +453,10 @@ def loss(y_true, y_pred):
epoch_loss /= num_batches
history["loss"].append(epoch_loss)
- sys.stdout.write(
- "Training loss at epoch %d: %.4f\n" % (epoch, float(epoch_loss))
- )
+ if verbose:
+ sys.stdout.write(
+ "Training loss at epoch %d: %.4f\n" % (epoch, float(epoch_loss))
+ )
for callback in self.callbacks_:
callback.on_epoch_end(epoch, {"loss": float(epoch_loss)})
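Both autoencoder clusterers get the same fix: the custom reconstruction loop now receives `verbose` and writes the per-epoch loss only when asked, while still recording it in the history and notifying callbacks. The gating pattern in isolation (names here are illustrative, not from the patch):

```python
import sys


def run_epochs(epoch_losses, verbose=False):
    """Record per-epoch losses; print them only when verbose is set."""
    history = {"loss": []}
    for epoch, epoch_loss in enumerate(epoch_losses):
        history["loss"].append(epoch_loss)
        if verbose:
            sys.stdout.write(
                "Training loss at epoch %d: %.4f\n" % (epoch, float(epoch_loss))
            )
    return history


run_epochs([0.9, 0.5, 0.3])                 # silent by default now
run_epochs([0.9, 0.5, 0.3], verbose=True)   # one line per epoch
```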
diff --git a/aeon/clustering/deep_learning/_ae_resnet.py b/aeon/clustering/deep_learning/_ae_resnet.py
index 868e47d846..bd38deb4c6 100644
--- a/aeon/clustering/deep_learning/_ae_resnet.py
+++ b/aeon/clustering/deep_learning/_ae_resnet.py
@@ -329,6 +329,7 @@ def _fit(self, X):
outputs=X,
batch_size=mini_batch_size,
epochs=self.n_epochs,
+ verbose=self.verbose,
)
try:
@@ -359,6 +360,7 @@ def _fit_multi_rec_model(
outputs,
batch_size,
epochs,
+ verbose,
):
import tensorflow as tf
@@ -463,9 +465,10 @@ def loss(y_true, y_pred):
epoch_loss /= num_batches
history["loss"].append(epoch_loss)
- sys.stdout.write(
- "Training loss at epoch %d: %.4f\n" % (epoch, float(epoch_loss))
- )
+ if verbose:
+ sys.stdout.write(
+ "Training loss at epoch %d: %.4f\n" % (epoch, float(epoch_loss))
+ )
for callback in self.callbacks_:
callback.on_epoch_end(epoch, {"loss": float(epoch_loss)})
diff --git a/aeon/networks/tests/test_ae_fcn.py b/aeon/networks/tests/test_ae_fcn.py
new file mode 100644
index 0000000000..6f19820d02
--- /dev/null
+++ b/aeon/networks/tests/test_ae_fcn.py
@@ -0,0 +1,288 @@
+"""Test for the AEFCNNetwork class."""
+
+import pytest
+
+from aeon.networks import AEFCNNetwork
+from aeon.utils.validation._dependencies import _check_soft_dependencies
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+def test_aefcn_default():
+ """Default testing for aefcn."""
+ model = AEFCNNetwork()
+ assert model.latent_space_dim == 128
+ assert model.temporal_latent_space is False
+ assert model.n_layers == 3
+ assert model.n_filters is None
+ assert model.kernel_size is None
+ assert model.activation == "relu"
+ assert model.padding == "same"
+ assert model.strides == 1
+ assert model.dilation_rate == 1
+ assert model.use_bias is True
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize("latent_space_dim", [64, 128, 256])
+def test_aefcn_latent_space(latent_space_dim):
+ """Test AEFCNNetwork with different latent space dimensions."""
+ import tensorflow as tf
+
+ aefcn = AEFCNNetwork(latent_space_dim=latent_space_dim)
+ encoder, decoder = aefcn.build_network((1000, 5))
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "kernel_size, should_raise",
+ [
+ ([8, 5, 3], False),
+ (3, False),
+ ([5, 5], True),
+ ([3, 3, 3, 3], True),
+ ],
+)
+def test_aefcnnetwork_kernel_size(kernel_size, should_raise):
+ """Test AEFCNNetwork with different kernel sizes."""
+ import tensorflow as tf
+
+ if should_raise:
+ with pytest.raises(
+ ValueError,
+ match="Number of kernels .* should be the same as number of layers",
+ ):
+ AEFCNNetwork(kernel_size=kernel_size, n_layers=3).build_network((1000, 5))
+ else:
+ aefcn = AEFCNNetwork(kernel_size=kernel_size, n_layers=3)
+ encoder, decoder = aefcn.build_network((1000, 5))
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "n_filters, should_raise",
+ [
+ ([128, 256, 128], False),
+ (32, False),
+ ([32, 64], True),
+ ([16, 32, 64, 128], True),
+ ],
+)
+def test_aefcnnetwork_n_filters(n_filters, should_raise):
+ """Test AEFCNNetwork with different number of filters."""
+ import tensorflow as tf
+
+ if should_raise:
+ with pytest.raises(
+ ValueError,
+ match="Number of filters .* should be the same as number of layers",
+ ):
+ AEFCNNetwork(n_filters=n_filters, n_layers=3).build_network((1000, 5))
+ else:
+ aefcn = AEFCNNetwork(n_filters=n_filters, n_layers=3)
+ encoder, decoder = aefcn.build_network((1000, 5))
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "dilation_rate, should_raise",
+ [
+ ([1, 2, 1], False),
+ (2, False),
+ ([1, 2], True),
+ ([1, 2, 2, 1], True),
+ ],
+)
+def test_aefcnnetwork_dilation_rate(dilation_rate, should_raise):
+ """Test AEFCNNetwork with different dilation rates."""
+ import tensorflow as tf
+
+ if should_raise:
+ with pytest.raises(
+ ValueError,
+ match="Number of dilations .* should be the same as number of layers",
+ ):
+ AEFCNNetwork(dilation_rate=dilation_rate, n_layers=3).build_network(
+ (1000, 5)
+ )
+ else:
+ aefcn = AEFCNNetwork(dilation_rate=dilation_rate, n_layers=3)
+ encoder, decoder = aefcn.build_network((1000, 5))
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "strides, should_raise",
+ [
+ ([1, 2, 1], False),
+ (2, False),
+ ([1, 2], True),
+ ([1, 2, 2, 1], True),
+ ],
+)
+def test_aefcnnetwork_strides(strides, should_raise):
+ """Test AEFCNNetwork with different strides."""
+ import tensorflow as tf
+
+ if should_raise:
+ with pytest.raises(
+ ValueError,
+ match="Number of strides .* should be the same as number of layers",
+ ):
+ AEFCNNetwork(strides=strides, n_layers=3).build_network((1000, 5))
+ else:
+ aefcn = AEFCNNetwork(strides=strides, n_layers=3)
+ encoder, decoder = aefcn.build_network((1000, 5))
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "padding, should_raise",
+ [
+ (["same", "valid", "same"], False),
+ ("same", False),
+ (["same", "valid"], True),
+ (
+ ["same", "valid", "same", "valid"],
+ True,
+ ),
+ ],
+)
+def test_aefcnnetwork_padding(padding, should_raise):
+ """Test AEFCNNetwork with different paddings."""
+ import tensorflow as tf
+
+ if should_raise:
+ with pytest.raises(
+ ValueError,
+ match="Number of paddings .* should be the same as number of layers",
+ ):
+ AEFCNNetwork(padding=padding, n_layers=3).build_network((1000, 5))
+ else:
+ aefcn = AEFCNNetwork(padding=padding, n_layers=3)
+ encoder, decoder = aefcn.build_network((1000, 5))
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "activation, should_raise",
+ [
+ (["relu", "sigmoid", "tanh"], False),
+ ("sigmoid", False),
+ (["relu", "sigmoid"], True),
+ (
+ ["relu", "sigmoid", "tanh", "softmax"],
+ True,
+ ),
+ ],
+)
+def test_aefcnnetwork_activation(activation, should_raise):
+ """Test AEFCNNetwork with different activations."""
+ import tensorflow as tf
+
+ if should_raise:
+ with pytest.raises(
+ ValueError,
+ match="Number of activations .* should be the same as number of layers",
+ ):
+ AEFCNNetwork(activation=activation, n_layers=3).build_network((1000, 5))
+ else:
+ aefcn = AEFCNNetwork(activation=activation, n_layers=3)
+ encoder, decoder = aefcn.build_network((1000, 5))
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "use_bias, should_raise",
+ [
+ ([True, False, True], False),
+ (True, False),
+ ([True, False], True),
+ ([True, False, True, False], True),
+ ],
+)
+def test_aefcnnetwork_use_bias(use_bias, should_raise):
+ """Test AEFCNNetwork with different use_bias values."""
+ import tensorflow as tf
+
+ if should_raise:
+ with pytest.raises(
+ ValueError,
+ match="Number of biases .* should be the same as number of layers",
+ ):
+ AEFCNNetwork(use_bias=use_bias, n_layers=3).build_network((1000, 5))
+ else:
+ aefcn = AEFCNNetwork(use_bias=use_bias, n_layers=3)
+ encoder, decoder = aefcn.build_network((1000, 5))
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize("temporal_latent_space", [True, False])
+def test_aefcnnetwork_temporal_latent_space(temporal_latent_space):
+ """Test for temporal latent space."""
+ import tensorflow as tf
+
+ input_shape = (1000, 5)
+
+ aefcn = AEFCNNetwork(
+ latent_space_dim=128, temporal_latent_space=temporal_latent_space
+ )
+
+ encoder, decoder = aefcn.build_network(input_shape)
+
+ assert isinstance(encoder, tf.keras.models.Model)
+ assert isinstance(decoder, tf.keras.models.Model)
+
+ if temporal_latent_space:
+ assert any(
+ isinstance(layer, tf.keras.layers.Conv1D) for layer in encoder.layers
+ ), "Expected Conv1D layer in encoder but not found."
+ else:
+ assert any(
+ isinstance(layer, tf.keras.layers.Dense) for layer in decoder.layers
+ ), "Expected Dense layer in decoder but not found."
diff --git a/aeon/networks/tests/test_cnn.py b/aeon/networks/tests/test_cnn.py
deleted file mode 100644
index c859397b34..0000000000
--- a/aeon/networks/tests/test_cnn.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""Tests for the CNN Model."""
-
-import pytest
-
-from aeon.networks import TimeCNNNetwork
-from aeon.utils.validation._dependencies import _check_soft_dependencies
-
-__maintainer__ = []
-
-
-@pytest.mark.skipif(
- not _check_soft_dependencies(["tensorflow"], severity="none"),
- reason="Tensorflow soft dependency unavailable.",
-)
-def test_cnn_input_shape_padding():
- """Test of CNN network with input_shape < 60."""
- input_shape = (40, 2)
- network = TimeCNNNetwork()
- input_layer, output_layer = network.build_network(input_shape=input_shape)
-
- assert input_layer is not None
- assert output_layer is not None
diff --git a/aeon/networks/tests/test_fcn.py b/aeon/networks/tests/test_fcn.py
new file mode 100644
index 0000000000..60c1cd42f5
--- /dev/null
+++ b/aeon/networks/tests/test_fcn.py
@@ -0,0 +1,196 @@
+"""Test for the FCNNetwork class."""
+
+import pytest
+
+from aeon.networks import FCNNetwork
+from aeon.utils.validation._dependencies import _check_soft_dependencies
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+def test_fcnnetwork_valid():
+ """Test FCNNetwork with valid configurations."""
+ input_shape = (100, 5)
+ model = FCNNetwork(n_layers=3)
+ input_layer, output_layer = model.build_network(input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "activation, should_raise",
+ [
+ (["relu", "sigmoid", "tanh"], False),
+ (["relu", "sigmoid"], True),
+ (
+ ["relu", "sigmoid", "tanh", "softmax"],
+ True,
+ ),
+ ("relu", False),
+ ("sigmoid", False),
+ ("tanh", False),
+ ("softmax", False),
+ ],
+)
+def test_fcnnetwork_activation(activation, should_raise):
+ """Test FCNNetwork with valid and invalid activation configurations."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ model = FCNNetwork(activation=activation)
+ model.build_network(input_shape)
+ else:
+ model = FCNNetwork(activation=activation)
+ input_layer, output_layer = model.build_network(input_shape)
+
+ assert hasattr(input_layer, "shape")
+
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "kernel_size, should_raise",
+ [
+ ([3, 1, 2], False),
+ ([1, 3], True),
+ ([3, 1, 1, 3], True),
+ (3, False),
+ ],
+)
+def test_fcnnetwork_kernel_size(kernel_size, should_raise):
+ """Test FCNNetwork with valid and invalid kernel_size configurations."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ model = FCNNetwork(kernel_size=kernel_size, n_layers=3)
+ model.build_network(input_shape)
+ else:
+ model = FCNNetwork(kernel_size=kernel_size, n_layers=3)
+ input_layer, output_layer = model.build_network(input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "dilation_rate, should_raise",
+ [
+ ([1, 2, 1], False),
+ ([1, 4], True),
+ ([1, 2, 4, 1], True),
+ (1, False),
+ ],
+)
+def test_fcnnetwork_dilation_rate(dilation_rate, should_raise):
+ """Test FCNNetwork with valid and invalid dilation_rate configurations."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ model = FCNNetwork(dilation_rate=dilation_rate, n_layers=3)
+ model.build_network(input_shape)
+ else:
+ model = FCNNetwork(dilation_rate=dilation_rate, n_layers=3)
+ input_layer, output_layer = model.build_network(input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "strides, should_raise",
+ [
+ ([1, 2, 3], False),
+ ([1, 1], True),
+ ([1, 2, 2, 1], True),
+ (1, False),
+ ],
+)
+def test_fcnnetwork_strides(strides, should_raise):
+ """Test FCNNetwork with valid and invalid strides configurations."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ model = FCNNetwork(strides=strides, n_layers=3)
+ model.build_network(input_shape)
+ else:
+ model = FCNNetwork(strides=strides, n_layers=3)
+ input_layer, output_layer = model.build_network(input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "padding, should_raise",
+ [
+ (["same", "same", "valid"], False),
+ (["valid", "same"], True),
+ (["same", "valid", "same", "valid"], True),
+ ("same", False),
+ ("valid", False),
+ ],
+)
+def test_fcnnetwork_padding(padding, should_raise):
+ """Test FCNNetwork with valid and invalid padding configurations."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ model = FCNNetwork(padding=padding, n_layers=3)
+ model.build_network(input_shape)
+ else:
+ model = FCNNetwork(padding=padding, n_layers=3)
+ input_layer, output_layer = model.build_network(input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "n_filters, should_raise",
+ [
+ ([32, 64, 128], False), # Valid case with a list of filters
+ ([32, 64], True), # Invalid case with fewer filters than layers
+ ([32, 64, 128, 256], True), # Invalid case with more filters than layers
+ (32, False), # Valid case with a single filter value
+ ],
+)
+def test_fcnnetwork_n_filters(n_filters, should_raise):
+ """Test FCNNetwork with valid and invalid n_filters configurations."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ model = FCNNetwork(n_filters=n_filters, n_layers=3)
+ model.build_network(input_shape)
+ else:
+ model = FCNNetwork(n_filters=n_filters, n_layers=3)
+ input_layer, output_layer = model.build_network(input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
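As with the autoencoder tests, these checks end at layer shapes. One way the FCN backbone is typically consumed, sketched with an assumed two-class softmax head (the head is ours, not part of the patch):

```python
import tensorflow as tf

from aeon.networks import FCNNetwork

input_layer, output_layer = FCNNetwork(n_layers=3).build_network((100, 5))
head = tf.keras.layers.Dense(2, activation="softmax")(output_layer)
model = tf.keras.models.Model(inputs=input_layer, outputs=head)
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()  # backbone plus classification head
```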
diff --git a/aeon/networks/tests/test_mlp.py b/aeon/networks/tests/test_mlp.py
new file mode 100644
index 0000000000..421a4f2841
--- /dev/null
+++ b/aeon/networks/tests/test_mlp.py
@@ -0,0 +1,179 @@
+"""Tests for the MLPNetwork Model."""
+
+import pytest
+
+from aeon.networks import MLPNetwork
+from aeon.utils.validation._dependencies import _check_soft_dependencies
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "n_layers, n_units, activation",
+ [
+ (3, 500, "relu"),
+ (5, [256, 128, 128, 64, 32], "sigmoid"),
+ (2, 128, ["tanh", "relu"]),
+ ],
+)
+def test_mlp_initialization(n_layers, n_units, activation):
+ """Test whether MLPNetwork initializes correctly with different configurations."""
+ from tensorflow.keras.layers import Dense, Dropout, Flatten, InputLayer
+ from tensorflow.keras.models import Model
+
+ mlp = MLPNetwork(n_layers=n_layers, n_units=n_units, activation=activation)
+ input_layer, output_layer = mlp.build_network((1000, 5))
+
+ # Wrap in a Model to access internal layers
+ model = Model(inputs=input_layer, outputs=output_layer)
+ layers = model.layers
+
+ assert isinstance(layers[0], InputLayer), "Expected first layer to be InputLayer"
+
+ assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten"
+
+ # Check dropout and dense layers ordering
+ for i in range(n_layers):
+ dropout_layer = layers[2 + 2 * i] # Dropout before Dense
+ dense_layer = layers[3 + 2 * i] # Dense comes after Dropout
+
+ assert isinstance(
+ dropout_layer, Dropout
+ ), f"Expected Dropout at index {2 + 2 * i}"
+ assert isinstance(dense_layer, Dense), f"Expected Dense at index {3 + 2 * i}"
+
+ # Assert activation function
+ expected_activation = (
+ activation[i] if isinstance(activation, list) else activation
+ )
+ assert dense_layer.activation.__name__ == expected_activation, (
+ f"Expected activation {expected_activation}, "
+ f"got {dense_layer.activation.__name__}"
+ )
+
+ # Assert number of units
+ expected_units = n_units[i] if isinstance(n_units, list) else n_units
+ assert (
+ dense_layer.units == expected_units
+ ), f"Expected {expected_units} units, got {dense_layer.units}"
+
+ # Check last layer is Dropout
+ assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout"
+
+ # Sanity-check that constructor arguments were stored unchanged
+ assert mlp.n_layers == n_layers
+ assert mlp.n_units == n_units
+ assert mlp.activation == activation
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "dropout_rate, n_layers",
+ [
+ (0.2, 3),
+ ([0.1, 0.2, 0.3], 3),
+ pytest.param([0.1, 0.2], 3, marks=pytest.mark.xfail(raises=AssertionError)),
+ ],
+)
+def test_mlp_dropout_rate(dropout_rate, n_layers):
+ """Test MLPNetwork dropout_rate configurations."""
+ from tensorflow.keras.layers import Dense, Dropout, Flatten, InputLayer
+ from tensorflow.keras.models import Model
+
+ mlp = MLPNetwork(n_layers=n_layers, dropout_rate=dropout_rate)
+ input_layer, output_layer = mlp.build_network((1000, 5))
+
+ # Wrap in a Model to access internal layers
+ model = Model(inputs=input_layer, outputs=output_layer)
+ layers = model.layers
+
+ # Check first two layers
+ assert isinstance(layers[0], InputLayer), "Expected first layer to be InputLayer"
+ assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten"
+
+ # Check dropout and dense layers ordering
+ for i in range(n_layers):
+ dropout_layer = layers[2 + 2 * i]
+ dense_layer = layers[3 + 2 * i]
+
+ assert isinstance(
+ dropout_layer, Dropout
+ ), f"Expected Dropout at index {2 + 2 * i}"
+ assert isinstance(dense_layer, Dense), f"Expected Dense at index {3 + 2 * i}"
+
+ # Assert dropout rates match expected values
+ expected_dropout = (
+ dropout_rate[i] if isinstance(dropout_rate, list) else dropout_rate
+ )
+ assert (
+ dropout_layer.rate == expected_dropout
+ ), f"Expected {expected_dropout},got {dropout_layer.rate}"
+ assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout"
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "dropout_last",
+ [0.3, 0.5, pytest.param(1.2, marks=pytest.mark.xfail(raises=AssertionError))],
+)
+def test_mlp_dropout_last(dropout_last):
+ """Test MLPNetwork dropout_last configurations."""
+ from tensorflow.keras.layers import Dropout, Flatten, InputLayer
+ from tensorflow.keras.models import Model
+
+ mlp = MLPNetwork(dropout_last=dropout_last)
+ input_layer, output_layer = mlp.build_network((1000, 5))
+
+ # Wrap in a Model to access internal layers
+ model = Model(inputs=input_layer, outputs=output_layer)
+ layers = model.layers
+
+ assert isinstance(layers[0], InputLayer), "Expected first layer to be InputLayer"
+ assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten"
+ assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout"
+
+ assert (
+ layers[-1].rate == dropout_last
+ ), f"Expected {dropout_last}, got {layers[-1].rate}"
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize("use_bias", [True, False])
+def test_mlp_use_bias(use_bias):
+ """Test MLPNetwork use_bias configurations."""
+ from tensorflow.keras.layers import Dense, Dropout, Flatten, InputLayer
+ from tensorflow.keras.models import Model
+
+ mlp = MLPNetwork(use_bias=use_bias)
+ input_layer, output_layer = mlp.build_network((1000, 5))
+
+ # Wrap in a Model to access internal layers
+ model = Model(inputs=input_layer, outputs=output_layer)
+ layers = model.layers
+
+ assert isinstance(layers[0], InputLayer), "Expected first layer to be InputLayer"
+ assert isinstance(layers[1], Flatten), "Expected second layer to be Flatten"
+ assert isinstance(layers[-1], Dropout), "Expected final layer to be Dropout"
+
+ # Find the last Dense layer before the final Dropout layer
+ last_dense_layer = next(
+ (layer for layer in reversed(layers) if isinstance(layer, Dense)), None
+ )
+
+ assert last_dense_layer is not None, "No Dense layer found before final Dropout"
+ assert isinstance(last_dense_layer, Dense), "Expected last layer to be Dense"
+
+ assert (
+ last_dense_layer.use_bias == use_bias
+ ), f"Expected use_bias {use_bias}, got {last_dense_layer.use_bias}"
diff --git a/aeon/networks/tests/test_resnet.py b/aeon/networks/tests/test_resnet.py
new file mode 100644
index 0000000000..4d5c58c5b9
--- /dev/null
+++ b/aeon/networks/tests/test_resnet.py
@@ -0,0 +1,109 @@
+"""Tests for the ResNet Model."""
+
+import pytest
+
+from aeon.networks import ResNetNetwork
+from aeon.utils.validation._dependencies import _check_soft_dependencies
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="skip test if required soft dependency not available",
+)
+def test_resnet_default_initialization():
+ """Test if the network initializes with proper attributes."""
+ model = ResNetNetwork()
+ assert isinstance(
+ model, ResNetNetwork
+ ), "Model initialization failed: Incorrect type"
+ assert model.n_residual_blocks == 3, "Default residual blocks count mismatch"
+ assert (
+ model.n_conv_per_residual_block == 3
+ ), "Default convolution blocks count mismatch"
+ assert model.n_filters is None, "Default n_filters should be None"
+ assert model.kernel_size is None, "Default kernel_size should be None"
+ assert model.strides == 1, "Default strides value mismatch"
+ assert model.dilation_rate == 1, "Default dilation rate mismatch"
+ assert model.activation == "relu", "Default activation mismatch"
+ assert model.use_bias is True, "Default use_bias mismatch"
+ assert model.padding == "same", "Default padding mismatch"
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="skip test if required soft dependency not available",
+)
+def test_resnet_custom_initialization():
+ """Test whether custom kwargs are correctly set."""
+ model = ResNetNetwork(
+ n_residual_blocks=3,
+ n_conv_per_residual_block=3,
+ n_filters=[64, 128, 128],
+ kernel_size=[8, 5, 3],
+ activation="relu",
+ strides=1,
+ padding="same",
+ )
+ model.build_network((128, 1))
+ assert isinstance(
+ model, ResNetNetwork
+ ), "Custom initialization failed: Incorrect type"
+ assert model._n_filters == [64, 128, 128], "n_filters list mismatch"
+ assert model._kernel_size == [8, 5, 3], "kernel_size list mismatch"
+ assert model._activation == ["relu", "relu", "relu"], "activation list mismatch"
+ assert model._strides == [1, 1, 1], "strides list mismatch"
+ assert model._padding == ["same", "same", "same"], "padding list mismatch"
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="skip test if required soft dependency not available",
+)
+def test_resnet_invalid_initialization():
+ """Test if the network raises valid exceptions for invalid configurations."""
+ with pytest.raises(ValueError, match=".*same as number of residual blocks.*"):
+ ResNetNetwork(n_filters=[64, 128], n_residual_blocks=3).build_network((128, 1))
+
+ with pytest.raises(ValueError, match=".*same as number of convolution layers.*"):
+ ResNetNetwork(kernel_size=[8, 5], n_conv_per_residual_block=3).build_network(
+ (128, 1)
+ )
+
+ with pytest.raises(ValueError, match=".*same as number of convolution layers.*"):
+ ResNetNetwork(strides=[1, 2], n_conv_per_residual_block=3).build_network(
+ (128, 1)
+ )
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="skip test if required soft dependency not available",
+)
+def test_resnet_build_network():
+ """Test network building with various input shapes."""
+ model = ResNetNetwork()
+
+ input_shapes = [(128, 1), (256, 3), (512, 1)]
+ for shape in input_shapes:
+ input_layer, output_layer = model.build_network(shape)
+ assert hasattr(input_layer, "shape"), "Input layer type mismatch"
+ assert hasattr(output_layer, "shape"), "Output layer type mismatch"
+ assert input_layer.shape[1:] == shape, "Input shape mismatch"
+ assert output_layer.shape[-1] == 128, "Output layer mismatch"
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="skip test if required soft dependency not available",
+)
+def test_resnet_shortcut_layer():
+ """Test the shortcut layer functionality."""
+ model = ResNetNetwork()
+
+ input_shape = (128, 64)
+ input_layer, output_layer = model.build_network(input_shape)
+
+ shortcut = model._shortcut_layer(input_layer, output_layer)
+
+ assert hasattr(shortcut, "shape"), "Shortcut layer output type mismatch"
+ assert shortcut.shape[-1] == 128, "Shortcut output shape mismatch"
diff --git a/aeon/networks/tests/test_time_cnn.py b/aeon/networks/tests/test_time_cnn.py
new file mode 100644
index 0000000000..3f31f1db10
--- /dev/null
+++ b/aeon/networks/tests/test_time_cnn.py
@@ -0,0 +1,274 @@
+"""Tests for the TimeCNNNetwork Model."""
+
+import pytest
+
+from aeon.networks import TimeCNNNetwork
+from aeon.utils.validation._dependencies import _check_soft_dependencies
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+def test_time_cnn_input_shape_padding():
+ """Test of CNN network with input_shape < 60."""
+ input_shape = (40, 2)
+ network = TimeCNNNetwork()
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "activation, n_layers, should_raise",
+ [
+ ("relu", 2, False),
+ ("sigmoid", 2, False),
+ ("tanh", 2, False),
+ (["relu", "sigmoid", "tanh"], 2, True),
+ (["relu"], 2, True),
+ ],
+)
+def test_time_cnn_activation(activation, n_layers, should_raise):
+ """Test activation configuration handling."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(activation=activation, n_layers=n_layers)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(activation=activation, n_layers=n_layers)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "kernel_size, n_layers, should_raise",
+ [
+ (7, 2, False),
+ ([5, 3], 2, False),
+ ([5, 3, 2], 2, True),
+ ([5], 2, True),
+ ],
+)
+def test_time_cnn_kernel_size(kernel_size, n_layers, should_raise):
+ """Test kernel size configuration with different layer counts."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(n_layers=n_layers, kernel_size=kernel_size)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(n_layers=n_layers, kernel_size=kernel_size)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "n_layers,n_filters,should_raise",
+ [
+ (2, [8, 16], False),
+ (1, [12, 10, 4], True),
+ (2, 8, False),
+ (3, [8], True),
+ ],
+)
+def test_time_cnn_n_filters(n_layers, n_filters, should_raise):
+ """Test filter configuration handling."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(n_layers=n_layers, n_filters=n_filters)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(n_layers=n_layers, n_filters=n_filters)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "avg_pool_size, n_layers, should_raise",
+ [
+ (3, 2, False),
+ ([2, 3], 2, False),
+ ([2, 3, 4], 2, True),
+ ([2], 2, True),
+ ],
+)
+def test_time_cnn_avg_pool_size(avg_pool_size, n_layers, should_raise):
+ """Test average pool size configuration."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(avg_pool_size=avg_pool_size, n_layers=n_layers)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(avg_pool_size=avg_pool_size, n_layers=n_layers)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "strides_pooling, n_layers, should_raise",
+ [
+ (None, 2, False),
+ (2, 2, False),
+ ([2, 3], 2, False),
+ ([2, 3, 4], 2, True),
+ ([2], 2, True),
+ ],
+)
+def test_time_cnn_strides_pooling(strides_pooling, n_layers, should_raise):
+ """Test strides pooling configuration."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(strides_pooling=strides_pooling, n_layers=n_layers)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(strides_pooling=strides_pooling, n_layers=n_layers)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "padding, n_layers, should_raise",
+ [
+ ("valid", 2, False),
+ ("same", 2, False),
+ (["same", "valid"], 2, False),
+ (["same", "valid", "same"], 2, True),
+ (["same"], 2, True),
+ ],
+)
+def test_time_cnn_padding(padding, n_layers, should_raise):
+ """Test padding override behavior for different inputs."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(padding=padding, n_layers=n_layers)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(padding=padding, n_layers=n_layers)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "dilation, n_layers, should_raise",
+ [
+ (2, 2, False),
+ ([1, 2], 2, False),
+ ([1, 2, 3], 2, True),
+ ([1], 2, True),
+ ],
+)
+def test_time_cnn_dilation_rate(dilation, n_layers, should_raise):
+ """Test dilation rate configuration."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(dilation_rate=dilation, n_layers=n_layers)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(dilation_rate=dilation, n_layers=n_layers)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "strides, n_layers, should_raise",
+ [
+ (1, 2, False),
+ ([1, 2], 2, False),
+ ([1, 2, 3], 2, True),
+ ([1], 2, True),
+ ],
+)
+def test_time_cnn_strides(strides, n_layers, should_raise):
+ """Test strides configuration."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(strides=strides, n_layers=n_layers)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(strides=strides, n_layers=n_layers)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
+
+
+@pytest.mark.skipif(
+ not _check_soft_dependencies(["tensorflow"], severity="none"),
+ reason="Tensorflow soft dependency unavailable.",
+)
+@pytest.mark.parametrize(
+ "use_bias, n_layers, should_raise",
+ [
+ (True, 2, False),
+ ([True, False], 2, False),
+ ([True, False, True], 2, True),
+ ([True], 2, True),
+ ],
+)
+def test_time_cnn_use_bias(use_bias, n_layers, should_raise):
+ """Test bias usage configuration."""
+ input_shape = (100, 5)
+ if should_raise:
+ with pytest.raises(ValueError):
+ network = TimeCNNNetwork(use_bias=use_bias, n_layers=n_layers)
+ network.build_network(input_shape=input_shape)
+ else:
+ network = TimeCNNNetwork(use_bias=use_bias, n_layers=n_layers)
+ input_layer, output_layer = network.build_network(input_shape=input_shape)
+
+ assert hasattr(input_layer, "shape")
+ assert hasattr(output_layer, "shape")
diff --git a/aeon/testing/estimator_checking/_yield_clustering_checks.py b/aeon/testing/estimator_checking/_yield_clustering_checks.py
index 5205316f94..4e3940c489 100644
--- a/aeon/testing/estimator_checking/_yield_clustering_checks.py
+++ b/aeon/testing/estimator_checking/_yield_clustering_checks.py
@@ -77,18 +77,33 @@ def check_clustering_random_state_deep_learning(estimator, datatype):
deep_clr1 = _clone_estimator(estimator, random_state=random_state)
deep_clr1.fit(FULL_TEST_DATA_DICT[datatype]["train"][0])
- layers1 = deep_clr1.training_model_.layers[1:]
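+ # The training model nests the encoder and decoder as sub-models at
+ # indices 1 and 2, so their layer weights are compared separately.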
+ encoder_layers1 = deep_clr1.training_model_.layers[1].layers[1:]
+ decoder_layers1 = deep_clr1.training_model_.layers[2].layers[1:]
deep_clr2 = _clone_estimator(estimator, random_state=random_state)
deep_clr2.fit(FULL_TEST_DATA_DICT[datatype]["train"][0])
- layers2 = deep_clr2.training_model_.layers[1:]
+ encoder_layers2 = deep_clr2.training_model_.layers[1].layers[1:]
+ decoder_layers2 = deep_clr2.training_model_.layers[2].layers[1:]
- assert len(layers1) == len(layers2)
+ assert len(encoder_layers1) == len(encoder_layers2)
+ assert len(decoder_layers1) == len(decoder_layers2)
- for i in range(len(layers1)):
- weights1 = layers1[i].get_weights()
- weights2 = layers2[i].get_weights()
+ for i in range(len(encoder_layers1)):
+ weights1 = encoder_layers1[i].get_weights()
+ weights2 = encoder_layers2[i].get_weights()
+
+ assert len(weights1) == len(weights2)
+
+ for j in range(len(weights1)):
+ _weight1 = np.asarray(weights1[j])
+ _weight2 = np.asarray(weights2[j])
+
+ np.testing.assert_almost_equal(_weight1, _weight2, 4)
+
+ for i in range(len(decoder_layers1)):
+ weights1 = decoder_layers1[i].get_weights()
+ weights2 = decoder_layers2[i].get_weights()
assert len(weights1) == len(weights2)
diff --git a/aeon/transformations/series/__init__.py b/aeon/transformations/series/__init__.py
index 031073b2e6..8b71ba9fc8 100644
--- a/aeon/transformations/series/__init__.py
+++ b/aeon/transformations/series/__init__.py
@@ -6,8 +6,10 @@
"ClaSPTransformer",
"DFTSeriesTransformer",
"Dobin",
+ "ExpSmoothingSeriesTransformer",
"GaussSeriesTransformer",
"MatrixProfileSeriesTransformer",
+ "MovingAverageSeriesTransformer",
"PLASeriesTransformer",
"SGSeriesTransformer",
"StatsModelsACF",
@@ -31,8 +33,10 @@
from aeon.transformations.series._clasp import ClaSPTransformer
from aeon.transformations.series._dft import DFTSeriesTransformer
from aeon.transformations.series._dobin import Dobin
+from aeon.transformations.series._exp_smoothing import ExpSmoothingSeriesTransformer
from aeon.transformations.series._gauss import GaussSeriesTransformer
from aeon.transformations.series._matrix_profile import MatrixProfileSeriesTransformer
+from aeon.transformations.series._moving_average import MovingAverageSeriesTransformer
from aeon.transformations.series._pca import PCASeriesTransformer
from aeon.transformations.series._pla import PLASeriesTransformer
from aeon.transformations.series._scaled_logit import ScaledLogitSeriesTransformer
diff --git a/docs/api_reference/transformations.rst b/docs/api_reference/transformations.rst
index fa3184af7b..2a56fd847f 100644
--- a/docs/api_reference/transformations.rst
+++ b/docs/api_reference/transformations.rst
@@ -165,8 +165,10 @@ Series transforms
ClaSPTransformer
DFTSeriesTransformer
Dobin
+ ExpSmoothingSeriesTransformer
GaussSeriesTransformer
MatrixProfileSeriesTransformer
+ MovingAverageSeriesTransformer
PLASeriesTransformer
SGSeriesTransformer
StatsModelsACF