remove job setting in minirocket
TonyBagnall committed Jul 14, 2024
1 parent 926cb53 commit ee15cf6
Showing 1 changed file with 21 additions and 32 deletions.
aeon/transformations/collection/convolution_based/_minirocket.py (53 changes: 21 additions & 32 deletions)
@@ -1,13 +1,12 @@
"""MiniRocket transformer."""

__maintainer__ = []
__maintainer__ = ["TonyBagnall"]
__all__ = ["MiniRocket"]

import multiprocessing
from itertools import combinations

import numpy as np
from numba import get_num_threads, njit, prange, set_num_threads, vectorize
from numba import njit, prange, vectorize

from aeon.transformations.collection import BaseCollectionTransformer

@@ -96,9 +95,10 @@ def _fit(self, X, y=None):
        Parameters
        ----------
-        X : 3D np.ndarray of shape = [n_cases, n_channels, n_timepoints]
-            panel of time series to transform
-        y : ignored argument for interface compatibility
+        X : np.ndarray
+            Collection of time series of shape (n_cases, n_channels, n_timepoints)
+        y : None
+            Ignored argument for interface compatibility.

        Returns
        -------
@@ -128,54 +128,43 @@ def _transform(self, X, y=None):
        Parameters
        ----------
-        X : 3D np.ndarray of shape = [n_cases, n_channels, n_timepoints]
-            panel of time series to transform
-        y : ignored argument for interface compatibility
+        X : np.ndarray
+            Collection of time series of shape (n_cases, n_channels, n_timepoints).
+        y : None
+            Ignored argument for interface compatibility.

        Returns
        -------
        pandas DataFrame, transformed features
        """
        X = X.astype(np.float32)
        _, n_channels, n_timepoints = X.shape
-        # change n_jobs dependend on value and existing cores
-        prev_threads = get_num_threads()
-        if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():
-            n_jobs = multiprocessing.cpu_count()
-        else:
-            n_jobs = self.n_jobs
-        set_num_threads(n_jobs)
        if n_channels == 1:
            X = X.squeeze(1)
            X_ = _static_transform_uni(X, self.parameters, MiniRocket._indices)
        else:
            X_ = _static_transform_multi(X, self.parameters, MiniRocket._indices)
-        set_num_threads(prev_threads)
        return X_


-def _fit_dilations(n_timepoints, n_features, max_dilations_per_kernel):
+def _fit_dilations(n_timepoints, n_features, max_dilations):
    n_kernels = 84
-    n_features_per_kernel = n_features // n_kernels
-    true_max_dilations_per_kernel = min(n_features_per_kernel, max_dilations_per_kernel)
-    multiplier = n_features_per_kernel / true_max_dilations_per_kernel
+    n_per_kernel = n_features // n_kernels
+    max_dilations_per_kernel = min(n_per_kernel, max_dilations)
+    multiplier = n_per_kernel / max_dilations_per_kernel
    max_exponent = np.log2((n_timepoints - 1) / (9 - 1))
-    dilations, n_features_per_dilation = np.unique(
-        np.logspace(0, max_exponent, true_max_dilations_per_kernel, base=2).astype(
-            np.int32
-        ),
+    dilations, n_per_dilation = np.unique(
+        np.logspace(0, max_exponent, max_dilations_per_kernel, base=2).astype(np.int32),
        return_counts=True,
    )
-    n_features_per_dilation = (n_features_per_dilation * multiplier).astype(
-        np.int32
-    )  # this is a vector
-    remainder = n_features_per_kernel - np.sum(n_features_per_dilation)
+    n_per_dilation = (n_per_dilation * multiplier).astype(np.int32)  # this is a vector
+    remainder = n_per_kernel - np.sum(n_per_dilation)
    i = 0
    while remainder > 0:
-        n_features_per_dilation[i] += 1
+        n_per_dilation[i] += 1
        remainder -= 1
-        i = (i + 1) % len(n_features_per_dilation)
-    return dilations, n_features_per_dilation
+        i = (i + 1) % len(n_per_dilation)
+    return dilations, n_per_dilation


def _quantiles(n):
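
For context, the block deleted from _transform temporarily overrode numba's global thread count based on self.n_jobs and restored it afterwards. Below is a minimal sketch of that pattern, assuming only numba and the standard library; run_with_thread_cap and work are illustrative names, not aeon code.

import multiprocessing

from numba import get_num_threads, set_num_threads


def run_with_thread_cap(n_jobs, work):
    """Run work() with numba's thread count capped at n_jobs, then restore it."""
    prev_threads = get_num_threads()
    # Mirror the deleted check: invalid or oversized values fall back to all cores.
    if n_jobs < 1 or n_jobs > multiprocessing.cpu_count():
        n_jobs = multiprocessing.cpu_count()
    set_num_threads(n_jobs)
    try:
        return work()
    finally:
        set_num_threads(prev_threads)

After this commit, _transform calls _static_transform_uni / _static_transform_multi directly and leaves numba's thread configuration untouched.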

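The _fit_dilations changes are a rename only (n_features_per_* becomes n_per_*, and the max_dilations_per_kernel parameter becomes max_dilations); the arithmetic is unchanged. A rough usage sketch follows, under the assumption that an aeon install containing this commit exposes the private module path shown above.

import numpy as np

from aeon.transformations.collection.convolution_based._minirocket import (
    _fit_dilations,
)

# Hypothetical inputs: 100 timepoints, a 10_000-feature budget, at most 32 dilations.
dilations, n_per_dilation = _fit_dilations(100, 10_000, 32)

# dilations holds exponentially spaced int32 dilation sizes (a length-9 kernel
# must still fit in the series); n_per_dilation spreads the per-kernel budget
# (n_features // 84) across those dilations, so its sum equals that budget.
assert np.sum(n_per_dilation) == 10_000 // 84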