modify codes suggested by cargo clippy
rmitsuboshi committed Jan 19, 2025
1 parent 3a3aa30 commit 7da60b8
Showing 41 changed files with 105 additions and 123 deletions.
8 changes: 4 additions & 4 deletions src/booster/adaboost/adaboost_algorithm.rs
@@ -38,18 +38,18 @@ use std::ops::ControlFlow;
///
/// # Related information
/// - As some papers proved,
- /// `AdaBoost` **approximately maximizes the hard margin.**
+ ///   `AdaBoost` **approximately maximizes the hard margin.**
///
/// - [`AdaBoostV`](crate::booster::AdaBoostV),
- /// a successor of AdaBoost, maximizes the hard margin.
+ ///   a successor of AdaBoost, maximizes the hard margin.
///
///
/// ```no_run
/// use miniboosts::prelude::*;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
@@ -259,7 +259,7 @@ impl<F> Booster<F> for AdaBoost<'_, F>
let quit = if let Some(it) = self.force_quit_at {
format!("At round {it}")
} else {
- format!("-")
+ "-".to_string()
};
let info = Vec::from([
("# of examples", format!("{}", n_sample)),
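
The first hunk above is whitespace-only: clippy re-indents doc-comment list continuations so they line up under the bullet. The second swaps `format!("-")` for `"-".to_string()`, clippy's `useless_format` fix: `format!` with no placeholders runs the whole formatting machinery just to build a constant string. A minimal standalone sketch of the pattern (not code from this crate):

```rust
fn main() {
    // clippy::useless_format: no `{}` placeholders, so `format!` only
    // adds formatting overhead on top of the allocation.
    let before = format!("-");

    // Suggested replacement: convert the literal directly.
    let after = "-".to_string();

    assert_eq!(before, after);
}
```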
4 changes: 2 additions & 2 deletions src/booster/adaboostv/adaboostv_algorithm.rs
@@ -69,7 +69,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
@@ -291,7 +291,7 @@ impl<F> Booster<F> for AdaBoostV<'_, F>
let quit = if let Some(it) = self.force_quit_at {
format!("At round {it}")
} else {
- format!("-")
+ "-".to_string()
};
let info = Vec::from([
("# of examples", format!("{}", n_sample)),
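
The doc examples switch from `SampleReader::new()` to `SampleReader::default()`, which matches clippy's `new_without_default` convention: a no-argument constructor should be backed by a `Default` impl. A hypothetical builder illustrating the idiom — field names here are invented; the real `SampleReader` lives in miniboosts:

```rust
// Illustrative stand-in for miniboosts' `SampleReader` builder.
#[derive(Default)]
struct SampleReader {
    file: Option<String>,
    has_header: bool,
    target_feature: Option<String>,
}

impl SampleReader {
    fn file(mut self, path: &str) -> Self {
        self.file = Some(path.to_string());
        self
    }

    fn has_header(mut self, flag: bool) -> Self {
        self.has_header = flag;
        self
    }

    fn target_feature(mut self, name: &str) -> Self {
        self.target_feature = Some(name.to_string());
        self
    }
}

fn main() {
    // `default()` now starts the chain, exactly as in the updated docs.
    let _reader = SampleReader::default()
        .file("train.csv")
        .has_header(true)
        .target_feature("class");
}
```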
2 changes: 1 addition & 1 deletion src/booster/cerlpboost/cerlpboost_algorithm.rs
@@ -72,7 +72,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
2 changes: 1 addition & 1 deletion src/booster/erlpboost/erlpboost_algorithm.rs
@@ -64,7 +64,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
4 changes: 2 additions & 2 deletions src/booster/erlpboost/qp_model.rs
@@ -170,7 +170,7 @@ impl QPModel {

/// Returns `true` if `dist[i] > 0` holds for all `i = 1, 2, ..., m.`
pub(self) fn all_positive(&self, dist: &[f64]) -> bool {
- dist.into_iter()
+ dist.iter()
.copied()
.all(|d| d > 0f64)
}
@@ -179,7 +179,7 @@
pub(self) fn build_linear_part_objective(&self, dist: &[f64]) -> Vec<f64> {
let mut linear = Vec::with_capacity(1 + self.n_examples);
linear.push(1f64);
- let iter = dist.into_iter()
+ let iter = dist.iter()
.copied()
.map(|di| (1f64 / self.eta) * di.ln());
linear.extend(iter);
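
Both hunks in this file fix the same lint: on a shared slice `&[f64]`, `into_iter()` cannot take ownership and silently degrades to yielding `&f64`, exactly like `iter()`, so clippy (`into_iter_on_ref`) asks for the explicit spelling. A standalone sketch of the `all_positive` pattern:

```rust
/// Returns `true` if every entry of `dist` is strictly positive,
/// mirroring the `all_positive` helper in the diff above.
fn all_positive(dist: &[f64]) -> bool {
    // On `&[f64]`, `into_iter()` would yield `&f64` anyway;
    // `iter().copied()` states the intent directly.
    dist.iter().copied().all(|d| d > 0f64)
}

fn main() {
    assert!(all_positive(&[0.2, 0.5, 0.3]));
    assert!(!all_positive(&[0.2, 0.0, 0.8]));
}
```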
8 changes: 4 additions & 4 deletions src/booster/gradient_boost/gbm.rs
@@ -44,7 +44,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
@@ -149,7 +149,7 @@ impl<'a, F, L> GBM<'a, F, L>
}


- impl<'a, F, L> GBM<'a, F, L> {
+ impl<F, L> GBM<'_, F, L> {
/// Returns the maximum iteration
/// of the `GBM` to find a combined hypothesis
/// that has error at most `tolerance`.
@@ -194,7 +194,7 @@ impl<F, L> Booster<F> for GBM<'_, F, L>
("# of examples", format!("{n_sample}")),
("# of features", format!("{n_feature}")),
("Tolerance", format!("{}", self.tolerance)),
- ("Loss", format!("{}", self.loss.name())),
+ ("Loss", self.loss.name().to_string()),
("Max iteration", format!("{}", self.max_iter)),
]);
Some(info)
@@ -236,7 +236,7 @@ impl<F, L> Booster<F> for GBM<'_, F, L>

let predictions = h.predict_all(self.sample);
let coef = self.loss.best_coefficient(
- &self.sample.target(), &predictions[..]
+ self.sample.target(), &predictions[..]
);

// If the best coefficient is zero,
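
Two lints meet in this file: `needless_lifetimes` (the named `'a` on the `impl` header carries no information, so the anonymous `'_` is preferred) and `needless_borrow` (`self.sample.target()` evidently already yields a reference, so taking `&` of it again is redundant). A sketch of the lifetime part, using a hypothetical type:

```rust
// Hypothetical struct borrowing its training data, the way `GBM`
// borrows its sample.
struct Model<'a> {
    sample: &'a [f64],
}

// Before (clippy::needless_lifetimes): impl<'a> Model<'a> { ... }
// After: the anonymous lifetime `'_` says the same thing with less noise.
impl Model<'_> {
    fn n_examples(&self) -> usize {
        self.sample.len()
    }
}

fn main() {
    let data = vec![1.0, 2.0, 3.0];
    let model = Model { sample: &data };
    assert_eq!(model.n_examples(), 3);
}
```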
@@ -45,7 +45,7 @@ use std::collections::HashSet;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
@@ -113,7 +113,7 @@ impl<'a, F> GraphSepBoost<'a, F> {
}
}

- impl<'a, F> GraphSepBoost<'a, F>
+ impl<F> GraphSepBoost<'_, F>
where F: Classifier
{
/// Returns a weight on the new hypothesis.
@@ -236,7 +236,7 @@ impl<F> Booster<F> for GraphSepBoost<'_, F>
where W: WeakLearner<Hypothesis = F>
{
let hypotheses = std::mem::take(&mut self.hypotheses);
- NaiveAggregation::new(hypotheses, &self.sample)
+ NaiveAggregation::new(hypotheses, self.sample)
}
}

@@ -246,6 +246,6 @@ impl<H> Research for GraphSepBoost<'_, H>
{
type Output = NaiveAggregation<H>;
fn current_hypothesis(&self) -> Self::Output {
- NaiveAggregation::from_slice(&self.hypotheses, &self.sample)
+ NaiveAggregation::from_slice(&self.hypotheses, self.sample)
}
}
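
`&self.sample` → `self.sample` is clippy's `needless_borrow`: the field already stores a `&Sample`, so borrowing it again hands over a `&&Sample` that the compiler must auto-deref. A self-contained sketch with hypothetical types:

```rust
struct Sample;

// Stand-in for `NaiveAggregation::new`, which takes the sample by reference.
fn aggregate(sample: &Sample) {
    let _ = sample;
}

struct Booster<'a> {
    sample: &'a Sample,
}

impl Booster<'_> {
    fn finish(&self) {
        // `self.sample` is already `&Sample`; `&self.sample` would pass a
        // `&&Sample` and trip clippy::needless_borrow.
        aggregate(self.sample);
    }
}

fn main() {
    let sample = Sample;
    Booster { sample: &sample }.finish();
}
```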
8 changes: 4 additions & 4 deletions src/booster/lpboost/lpboost_algorithm.rs
@@ -58,8 +58,8 @@ use std::ops::ControlFlow;
/// # Related information
/// - Currently (2023), `LPBoost` has no convergence guarantee.
/// - [`ERLPBoost`](crate::booster::ERLPBoost),
- /// A stabilized version of `LPBoost` is
- /// proposed by Warmuth et al. (2008).
+ ///   A stabilized version of `LPBoost` is
+ ///   proposed by Warmuth et al. (2008).
///
/// # Example
/// The following code shows a small example for running [`LPBoost`].
@@ -70,7 +70,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
@@ -262,7 +262,7 @@ impl<F> Booster<F> for LPBoost<'_, F>
("# of examples", format!("{n_sample}")),
("# of features", format!("{n_feature}")),
("Tolerance", format!("{}", self.tolerance)),
- ("Max iteration", format!("-")),
+ ("Max iteration", "-".to_string()),
("Capping (outliers)", format!("{nu} ({ratio: >7.3} %)"))
]);
Some(info)
4 changes: 2 additions & 2 deletions src/booster/madaboost/madaboost_algorithm.rs
@@ -44,7 +44,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
@@ -274,7 +274,7 @@ impl<F> Booster<F> for MadaBoost<'_, F>
let quit = if let Some(it) = self.force_quit_at {
format!("At round {it}")
} else {
- format!("-")
+ "-".to_string()
};
let info = Vec::from([
("# of examples", format!("{}", n_sample)),
4 changes: 2 additions & 2 deletions src/booster/mlpboost/mlpboost_algorithm.rs
@@ -92,7 +92,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
@@ -388,7 +388,7 @@ impl<F> Booster<F> for MLPBoost<'_, F>
("Max iteration", format!("{}", self.max_iter)),
("Capping (outliers)", format!("{nu} ({ratio: >7.3} %)")),
("Primary", format!("{}", self.primary.current_type())),
- ("Secondary", format!("LPBoost"))
+ ("Secondary", "LPBoost".to_string())
]);
Some(info)
}
1 change: 0 additions & 1 deletion src/booster/mlpboost/perturbed_lp_model.rs
@@ -3,7 +3,6 @@ use clarabel::{
solver::*,
};

- use rand;
use rand::rngs::StdRng;
use rand::prelude::*;

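
The deleted `use rand;` is redundant on the 2018 and later editions: a dependency's name already resolves in paths, and the two item-level imports below it are what the file actually uses. A sketch, assuming the rand 0.8 API:

```rust
// On edition 2018+, a bare `use rand;` adds nothing; the item-level
// imports below do the real work.
use rand::rngs::StdRng;
use rand::prelude::*; // brings `SeedableRng` and `Rng` into scope

fn main() {
    let mut rng = StdRng::seed_from_u64(1234);
    let perturbation: f64 = rng.gen(); // rand 0.8; renamed `random()` in 0.9
    println!("{perturbation}");
}
```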
2 changes: 1 addition & 1 deletion src/booster/smoothboost/smoothboost_algorithm.rs
@@ -49,7 +49,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
6 changes: 3 additions & 3 deletions src/booster/softboost/qp_model.rs
@@ -154,21 +154,21 @@ impl QPModel {
}
old_objval = objval;
}
- return Some(())
+ Some(())
}


/// Returns `true` if `dist[i] > 0` holds for all `i = 1, 2, ..., m.`
pub(self) fn all_positive(&self, dist: &[f64]) -> bool {
- dist.into_iter()
+ dist.iter()
.copied()
.all(|d| d > 0f64)
}


pub(self) fn build_linear_part_objective(&self, dist: &[f64]) -> Vec<f64> {
let mut linear = Vec::with_capacity(self.n_examples);
- let iter = dist.into_iter()
+ let iter = dist.iter()
.copied()
.map(|di| di.ln());
linear.extend(iter);
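
Besides the `iter()` changes seen earlier, this file drops a trailing `return`: in Rust the last expression of a block is its value, so `return Some(())` at the end of a function is noise (clippy's `needless_return`). A sketch of the control-flow shape, with the solver loop elided:

```rust
// Shape of the routine around the change: iterate until the objective
// stops improving, then yield `Some(())` as the tail expression.
fn solve_until_converged(objvals: &[f64], tol: f64) -> Option<()> {
    let mut old_objval = f64::INFINITY;
    for &objval in objvals {
        if (old_objval - objval).abs() < tol {
            break;
        }
        old_objval = objval;
    }
    Some(()) // clippy::needless_return would fire on `return Some(())`
}

fn main() {
    assert_eq!(solve_until_converged(&[3.0, 2.0, 1.99], 0.1), Some(()));
}
```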
2 changes: 1 addition & 1 deletion src/booster/softboost/softboost_algorithm.rs
@@ -64,7 +64,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
2 changes: 1 addition & 1 deletion src/booster/totalboost/totalboost_algorithm.rs
@@ -66,7 +66,7 @@ use std::ops::ControlFlow;
///
/// // Read the training sample from the CSV file.
/// // We use the column named `class` as the label.
- /// let sample = SampleReader::new()
+ /// let sample = SampleReader::default()
/// .file(path_to_file)
/// .has_header(true)
/// .target_feature("class")
2 changes: 1 addition & 1 deletion src/common/loss_functions.rs
@@ -130,7 +130,7 @@ impl LossFunction for GBMLoss {
fn hessian(&self, predictions: &[f64], target: &[f64]) -> Vec<f64>
{
let n_sample = predictions.len();
- assert_eq!(n_sample as usize, target.len());
+ assert_eq!(n_sample, target.len());

match self {
Self::L1 => {
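
`n_sample as usize` was a cast from `usize` to `usize`: `len()` already returns `usize`, and clippy reports the no-op as `unnecessary_cast`. A minimal sketch:

```rust
fn check_lengths(predictions: &[f64], target: &[f64]) {
    // `len()` returns `usize`, so `n_sample as usize` would be a no-op
    // cast (clippy::unnecessary_cast).
    let n_sample = predictions.len();
    assert_eq!(n_sample, target.len());
}

fn main() {
    check_lengths(&[0.1, 0.9], &[0.0, 1.0]);
}
```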
2 changes: 1 addition & 1 deletion src/common/utils.rs
@@ -321,7 +321,7 @@ pub(crate) fn total_weight_for_label(
weight: &[f64],
) -> f64
{
- target.into_iter()
+ target.iter()
.copied()
.zip(weight)
.filter_map(|(t, w)| if t == y { Some(w) } else { None })
2 changes: 1 addition & 1 deletion src/research/cross_validation.rs
@@ -152,7 +152,7 @@ impl<'a> CrossValidation<'a> {
}


- impl<'a> Iterator for CrossValidation<'a> {
+ impl Iterator for CrossValidation<'_> {
type Item = (Sample, Sample);
fn next(&mut self) -> Option<Self::Item> {
if self.current_fold >= self.n_folds { return None; }
11 changes: 4 additions & 7 deletions src/research/logger.rs
@@ -62,7 +62,7 @@ impl<'a, B, W, F, G> Logger<'a, B, W, F, G> {
}
}

- impl<'a, H, B, W, F, G, O> Logger<'a, B, W, F, G>
+ impl<H, B, W, F, G, O> Logger<'_, B, W, F, G>
where B: Booster<H, Output=O> + Research<Output=O>,
O: Classifier,
W: WeakLearner<Hypothesis = H>,
@@ -106,17 +106,15 @@ impl<'a, H, B, W, F, G, O> Logger<'a, B, W, F, G>
#[inline(always)]
fn print_log_header(&self) {
println!(
- "{} {:>WIDTH$}\t\t{:>WIDTH$}\t{:>WIDTH$}\t{:>WIDTH$}\t{:>WIDTH$}",
- " ",
+ " {:>WIDTH$}\t\t{:>WIDTH$}\t{:>WIDTH$}\t{:>WIDTH$}\t{:>WIDTH$}",
"".bold().red(),
"OBJ.".bold().blue(),
"TRAIN".bold().green(),
"TEST".bold().yellow(),
"ACC.".bold().cyan(),
);
println!(
- "{} {:>WIDTH$}\t\t{:>WIDTH$}\t{:>WIDTH$}\t{:>WIDTH$}\t{:>WIDTH$}\n",
- " ",
+ " {:>WIDTH$}\t\t{:>WIDTH$}\t{:>WIDTH$}\t{:>WIDTH$}\t{:>WIDTH$}\n",
"ROUND".bold().red(),
"VALUE".bold().blue(),
"ERROR".bold().green(),
@@ -184,15 +182,14 @@ impl<'a, H, B, W, F, G, O> Logger<'a, B, W, F, G>
"\
+ {:<STAT_WIDTH$}\t{:>STAT_WIDTH$}\n\
+ {:<STAT_WIDTH$}\t{:>STAT_WIDTH$}\n\
- {:=^FULL_WIDTH$}\
+ {:=^FULL_WIDTH$}\n\
",
"Objective".bold(),
self.objective_func.name().bold().green(),
"Time Limit".bold(),
limit.bold().green(),
"".bold(),
);
- println!("");
}


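
The logger edits fold constants into the format strings themselves: the literal `" "` that fed a leading `{}` becomes a space at the start of the string, and the blank line once emitted by a separate `println!("");` (clippy's `println_empty_string`) becomes a `\n` appended to the previous format string — which is also why `{:=^FULL_WIDTH$}\` gains an explicit `\n`. A condensed sketch of both moves:

```rust
const WIDTH: usize = 7;

fn main() {
    // Before: println!("{} {:>WIDTH$}", " ", "ROUND");
    // After: the constant space lives in the format string itself.
    println!(" {:>WIDTH$}", "ROUND");

    // Before: a separate `println!("");` printed the trailing blank line
    // (clippy::println_empty_string). After: a `\n` at the end of the
    // previous format string does the same job.
    println!(" {:>WIDTH$}\n", "VALUE");
}
```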
4 changes: 2 additions & 2 deletions src/sample/feature_struct.rs
@@ -422,7 +422,7 @@ impl SparseFeature {
fn distinct_value_count(&self) -> usize {

let values = self.sample[..]
- .into_iter()
+ .iter()
.map(|(_, v)| *v)
.collect::<Vec<_>>();
let mut uniq_value_count = inner_distinct_value_count(values);
@@ -526,7 +526,7 @@ impl Index<usize> for Feature {
/// Count the number of items in `src` that has the same value.
/// The given vector `src` is assumed to be sorted in ascending order.
fn inner_distinct_value_count(mut src: Vec<f64>) -> usize {
- src.sort_by(|a, b| a.partial_cmp(&b).unwrap());
+ src.sort_by(|a, b| a.partial_cmp(b).unwrap());
let mut iter = src.into_iter();
let mut value = match iter.next() {
Some(v) => v,
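
The `sort_by` fix is `needless_borrow` inside a closure: the comparator receives `&f64` arguments, so `b` is already a reference and `a.partial_cmp(&b)` borrows one level too many. A runnable sketch of the sort used by `inner_distinct_value_count`:

```rust
fn main() {
    let mut src = vec![3.0_f64, 1.0, 2.0, 1.0];

    // `sort_by` hands the closure `a: &f64, b: &f64`; `partial_cmp`
    // expects `&f64`, so `b` fits as-is and `&b` (a `&&f64`) is the
    // needless borrow clippy flagged.
    src.sort_by(|a, b| a.partial_cmp(b).unwrap());

    assert_eq!(src, vec![1.0, 1.0, 2.0, 3.0]);
}
```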