diff --git a/crates/sophus/examples/pose_graph.rs b/crates/sophus/examples/pose_graph.rs index a84aed6e..f6389b74 100644 --- a/crates/sophus/examples/pose_graph.rs +++ b/crates/sophus/examples/pose_graph.rs @@ -3,9 +3,8 @@ use hollywood::actors::egui::Stream; pub use hollywood::compute::Context; pub use hollywood::core::*; use hollywood::macros::*; -use sophus::image::image_view::ImageSize; -use sophus::lie::traits::IsTranslationProductGroup; use sophus::opt::example_problems::pose_circle::PoseCircleProblem; +use sophus::prelude::*; use sophus::viewer::actor::run_viewer_on_main_thread; use sophus::viewer::actor::ViewerBuilder; use sophus::viewer::actor::ViewerCamera; @@ -13,13 +12,13 @@ use sophus::viewer::actor::ViewerConfig; use sophus::viewer::renderable::*; use sophus::viewer::scene_renderer::interaction::WgpuClippingPlanes; use sophus::viewer::SimpleViewer; -use sophus_core::linalg::vector::IsVector; use sophus_core::linalg::VecF64; -use sophus_lie::groups::isometry2::Isometry2; -use sophus_lie::groups::isometry3::Isometry3; -use sophus_sensor::camera_enum::perspective_camera::KannalaBrandtCamera; +use sophus_image::ImageSize; +use sophus_lie::Isometry2; +use sophus_lie::Isometry3; use sophus_sensor::camera_enum::perspective_camera::PerspectiveCameraEnum; use sophus_sensor::dyn_camera::DynCamera; +use sophus_sensor::KannalaBrandtCamera; #[actor(ContentGeneratorMessage)] type ContentGenerator = Actor< @@ -100,16 +99,16 @@ impl OnMessage for ContentGeneratorMessage { ContentGeneratorMessage::ClockTick(_time_in_seconds) => { let pose_graph = PoseCircleProblem::new(25); - let mut renderables = vec![]; - - renderables.push(Renderable::Lines3(Lines3 { - name: "true".into(), - lines: make_axes(pose_graph.true_world_from_robot.clone()), - })); - renderables.push(Renderable::Lines3(Lines3 { - name: "est".into(), - lines: make_axes(pose_graph.est_world_from_robot.clone()), - })); + let renderables = vec![ + Renderable::Lines3(Lines3 { + name: "true".into(), + lines: make_axes(pose_graph.true_world_from_robot.clone()), + }), + Renderable::Lines3(Lines3 { + name: "est".into(), + lines: make_axes(pose_graph.est_world_from_robot.clone()), + }), + ]; outbound.packets.send(Stream { msg: renderables }); } diff --git a/crates/sophus/examples/viewer_ex.rs b/crates/sophus/examples/viewer_ex.rs index 70b399e6..be52cc65 100644 --- a/crates/sophus/examples/viewer_ex.rs +++ b/crates/sophus/examples/viewer_ex.rs @@ -8,7 +8,8 @@ pub use hollywood::core::*; use hollywood::macros::*; use nalgebra::SVector; use sophus::image::arc_image::ArcImage4F32; -use sophus::image::image_view::ImageSize; +use sophus::image::ImageSize; +use sophus::prelude::*; use sophus::viewer::actor::run_viewer_on_main_thread; use sophus::viewer::actor::ViewerBuilder; use sophus::viewer::actor::ViewerCamera; @@ -16,11 +17,9 @@ use sophus::viewer::actor::ViewerConfig; use sophus::viewer::renderable::*; use sophus::viewer::scene_renderer::interaction::WgpuClippingPlanes; use sophus::viewer::SimpleViewer; -use sophus_core::linalg::vector::IsVector; use sophus_core::linalg::VecF64; -use sophus_lie::groups::isometry3::Isometry3; -use sophus_lie::traits::IsTranslationProductGroup; -use sophus_sensor::dyn_camera::DynCamera; +use sophus_lie::Isometry3; +use sophus_sensor::DynCamera; #[actor(ContentGeneratorMessage)] type ContentGenerator = Actor< diff --git a/crates/sophus/src/lib.rs b/crates/sophus/src/lib.rs index 83e10187..087f20fe 100644 --- a/crates/sophus/src/lib.rs +++ b/crates/sophus/src/lib.rs @@ -1,4 +1,5 @@ #![feature(portable_simd)] 
+#![allow(clippy::needless_range_loop)] pub use sophus_core as core; pub use sophus_image as image; @@ -9,3 +10,10 @@ pub use sophus_sensor as sensor; pub mod viewer; pub use hollywood; + +pub mod prelude { + pub use crate::core::prelude::*; + pub use crate::image::prelude::*; + pub use crate::lie::prelude::*; + pub use crate::opt::prelude::*; +} diff --git a/crates/sophus/src/viewer.rs b/crates/sophus/src/viewer.rs index b17fd346..c579022c 100644 --- a/crates/sophus/src/viewer.rs +++ b/crates/sophus/src/viewer.rs @@ -3,7 +3,6 @@ pub mod offscreen; pub mod pixel_renderer; pub mod renderable; pub mod scene_renderer; - use self::actor::ViewerBuilder; use self::offscreen::OffscreenTexture; use self::pixel_renderer::PixelRenderer; @@ -12,8 +11,8 @@ use self::scene_renderer::depth_renderer::DepthRenderer; use self::scene_renderer::textured_mesh::TexturedMeshVertex3; use self::scene_renderer::SceneRenderer; use crate::image::arc_image::ArcImage4U8; -use crate::image::image_view::ImageSize; use crate::image::image_view::IsImageView; +use crate::image::ImageSize; use crate::viewer::pixel_renderer::LineVertex2; use crate::viewer::pixel_renderer::PointVertex2; use crate::viewer::scene_renderer::line::LineVertex3; @@ -30,7 +29,7 @@ use hollywood::actors::egui::Stream; use hollywood::compute::pipeline::CancelRequest; use hollywood::core::request::RequestMessage; use sophus_core::tensor::tensor_view::IsTensorLike; -use sophus_lie::groups::isometry3::Isometry3; +use sophus_lie::Isometry3; use sophus_sensor::dyn_camera::DynCamera; use std::sync::Arc; diff --git a/crates/sophus/src/viewer/actor.rs b/crates/sophus/src/viewer/actor.rs index 1393797f..9c83af16 100644 --- a/crates/sophus/src/viewer/actor.rs +++ b/crates/sophus/src/viewer/actor.rs @@ -5,9 +5,8 @@ use eframe::egui; use hollywood::actors::egui::EguiAppFromBuilder; use hollywood::actors::egui::GenericEguiBuilder; use hollywood::core::request::RequestMessage; -use sophus_lie::groups::isometry3::Isometry3; +use sophus_lie::Isometry3; use sophus_sensor::dyn_camera::DynCamera; - pub struct ViewerCamera { pub intrinsics: DynCamera, pub clipping_planes: WgpuClippingPlanes, diff --git a/crates/sophus/src/viewer/offscreen.rs b/crates/sophus/src/viewer/offscreen.rs index f47d9a89..ae0d21fa 100644 --- a/crates/sophus/src/viewer/offscreen.rs +++ b/crates/sophus/src/viewer/offscreen.rs @@ -1,9 +1,9 @@ use crate::image::arc_image::ArcImageF32; -use crate::image::image_view::ImageSize; use crate::image::image_view::ImageViewF32; +use crate::image::ImageSize; use crate::viewer::ViewerRenderState; use eframe::egui::{self}; -use sophus_sensor::dyn_camera::DynCamera; +use sophus_sensor::DynCamera; #[derive(Debug)] pub(crate) struct OffscreenTexture { diff --git a/crates/sophus/src/viewer/pixel_renderer.rs b/crates/sophus/src/viewer/pixel_renderer.rs index b2787444..eb74f9c6 100644 --- a/crates/sophus/src/viewer/pixel_renderer.rs +++ b/crates/sophus/src/viewer/pixel_renderer.rs @@ -1,13 +1,11 @@ pub mod line; pub mod pixel_point; - +use self::line::PixelLineRenderer; +use self::pixel_point::PixelPointRenderer; use crate::viewer::actor::ViewerBuilder; use crate::viewer::scene_renderer::interaction::InteractionState; use crate::viewer::DepthRenderer; use crate::viewer::ViewerRenderState; - -use self::line::PixelLineRenderer; -use self::pixel_point::PixelPointRenderer; use bytemuck::Pod; use bytemuck::Zeroable; use std::num::NonZeroU64; diff --git a/crates/sophus/src/viewer/pixel_renderer/line.rs b/crates/sophus/src/viewer/pixel_renderer/line.rs index 
dc7a82cc..f8a69305 100644 --- a/crates/sophus/src/viewer/pixel_renderer/line.rs +++ b/crates/sophus/src/viewer/pixel_renderer/line.rs @@ -1,7 +1,6 @@ use crate::viewer::renderable::Line2; use crate::viewer::LineVertex2; use crate::viewer::ViewerRenderState; - use eframe::egui_wgpu::wgpu::util::DeviceExt; use nalgebra::SVector; use std::collections::BTreeMap; diff --git a/crates/sophus/src/viewer/pixel_renderer/pixel_point.rs b/crates/sophus/src/viewer/pixel_renderer/pixel_point.rs index 46602f11..703e3539 100644 --- a/crates/sophus/src/viewer/pixel_renderer/pixel_point.rs +++ b/crates/sophus/src/viewer/pixel_renderer/pixel_point.rs @@ -1,7 +1,6 @@ use crate::viewer::renderable::Point2; use crate::viewer::PointVertex2; use crate::viewer::ViewerRenderState; - use eframe::egui_wgpu::wgpu::util::DeviceExt; use std::collections::BTreeMap; use std::sync::Mutex; diff --git a/crates/sophus/src/viewer/scene_renderer.rs b/crates/sophus/src/viewer/scene_renderer.rs index 66e9b648..675b37ad 100644 --- a/crates/sophus/src/viewer/scene_renderer.rs +++ b/crates/sophus/src/viewer/scene_renderer.rs @@ -5,7 +5,6 @@ pub mod line; pub mod mesh; pub mod point; pub mod textured_mesh; - use self::buffers::SceneRenderBuffers; use self::interaction::Interaction; use self::mesh::MeshRenderer; diff --git a/crates/sophus/src/viewer/scene_renderer/buffers.rs b/crates/sophus/src/viewer/scene_renderer/buffers.rs index 7bca3474..aae9aeb2 100644 --- a/crates/sophus/src/viewer/scene_renderer/buffers.rs +++ b/crates/sophus/src/viewer/scene_renderer/buffers.rs @@ -1,7 +1,6 @@ use crate::sensor::distortion_table::DistortTable; use crate::viewer::ViewerBuilder; use crate::viewer::ViewerRenderState; - use std::sync::Mutex; use wgpu::util::DeviceExt; diff --git a/crates/sophus/src/viewer/scene_renderer/interaction.rs b/crates/sophus/src/viewer/scene_renderer/interaction.rs index b0a52c5d..a5913ce7 100644 --- a/crates/sophus/src/viewer/scene_renderer/interaction.rs +++ b/crates/sophus/src/viewer/scene_renderer/interaction.rs @@ -1,10 +1,8 @@ use crate::image::arc_image::ArcImageF32; -use crate::image::image_view::IsImageView; -use crate::lie::traits::IsTranslationProductGroup; +use crate::prelude::*; use eframe::egui; use sophus_core::linalg::VecF64; -use sophus_core::tensor::tensor_view::IsTensorLike; -use sophus_lie::groups::isometry3::Isometry3; +use sophus_lie::Isometry3; use sophus_sensor::dyn_camera::DynCamera; #[derive(Clone, Copy)] diff --git a/crates/sophus/src/viewer/scene_renderer/line.rs b/crates/sophus/src/viewer/scene_renderer/line.rs index 7d816598..19d12dfa 100644 --- a/crates/sophus/src/viewer/scene_renderer/line.rs +++ b/crates/sophus/src/viewer/scene_renderer/line.rs @@ -1,6 +1,5 @@ use crate::viewer::renderable::Line3; use crate::viewer::ViewerRenderState; - use bytemuck::Pod; use bytemuck::Zeroable; use eframe::egui_wgpu::wgpu::util::DeviceExt; diff --git a/crates/sophus/src/viewer/scene_renderer/mesh.rs b/crates/sophus/src/viewer/scene_renderer/mesh.rs index e3480ed9..b43c1619 100644 --- a/crates/sophus/src/viewer/scene_renderer/mesh.rs +++ b/crates/sophus/src/viewer/scene_renderer/mesh.rs @@ -1,6 +1,5 @@ use crate::viewer::renderable::Triangle3; use crate::viewer::ViewerRenderState; - use bytemuck::Pod; use bytemuck::Zeroable; use eframe::egui_wgpu::wgpu::util::DeviceExt; diff --git a/crates/sophus/src/viewer/scene_renderer/point.rs b/crates/sophus/src/viewer/scene_renderer/point.rs index 40d4838b..f8e17e7e 100644 --- a/crates/sophus/src/viewer/scene_renderer/point.rs +++ 
b/crates/sophus/src/viewer/scene_renderer/point.rs @@ -1,6 +1,5 @@ use crate::viewer::renderable::Point3; use crate::viewer::ViewerRenderState; - use bytemuck::Pod; use bytemuck::Zeroable; use eframe::egui_wgpu::wgpu::util::DeviceExt; diff --git a/crates/sophus/src/viewer/scene_renderer/textured_mesh.rs b/crates/sophus/src/viewer/scene_renderer/textured_mesh.rs index d6da636b..5cdbd50a 100644 --- a/crates/sophus/src/viewer/scene_renderer/textured_mesh.rs +++ b/crates/sophus/src/viewer/scene_renderer/textured_mesh.rs @@ -1,12 +1,11 @@ +use crate::viewer::renderable::TexturedTriangle3; +use crate::viewer::ViewerRenderState; use bytemuck::Pod; use bytemuck::Zeroable; use eframe::egui_wgpu::wgpu::util::DeviceExt; use std::collections::BTreeMap; use wgpu::DepthStencilState; -use crate::viewer::renderable::TexturedTriangle3; -use crate::viewer::ViewerRenderState; - #[repr(C)] #[derive(Clone, Copy, Pod, Zeroable)] pub struct TexturedMeshVertex3 { diff --git a/crates/sophus_core/src/calculus.rs b/crates/sophus_core/src/calculus.rs index 95f9e096..d4f04608 100644 --- a/crates/sophus_core/src/calculus.rs +++ b/crates/sophus_core/src/calculus.rs @@ -3,11 +3,18 @@ /// dual numbers - for automatic differentiation pub mod dual; -/// manifolds -pub mod manifold; + /// curves, scalar-valued, vector-valued, and matrix-valued maps pub mod maps; + /// intervals and regions pub mod region; +pub use crate::calculus::region::IInterval; +pub use crate::calculus::region::IRegion; +pub use crate::calculus::region::Interval; +pub use crate::calculus::region::Region; + /// splines pub mod spline; +pub use crate::calculus::spline::CubicBSpline; +pub use crate::calculus::spline::CubicBSplineParams; diff --git a/crates/sophus_core/src/calculus/dual.rs b/crates/sophus_core/src/calculus/dual.rs index ada118ed..bb7ec3a1 100644 --- a/crates/sophus_core/src/calculus/dual.rs +++ b/crates/sophus_core/src/calculus/dual.rs @@ -1,6 +1,14 @@ -///! DualScalar matrix. +/// DualScalar matrix. pub mod dual_matrix; -///! DualScalar scalar. +pub use crate::calculus::dual::dual_matrix::DualBatchMatrix; +pub use crate::calculus::dual::dual_matrix::DualMatrix; + +/// DualScalar scalar. pub mod dual_scalar; -///! DualScalar vector. +pub use crate::calculus::dual::dual_scalar::DualBatchScalar; +pub use crate::calculus::dual::dual_scalar::DualScalar; + +/// DualScalar vector. 
pub mod dual_vector; +pub use crate::calculus::dual::dual_vector::DualBatchVector; +pub use crate::calculus::dual::dual_vector::DualVector; diff --git a/crates/sophus_core/src/calculus/dual/dual_matrix.rs b/crates/sophus_core/src/calculus/dual/dual_matrix.rs index d30cb55e..a4dbcbb8 100644 --- a/crates/sophus_core/src/calculus/dual/dual_matrix.rs +++ b/crates/sophus_core/src/calculus/dual/dual_matrix.rs @@ -1,23 +1,16 @@ -use super::dual_scalar::IsDual; -use super::dual_scalar::IsDualScalar; -use super::dual_vector::DualVector; -use crate::calculus::dual::dual_scalar::DualBatchScalar; -use crate::calculus::dual::dual_scalar::DualScalar; -use crate::calculus::dual::dual_vector::DualBatchVector; -use crate::linalg::matrix::IsMatrix; -use crate::linalg::matrix::IsSingleMatrix; -use crate::linalg::scalar::IsCoreScalar; -use crate::linalg::scalar::IsScalar; +use crate::calculus::dual::DualBatchScalar; +use crate::calculus::dual::DualBatchVector; +use crate::calculus::dual::DualScalar; +use crate::calculus::dual::DualVector; use crate::linalg::BatchMatF64; use crate::linalg::BatchScalarF64; use crate::linalg::BatchVecF64; use crate::linalg::MatF64; use crate::linalg::VecF64; +use crate::prelude::*; use crate::tensor::mut_tensor::MutTensorDD; use crate::tensor::mut_tensor::MutTensorDDR; use crate::tensor::mut_tensor::MutTensorDDRC; -use crate::tensor::mut_tensor_view::IsMutTensorLike; -use crate::tensor::tensor_view::IsTensorLike; use approx::AbsDiffEq; use approx::RelativeEq; use num_traits::Zero; @@ -33,10 +26,10 @@ use std::simd::SupportedLaneCount; /// DualScalarLike matrix #[derive(Clone)] pub struct DualMatrix { - /// value - real matrix - pub val: MatF64, - /// derivative - infinitesimal matrix - pub dij_val: Option>, + /// real part + pub real_part: MatF64, + /// infinitesimal part - represents derivative + pub dij_part: Option>, } /// DualScalarLike matrix @@ -46,10 +39,10 @@ where BatchScalarF64: IsCoreScalar, LaneCount: SupportedLaneCount, { - /// value - real matrix - pub val: BatchMatF64, - /// derivative - infinitesimal matrix - pub dij_val: Option, ROWS, COLS>>, + /// real part + pub real_part: BatchMatF64, + /// infinitesimal part - represents derivative + pub dij_part: Option, ROWS, COLS>>, } impl IsSingleMatrix @@ -65,8 +58,14 @@ pub trait IsDualMatrix< const BATCH: usize, >: IsMatrix + IsDual { - /// Create a new dual number - fn new(val: S::RealMatrix) -> Self; + /// Create a new dual matrix from a real matrix for auto-differentiation with respect to self + /// + /// Typically this is not called directly, but through using a map auto-differentiation call: + /// + /// - ScalarValuedMapFromMatrix::fw_autodiff(...); + /// - VectorValuedMapFromMatrix::fw_autodiff(...); + /// - MatrixValuedMapFromMatrix::fw_autodiff(...); + fn new_with_dij(val: S::RealMatrix) -> Self; /// Get the derivative fn dij_val(self) -> Option>; @@ -78,13 +77,23 @@ impl IsDualMatrix { /// Create a new dual number - fn new(val: MatF64) -> Self { - DualMatrix { val, dij_val: None } + fn new_with_dij(val: MatF64) -> Self { + let mut dij_val = MutTensorDDRC::::from_shape([ROWS, COLS]); + for i in 0..ROWS { + for j in 0..COLS { + dij_val.mut_view().get_mut([i, j])[(i, j)] = 1.0; + } + } + + Self { + real_part: val, + dij_part: Some(dij_val), + } } /// Get the derivative fn dij_val(self) -> Option> { - self.dij_val + self.dij_part } } @@ -281,26 +290,11 @@ impl DualMatrix { rhs: rhs_dx.unwrap(), }) } - - /// Create a dual matrix - pub fn v(val: MatF64) -> Self { - let mut dij_val = 
MutTensorDDRC::::from_shape([ROWS, COLS]); - for i in 0..ROWS { - for j in 0..COLS { - dij_val.mut_view().get_mut([i, j])[(i, j)] = 1.0; - } - } - - Self { - val, - dij_val: Some(dij_val), - } - } } impl PartialEq for DualMatrix { fn eq(&self, other: &Self) -> bool { - self.val == other.val && self.dij_val == other.dij_val + self.real_part == other.real_part && self.dij_part == other.dij_part } } @@ -312,7 +306,7 @@ impl AbsDiffEq for DualMatrix } fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.val.abs_diff_eq(&other.val, epsilon) + self.real_part.abs_diff_eq(&other.real_part, epsilon) } } @@ -327,7 +321,8 @@ impl RelativeEq for DualMatrix epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.val.relative_eq(&other.val, epsilon, max_relative) + self.real_part + .relative_eq(&other.real_part, epsilon, max_relative) } } @@ -336,37 +331,40 @@ impl IsMatrix { fn mat_mul(&self, rhs: DualMatrix) -> DualMatrix { DualMatrix { - val: self.val * rhs.val, - dij_val: DualMatrix::binary_mm_dij( - &self.dij_val, - &rhs.dij_val, - |l_dij| l_dij * rhs.val, - |r_dij| self.val * r_dij, + real_part: self.real_part * rhs.real_part, + dij_part: DualMatrix::binary_mm_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| l_dij * rhs.real_part, + |r_dij| self.real_part * r_dij, ), } } fn from_scalar(val: DualScalar) -> Self { DualMatrix { - val: MatF64::::from_scalar(val.val), - dij_val: val.dij_val.map(|dij_val| { + real_part: MatF64::::from_scalar(val.real_part), + dij_part: val.dij_part.map(|dij_val| { MutTensorDDRC::from_map(&dij_val.view(), |v| MatF64::::from_scalar(*v)) }), } } fn from_real_matrix(val: MatF64) -> Self { - Self { val, dij_val: None } + Self { + real_part: val, + dij_part: None, + } } fn scaled(&self, s: DualScalar) -> Self { DualMatrix { - val: self.val * s.val, - dij_val: DualMatrix::binary_ms_dij( - &self.dij_val, - &s.dij_val, - |l_dij| l_dij * s.val, - |r_dij| self.val * *r_dij, + real_part: self.real_part * s.real_part, + dij_part: DualMatrix::binary_ms_dij( + &self.dij_part, + &s.dij_part, + |l_dij| l_dij * s.real_part, + |r_dij| self.real_part * *r_dij, ), } } @@ -377,9 +375,9 @@ impl IsMatrix fn get_elem(&self, idx: [usize; 2]) -> DualScalar { DualScalar { - val: self.val.get_elem(idx), - dij_val: self - .dij_val + real_part: self.real_part.get_elem(idx), + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[(idx[0], idx[1])])), } @@ -393,17 +391,17 @@ impl IsMatrix for j in 0..d_rows.len() { let d = d_rows.clone()[j].clone(); - val_mat[(i, j)] = d.val; - if d.dij_val.is_some() { - shape = Some(d.dij_val.clone().unwrap().dims()); + val_mat[(i, j)] = d.real_part; + if d.dij_part.is_some() { + shape = Some(d.dij_part.clone().unwrap().dims()); } } } if shape.is_none() { return DualMatrix { - val: val_mat, - dij_val: None, + real_part: val_mat, + dij_part: None, }; } let shape = shape.unwrap(); @@ -414,24 +412,24 @@ impl IsMatrix let d_rows = duals[i].clone(); for j in 0..d_rows.len() { let d = d_rows.clone()[j].clone(); - if d.dij_val.is_some() { + if d.dij_part.is_some() { for d0 in 0..shape[0] { for d1 in 0..shape[1] { r.mut_view().get_mut([d0, d1])[(i, j)] = - d.dij_val.clone().unwrap().get([d0, d1]); + d.dij_part.clone().unwrap().get([d0, d1]); } } } } } DualMatrix { - val: val_mat, - dij_val: Some(r), + real_part: val_mat, + dij_part: Some(r), } } fn real_matrix(&self) -> &MatF64 { - &self.val + &self.real_part } fn block_mat2x2( @@ -452,11 +450,11 @@ impl IsMatrix bot_row: DualMatrix, ) -> Self 
{ assert_eq!(R0 + R1, ROWS); - let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); + let maybe_dij = Self::two_dx(top_row.dij_part, bot_row.dij_part); Self { - val: MatF64::::block_mat2x1(top_row.val, bot_row.val), - dij_val: match maybe_dij { + real_part: MatF64::::block_mat2x1(top_row.real_part, bot_row.real_part), + dij_part: match maybe_dij { Some(dij_val) => { let mut r = MutTensorDDRC::::from_shape(dij_val.shape()); for d0 in 0..dij_val.shape()[0] { @@ -479,11 +477,11 @@ impl IsMatrix righ_col: DualMatrix, ) -> Self { assert_eq!(C0 + C1, COLS); - let maybe_dij = Self::two_dx(left_col.dij_val, righ_col.dij_val); + let maybe_dij = Self::two_dx(left_col.dij_part, righ_col.dij_part); Self { - val: MatF64::::block_mat1x2(left_col.val, righ_col.val), - dij_val: match maybe_dij { + real_part: MatF64::::block_mat1x2(left_col.real_part, righ_col.real_part), + dij_part: match maybe_dij { Some(dij_val) => { let mut r = MutTensorDDRC::::from_shape(dij_val.shape()); for d0 in 0..dij_val.shape()[0] { @@ -507,8 +505,8 @@ impl IsMatrix start_c: usize, ) -> DualMatrix { DualMatrix { - val: self.val.get_fixed_submat(start_r, start_c), - dij_val: self.dij_val.clone().map(|dij_val| { + real_part: self.real_part.get_fixed_submat(start_r, start_c), + dij_part: self.dij_part.clone().map(|dij_val| { MutTensorDDRC::from_map(&dij_val.view(), |v| v.get_fixed_submat(start_r, start_c)) }), } @@ -516,9 +514,9 @@ impl IsMatrix fn get_col_vec(&self, start_r: usize) -> DualVector { DualVector { - val: self.val.get_col_vec(start_r), - dij_val: self - .dij_val + real_part: self.real_part.get_col_vec(start_r), + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_col_vec(start_r))), } @@ -526,37 +524,37 @@ impl IsMatrix fn get_row_vec(&self, c: usize) -> DualVector { DualVector { - val: self.val.get_row_vec(c), - dij_val: self - .dij_val + real_part: self.real_part.get_row_vec(c), + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_row_vec(c))), } } - fn from_real_array2(vals: [[f64; COLS]; ROWS]) -> Self { + fn from_real_scalar_array2(vals: [[f64; COLS]; ROWS]) -> Self { DualMatrix { - val: MatF64::from_real_array2(vals), - dij_val: None, + real_part: MatF64::from_real_scalar_array2(vals), + dij_part: None, } } fn from_f64_array2(vals: [[f64; COLS]; ROWS]) -> Self { DualMatrix { - val: MatF64::from_real_array2(vals), - dij_val: None, + real_part: MatF64::from_real_scalar_array2(vals), + dij_part: None, } } fn from_f64(val: f64) -> Self { DualMatrix { - val: MatF64::::from_f64(val), - dij_val: None, + real_part: MatF64::::from_f64(val), + dij_part: None, } } fn set_col_vec(&mut self, c: usize, v: DualVector) { - self.val.set_col_vec(c, v.val); + self.real_part.set_col_vec(c, v.real_part); todo!(); } @@ -573,13 +571,13 @@ impl IsMatrix } fn set_elem(&mut self, idx: [usize; 2], val: DualScalar) { - self.val.set_elem(idx, val.val); - if self.dij_val.is_some() { - let dij = &mut self.dij_val.as_mut().unwrap(); + self.real_part.set_elem(idx, val.real_part); + if self.dij_part.is_some() { + let dij = &mut self.dij_part.as_mut().unwrap(); for i in 0..dij.dims()[0] { for j in 0..dij.dims()[1] { dij.mut_view().get_mut([i, j])[(idx[0], idx[1])] = - val.dij_val.clone().unwrap().get([i, j]); + val.dij_part.clone().unwrap().get([i, j]); } } } @@ -591,10 +589,10 @@ impl Add for DualMatrix { fn add(self, rhs: Self) -> Self::Output { DualMatrix { - val: self.val + rhs.val, - dij_val: Self::binary_mm_dij( - &self.dij_val, - 
&rhs.dij_val, + real_part: self.real_part + rhs.real_part, + dij_part: Self::binary_mm_dij( + &self.dij_part, + &rhs.dij_part, |l_dij| *l_dij, |r_dij| *r_dij, ), @@ -607,10 +605,10 @@ impl Sub for DualMatrix { fn sub(self, rhs: Self) -> Self::Output { DualMatrix { - val: self.val - rhs.val, - dij_val: Self::binary_mm_dij( - &self.dij_val, - &rhs.dij_val, + real_part: self.real_part - rhs.real_part, + dij_part: Self::binary_mm_dij( + &self.dij_part, + &rhs.dij_part, |l_dij| *l_dij, |r_dij| -r_dij, ), @@ -623,9 +621,9 @@ impl Neg for DualMatrix { fn neg(self) -> Self::Output { DualMatrix { - val: -self.val, - dij_val: self - .dij_val + real_part: -self.real_part, + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDDRC::from_map(&dij_val.view(), |v| -v)), } @@ -638,7 +636,7 @@ impl Zero for DualMatrix { } fn is_zero(&self) -> bool { - self.val.is_zero() + self.real_part.is_zero() } } @@ -647,12 +645,12 @@ impl Mul> for DualMatrix< fn mul(self, rhs: DualVector) -> Self::Output { Self::Output { - val: self.val * rhs.val, - dij_val: Self::binary_mv_dij( - &self.dij_val, - &rhs.dij_val, - |l_dij| l_dij * rhs.val, - |r_dij| self.val * r_dij, + real_part: self.real_part * rhs.real_part, + dij_part: Self::binary_mv_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| l_dij * rhs.real_part, + |r_dij| self.real_part * r_dij, ), } } @@ -660,14 +658,14 @@ impl Mul> for DualMatrix< impl Debug for DualMatrix { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.dij_val.is_some() { + if self.dij_part.is_some() { f.debug_struct("DualScalarLike") - .field("val", &self.val) - .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) + .field("val", &self.real_part) + .field("dij_val", &self.dij_part.as_ref().unwrap().elem_view()) .finish() } else { f.debug_struct("DualScalarLike") - .field("val", &self.val) + .field("val", &self.real_part) .finish() } } @@ -686,13 +684,24 @@ where LaneCount: SupportedLaneCount, { /// Create a new dual number - fn new(val: BatchMatF64) -> Self { - DualBatchMatrix { val, dij_val: None } + fn new_with_dij(val: BatchMatF64) -> Self { + let mut dij_val = + MutTensorDDRC::, ROWS, COLS>::from_shape([ROWS, COLS]); + for i in 0..ROWS { + for j in 0..COLS { + dij_val.mut_view().get_mut([i, j])[(i, j)] = BatchScalarF64::::from_f64(1.0); + } + } + + Self { + real_part: val, + dij_part: Some(dij_val), + } } /// Get the derivative fn dij_val(self) -> Option, ROWS, COLS>> { - self.dij_val + self.dij_part } } @@ -861,22 +870,6 @@ where rhs: rhs_dx.unwrap(), }) } - - /// Create a dual matrix - pub fn v(val: BatchMatF64) -> Self { - let mut dij_val = - MutTensorDDRC::, ROWS, COLS>::from_shape([ROWS, COLS]); - for i in 0..ROWS { - for j in 0..COLS { - dij_val.mut_view().get_mut([i, j])[(i, j)] = BatchScalarF64::::from_f64(1.0); - } - } - - Self { - val, - dij_val: Some(dij_val), - } - } } impl PartialEq @@ -885,7 +878,7 @@ where LaneCount: SupportedLaneCount, { fn eq(&self, other: &Self) -> bool { - self.val == other.val && self.dij_val == other.dij_val + self.real_part == other.real_part && self.dij_part == other.dij_part } } @@ -901,7 +894,7 @@ where } fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.val.abs_diff_eq(&other.val, epsilon) + self.real_part.abs_diff_eq(&other.real_part, epsilon) } } @@ -920,7 +913,8 @@ where epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.val.relative_eq(&other.val, epsilon, max_relative) + self.real_part + .relative_eq(&other.real_part, epsilon, max_relative) } } @@ 
-931,19 +925,19 @@ where { fn from_f64(val: f64) -> Self { DualBatchMatrix { - val: BatchMatF64::::from_f64(val), - dij_val: None, + real_part: BatchMatF64::::from_f64(val), + dij_part: None, } } fn set_elem(&mut self, idx: [usize; 2], val: DualBatchScalar) { - self.val.set_elem(idx, val.val); - if self.dij_val.is_some() { - let dij = &mut self.dij_val.as_mut().unwrap(); + self.real_part.set_elem(idx, val.real_part); + if self.dij_part.is_some() { + let dij = &mut self.dij_part.as_mut().unwrap(); for i in 0..dij.dims()[0] { for j in 0..dij.dims()[1] { dij.mut_view().get_mut([i, j])[(idx[0], idx[1])] = - val.dij_val.clone().unwrap().get([i, j]); + val.dij_part.clone().unwrap().get([i, j]); } } } @@ -951,8 +945,8 @@ where fn from_scalar(val: DualBatchScalar) -> Self { DualBatchMatrix { - val: BatchMatF64::::from_scalar(val.val), - dij_val: val.dij_val.map(|dij_val| { + real_part: BatchMatF64::::from_scalar(val.real_part), + dij_part: val.dij_part.map(|dij_val| { MutTensorDDRC::from_map(&dij_val.view(), |v| { BatchMatF64::::from_scalar(*v) }) @@ -965,28 +959,31 @@ where rhs: DualBatchMatrix, ) -> DualBatchMatrix { DualBatchMatrix { - val: self.val * rhs.val, - dij_val: DualBatchMatrix::::binary_mm_dij( - &self.dij_val, - &rhs.dij_val, - |l_dij| l_dij * rhs.val, - |r_dij| self.val * r_dij, + real_part: self.real_part * rhs.real_part, + dij_part: DualBatchMatrix::::binary_mm_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| l_dij * rhs.real_part, + |r_dij| self.real_part * r_dij, ), } } fn from_real_matrix(val: BatchMatF64) -> Self { - Self { val, dij_val: None } + Self { + real_part: val, + dij_part: None, + } } fn scaled(&self, s: DualBatchScalar) -> Self { DualBatchMatrix { - val: self.val * s.val, - dij_val: DualBatchMatrix::::binary_ms_dij( - &self.dij_val, - &s.dij_val, - |l_dij| l_dij * s.val, - |r_dij| self.val * *r_dij, + real_part: self.real_part * s.real_part, + dij_part: DualBatchMatrix::::binary_ms_dij( + &self.dij_part, + &s.dij_part, + |l_dij| l_dij * s.real_part, + |r_dij| self.real_part * *r_dij, ), } } @@ -997,9 +994,9 @@ where fn get_elem(&self, idx: [usize; 2]) -> DualBatchScalar { DualBatchScalar:: { - val: self.val.get_elem(idx), - dij_val: self - .dij_val + real_part: self.real_part.get_elem(idx), + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[(idx[0], idx[1])])), } @@ -1013,45 +1010,44 @@ where for j in 0..d_rows.len() { let d = d_rows.clone()[j].clone(); - val_mat[(i, j)] = d.val; - if d.dij_val.is_some() { - shape = Some(d.dij_val.clone().unwrap().dims()); + val_mat[(i, j)] = d.real_part; + if d.dij_part.is_some() { + shape = Some(d.dij_part.clone().unwrap().dims()); } } } if shape.is_none() { return DualBatchMatrix { - val: val_mat, - dij_val: None, + real_part: val_mat, + dij_part: None, }; } let shape = shape.unwrap(); let mut r = MutTensorDDRC::, ROWS, COLS>::from_shape(shape); - for i in 0..duals.len() { let d_rows = duals[i].clone(); for j in 0..d_rows.len() { let d = d_rows.clone()[j].clone(); - if d.dij_val.is_some() { + if d.dij_part.is_some() { for d0 in 0..shape[0] { for d1 in 0..shape[1] { r.mut_view().get_mut([d0, d1])[(i, j)] = - d.dij_val.clone().unwrap().get([d0, d1]); + d.dij_part.clone().unwrap().get([d0, d1]); } } } } } DualBatchMatrix { - val: val_mat, - dij_val: Some(r), + real_part: val_mat, + dij_part: Some(r), } } fn real_matrix(&self) -> &BatchMatF64 { - &self.val + &self.real_part } fn block_mat2x2( @@ -1078,11 +1074,14 @@ where bot_row: DualBatchMatrix, ) -> Self { assert_eq!(R0 + R1, ROWS); 
- let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); + let maybe_dij = Self::two_dx(top_row.dij_part, bot_row.dij_part); Self { - val: BatchMatF64::::block_mat2x1(top_row.val, bot_row.val), - dij_val: match maybe_dij { + real_part: BatchMatF64::::block_mat2x1( + top_row.real_part, + bot_row.real_part, + ), + dij_part: match maybe_dij { Some(dij_val) => { let mut r = MutTensorDDRC::, ROWS, COLS>::from_shape( dij_val.shape(), @@ -1108,11 +1107,14 @@ where righ_col: DualBatchMatrix, ) -> Self { assert_eq!(C0 + C1, COLS); - let maybe_dij = Self::two_dx(left_col.dij_val, righ_col.dij_val); + let maybe_dij = Self::two_dx(left_col.dij_part, righ_col.dij_part); Self { - val: BatchMatF64::::block_mat1x2(left_col.val, righ_col.val), - dij_val: match maybe_dij { + real_part: BatchMatF64::::block_mat1x2( + left_col.real_part, + righ_col.real_part, + ), + dij_part: match maybe_dij { Some(dij_val) => { let mut r = MutTensorDDRC::, ROWS, COLS>::from_shape( dij_val.shape(), @@ -1139,8 +1141,8 @@ where start_c: usize, ) -> DualBatchMatrix { DualBatchMatrix { - val: self.val.get_fixed_submat(start_r, start_c), - dij_val: self.dij_val.clone().map(|dij_val| { + real_part: self.real_part.get_fixed_submat(start_r, start_c), + dij_part: self.dij_part.clone().map(|dij_val| { MutTensorDDRC::from_map(&dij_val.view(), |v| v.get_fixed_submat(start_r, start_c)) }), } @@ -1148,9 +1150,9 @@ where fn get_col_vec(&self, start_r: usize) -> DualBatchVector { DualBatchVector { - val: self.val.get_col_vec(start_r), - dij_val: self - .dij_val + real_part: self.real_part.get_col_vec(start_r), + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_col_vec(start_r))), } @@ -1158,25 +1160,25 @@ where fn get_row_vec(&self, c: usize) -> DualBatchVector { DualBatchVector { - val: self.val.get_row_vec(c), - dij_val: self - .dij_val + real_part: self.real_part.get_row_vec(c), + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_row_vec(c))), } } - fn from_real_array2(vals: [[BatchScalarF64; COLS]; ROWS]) -> Self { + fn from_real_scalar_array2(vals: [[BatchScalarF64; COLS]; ROWS]) -> Self { DualBatchMatrix { - val: BatchMatF64::from_real_array2(vals), - dij_val: None, + real_part: BatchMatF64::from_real_scalar_array2(vals), + dij_part: None, } } fn from_f64_array2(vals: [[f64; COLS]; ROWS]) -> Self { DualBatchMatrix { - val: BatchMatF64::from_f64_array2(vals), - dij_val: None, + real_part: BatchMatF64::from_f64_array2(vals), + dij_part: None, } } @@ -1185,7 +1187,7 @@ where c: usize, v: as IsScalar>::Vector, ) { - self.val.set_col_vec(c, v.val); + self.real_part.set_col_vec(c, v.real_part); todo!(); } @@ -1194,11 +1196,11 @@ where } fn select(self, mask: &Mask, other: Self) -> Self { - let maybe_dij = Self::two_dx(self.dij_val, other.dij_val); + let maybe_dij = Self::two_dx(self.dij_part, other.dij_part); DualBatchMatrix { - val: self.val.select(mask, other.val), - dij_val: match maybe_dij { + real_part: self.real_part.select(mask, other.real_part), + dij_part: match maybe_dij { Some(dij) => { let mut r = MutTensorDDRC::, ROWS, COLS>::from_shape(dij.shape()); @@ -1225,10 +1227,10 @@ where fn add(self, rhs: Self) -> Self::Output { DualBatchMatrix { - val: self.val + rhs.val, - dij_val: Self::binary_mm_dij( - &self.dij_val, - &rhs.dij_val, + real_part: self.real_part + rhs.real_part, + dij_part: Self::binary_mm_dij( + &self.dij_part, + &rhs.dij_part, |l_dij| *l_dij, |r_dij| *r_dij, ), @@ -1245,10 +1247,10 @@ where fn sub(self, 
rhs: Self) -> Self::Output { DualBatchMatrix { - val: self.val - rhs.val, - dij_val: Self::binary_mm_dij( - &self.dij_val, - &rhs.dij_val, + real_part: self.real_part - rhs.real_part, + dij_part: Self::binary_mm_dij( + &self.dij_part, + &rhs.dij_part, |l_dij| *l_dij, |r_dij| -r_dij, ), @@ -1265,9 +1267,9 @@ where fn neg(self) -> Self::Output { DualBatchMatrix { - val: -self.val, - dij_val: self - .dij_val + real_part: -self.real_part, + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDDRC::from_map(&dij_val.view(), |v| -v)), } @@ -1284,7 +1286,7 @@ where } fn is_zero(&self) -> bool { - self.val.is_zero() + self.real_part.is_zero() } } @@ -1297,12 +1299,12 @@ where fn mul(self, rhs: DualBatchVector) -> Self::Output { Self::Output { - val: self.val * rhs.val, - dij_val: Self::binary_mv_dij( - &self.dij_val, - &rhs.dij_val, - |l_dij| l_dij * rhs.val, - |r_dij| self.val * r_dij, + real_part: self.real_part * rhs.real_part, + dij_part: Self::binary_mv_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| l_dij * rhs.real_part, + |r_dij| self.real_part * r_dij, ), } } @@ -1314,14 +1316,14 @@ where LaneCount: SupportedLaneCount, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.dij_val.is_some() { + if self.dij_part.is_some() { f.debug_struct("DualScalarLike") - .field("val", &self.val) - .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) + .field("val", &self.real_part) + .field("dij_val", &self.dij_part.as_ref().unwrap().elem_view()) .finish() } else { f.debug_struct("DualScalarLike") - .field("val", &self.val) + .field("val", &self.real_part) .finish() } } diff --git a/crates/sophus_core/src/calculus/dual/dual_scalar.rs b/crates/sophus_core/src/calculus/dual/dual_scalar.rs index ebddebaf..711d5450 100644 --- a/crates/sophus_core/src/calculus/dual/dual_scalar.rs +++ b/crates/sophus_core/src/calculus/dual/dual_scalar.rs @@ -2,18 +2,15 @@ use super::dual_matrix::DualBatchMatrix; use super::dual_matrix::DualMatrix; use super::dual_vector::DualBatchVector; use super::dual_vector::DualVector; -use crate::linalg::scalar::IsCoreScalar; -use crate::linalg::scalar::IsScalar; -use crate::linalg::scalar::IsSingleScalar; use crate::linalg::scalar::NumberCategory; use crate::linalg::BatchMatF64; use crate::linalg::BatchScalarF64; use crate::linalg::BatchVecF64; use crate::linalg::MatF64; use crate::linalg::VecF64; +use crate::prelude::*; use crate::tensor::mut_tensor::InnerScalarToVec; use crate::tensor::mut_tensor::MutTensorDD; -use crate::tensor::tensor_view::IsTensorLike; use approx::assert_abs_diff_eq; use approx::AbsDiffEq; use approx::RelativeEq; @@ -37,11 +34,11 @@ pub trait IsDual {} /// Dual number - a real number and an infinitesimal number #[derive(Clone)] pub struct DualScalar { - /// value - real number - pub val: f64, + /// real part + pub real_part: f64, - /// derivative - infinitesimal number - pub dij_val: Option>, + /// infinitesimal part - represents derivative + pub dij_part: Option>, } impl IsDual for DualScalar {} @@ -53,11 +50,11 @@ where BatchScalarF64: IsCoreScalar, LaneCount: SupportedLaneCount, { - /// value - real number - pub val: BatchScalarF64, + /// real part + pub real_part: BatchScalarF64, - /// derivative - infinitesimal number - pub dij_val: Option>>, + /// infinitesimal part - represents derivative + pub dij_part: Option>>, } impl IsDual for DualBatchScalar @@ -69,14 +66,32 @@ where /// Trait for scalar dual numbers pub trait IsDualScalar: IsScalar + IsDual { - /// Create a new dual number - fn new(val: Self::RealScalar) 
-> Self; - - /// Create a vector of dual numbers - fn vector_v(val: Self::RealVector) -> Self::DualVector; - - /// Create a matrix of dual numbers - fn matrix_v( + /// Create a new dual scalar from real scalar for auto-differentiation with respect to self + /// + /// Typically this is not called directly, but through using a curve auto-differentiation call: + /// + /// - ScalarValuedCurve::fw_autodiff(...); + /// - VectorValuedCurve::fw_autodiff(...); + /// - MatrixValuedCurve::fw_autodiff(...); + fn new_with_dij(val: Self::RealScalar) -> Self; + + /// Create a new dual vector from a real vector for auto-differentiation with respect to self + /// + /// Typically this is not called directly, but through using a map auto-differentiation call: + /// + /// - ScalarValuedMapFromVector::fw_autodiff(...); + /// - VectorValuedMapFromVector::fw_autodiff(...); + /// - MatrixValuedMapFromVector::fw_autodiff(...); + fn vector_with_dij(val: Self::RealVector) -> Self::DualVector; + + /// Create a new dual matrix from a real matrix for auto-differentiation with respect to self + /// + /// Typically this is not called directly, but through using a map auto-differentiation call: + /// + /// - ScalarValuedMapFromMatrix::fw_autodiff(...); + /// - VectorValuedMapFromMatrix::fw_autodiff(...); + /// - MatrixValuedMapFromMatrix::fw_autodiff(...); + fn matrix_with_dij( val: Self::RealMatrix, ) -> Self::DualMatrix; @@ -92,7 +107,7 @@ impl AbsDiffEq for DualScalar { } fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.val.abs_diff_eq(&other.val, epsilon) + self.real_part.abs_diff_eq(&other.real_part, epsilon) } } @@ -107,7 +122,8 @@ impl RelativeEq for DualScalar { epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.val.relative_eq(&other.val, epsilon, max_relative) + self.real_part + .relative_eq(&other.real_part, epsilon, max_relative) } } @@ -128,7 +144,7 @@ impl IsSingleScalar for DualScalar { type SingleMatrix = DualMatrix; fn single_real_scalar(&self) -> f64 { - self.val + self.real_part } fn single_scalar(&self) -> Self { @@ -136,7 +152,7 @@ impl IsSingleScalar for DualScalar { } fn i64_floor(&self) -> i64 { - self.val.floor() as i64 + self.real_part.floor() as i64 } } @@ -158,31 +174,31 @@ impl Zero for DualScalar { } fn is_zero(&self) -> bool { - self.val == ::from_f64(0.0).real_part() + self.real_part == ::from_f64(0.0).real_part() } } impl IsDualScalar<1> for DualScalar { - fn new(val: f64) -> Self { + fn new_with_dij(val: f64) -> Self { let dij_val = >::from_shape_and_val([1, 1], 1.0); Self { - val, - dij_val: Some(dij_val), + real_part: val, + dij_part: Some(dij_val), } } - fn vector_v(val: Self::RealVector) -> Self::Vector { - DualVector::::v(val) + fn vector_with_dij(val: Self::RealVector) -> Self::Vector { + DualVector::::new_with_dij(val) } fn dij_val(self) -> Option> { - self.dij_val + self.dij_part } - fn matrix_v( + fn matrix_with_dij( val: Self::RealMatrix, ) -> Self::Matrix { - DualMatrix::::v(val) + DualMatrix::::new_with_dij(val) } } @@ -223,8 +239,8 @@ impl Neg for DualScalar { fn neg(self) -> Self { Self { - val: -self.val, - dij_val: match self.dij_val.clone() { + real_part: -self.real_part, + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = >::from_map(&dij_val.view(), |v: &f64| -(*v)); @@ -238,13 +254,13 @@ impl Neg for DualScalar { impl PartialEq for DualScalar { fn eq(&self, other: &Self) -> bool { - self.val == other.val + self.real_part == other.real_part } } impl PartialOrd for DualScalar { fn partial_cmp(&self, 
other: &Self) -> Option { - self.val.partial_cmp(&other.val) + self.real_part.partial_cmp(&other.real_part) } } @@ -256,14 +272,14 @@ impl From for DualScalar { impl Debug for DualScalar { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.dij_val.is_some() { + if self.dij_part.is_some() { f.debug_struct("DualScalar") - .field("val", &self.val) - .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) + .field("val", &self.real_part) + .field("dij_val", &self.dij_part.as_ref().unwrap().elem_view()) .finish() } else { f.debug_struct("DualScalar") - .field("val", &self.val) + .field("val", &self.real_part) .finish() } } @@ -283,24 +299,27 @@ impl IsScalar<1> for DualScalar { type Mask = bool; fn from_real_scalar(val: f64) -> Self { - Self { val, dij_val: None } + Self { + real_part: val, + dij_part: None, + } } fn from_real_array(arr: [f64; 1]) -> Self { Self::from_f64(arr[0]) } - fn real_array(&self) -> [f64; 1] { - [self.val] + fn to_real_array(&self) -> [f64; 1] { + [self.real_part] } fn cos(self) -> DualScalar { Self { - val: self.val.cos(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.cos(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = >::from_map(&dij_val.view(), |dij: &f64| { - -(*dij) * self.val.sin() + -(*dij) * self.real_part.sin() }); Some(dyn_mat) } @@ -311,11 +330,11 @@ impl IsScalar<1> for DualScalar { fn sin(self) -> DualScalar { Self { - val: self.val.sin(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.sin(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = >::from_map(&dij_val.view(), |dij: &f64| { - *dij * self.val.cos() + *dij * self.real_part.cos() }); Some(dyn_mat) } @@ -326,11 +345,11 @@ impl IsScalar<1> for DualScalar { fn abs(self) -> Self { Self { - val: self.val.abs(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.abs(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = >::from_map(&dij_val.view(), |dij: &f64| { - *dij * self.val.signum() + *dij * self.real_part.signum() }); Some(dyn_mat) @@ -341,27 +360,28 @@ impl IsScalar<1> for DualScalar { } fn atan2(self, rhs: Self) -> Self { - let inv_sq_nrm: f64 = 1.0 / (self.val * self.val + rhs.val * rhs.val); + let inv_sq_nrm: f64 = + 1.0 / (self.real_part * self.real_part + rhs.real_part * rhs.real_part); Self { - val: self.val.atan2(rhs.val), - dij_val: Self::binary_dij( - &self.dij_val, - &rhs.dij_val, - |l_dij| inv_sq_nrm * ((*l_dij) * rhs.val), - |r_dij| -inv_sq_nrm * (self.val * (*r_dij)), + real_part: self.real_part.atan2(rhs.real_part), + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| inv_sq_nrm * ((*l_dij) * rhs.real_part), + |r_dij| -inv_sq_nrm * (self.real_part * (*r_dij)), ), } } fn real_part(&self) -> f64 { - self.val + self.real_part } fn sqrt(self) -> Self { - let sqrt = self.val.sqrt(); + let sqrt = self.real_part.sqrt(); Self { - val: sqrt, - dij_val: match self.dij_val { + real_part: sqrt, + dij_part: match self.dij_part { Some(dij) => { let out_dij = >::from_map(&dij.view(), |dij: &f64| { (*dij) * 1.0 / (2.0 * sqrt) @@ -375,8 +395,8 @@ impl IsScalar<1> for DualScalar { fn to_vec(self) -> DualVector<1> { DualVector::<1> { - val: self.val.real_part().to_vec(), - dij_val: match self.dij_val { + real_part: self.real_part.real_part().to_vec(), + dij_part: match self.dij_part { Some(dij) => { let tmp = dij.inner_scalar_to_vec(); Some(tmp) @@ -388,10 +408,10 @@ impl IsScalar<1> for DualScalar { fn 
tan(self) -> Self { Self { - val: self.val.tan(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.tan(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { - let c = self.val.cos(); + let c = self.real_part.cos(); let sec_squared = 1.0 / (c * c); let dyn_mat = >::from_map(&dij_val.view(), |dij: &f64| { *dij * sec_squared @@ -405,10 +425,10 @@ impl IsScalar<1> for DualScalar { fn acos(self) -> Self { Self { - val: self.val.acos(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.acos(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { - let dval = -1.0 / (1.0 - self.val * self.val).sqrt(); + let dval = -1.0 / (1.0 - self.real_part * self.real_part).sqrt(); let dyn_mat = >::from_map(&dij_val.view(), |dij: &f64| *dij * dval); Some(dyn_mat) @@ -420,10 +440,10 @@ impl IsScalar<1> for DualScalar { fn asin(self) -> Self { Self { - val: self.val.asin(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.asin(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { - let dval = 1.0 / (1.0 - self.val * self.val).sqrt(); + let dval = 1.0 / (1.0 - self.real_part * self.real_part).sqrt(); let dyn_mat = >::from_map(&dij_val.view(), |dij: &f64| *dij * dval); Some(dyn_mat) @@ -435,10 +455,10 @@ impl IsScalar<1> for DualScalar { fn atan(self) -> Self { Self { - val: self.val.atan(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.atan(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { - let dval = 1.0 / (1.0 + self.val * self.val); + let dval = 1.0 / (1.0 + self.real_part * self.real_part); let dyn_mat = >::from_map(&dij_val.view(), |dij: &f64| *dij * dval); Some(dyn_mat) @@ -450,8 +470,8 @@ impl IsScalar<1> for DualScalar { fn fract(self) -> Self { Self { - val: self.val.fract(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.fract(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = >::from_map(&dij_val.view(), |dij: &f64| *dij); Some(dyn_mat) @@ -462,11 +482,14 @@ impl IsScalar<1> for DualScalar { } fn floor(&self) -> f64 { - self.val.floor() + self.real_part.floor() } fn from_f64(val: f64) -> Self { - Self { val, dij_val: None } + Self { + real_part: val, + dij_part: None, + } } fn scalar_examples() -> Vec { @@ -479,8 +502,8 @@ impl IsScalar<1> for DualScalar { fn signum(&self) -> Self { Self { - val: self.val.signum(), - dij_val: None, + real_part: self.real_part.signum(), + dij_part: None, } } @@ -491,7 +514,7 @@ impl IsScalar<1> for DualScalar { type DualMatrix = DualMatrix; fn less_equal(&self, rhs: &Self) -> Self::Mask { - self.val.less_equal(&rhs.val) + self.real_part.less_equal(&rhs.real_part) } fn to_dual(self) -> Self::DualScalar { @@ -507,7 +530,7 @@ impl IsScalar<1> for DualScalar { } fn greater_equal(&self, rhs: &Self) -> Self::Mask { - self.val.greater_equal(&rhs.val) + self.real_part.greater_equal(&rhs.real_part) } } @@ -528,11 +551,16 @@ impl Add for DualScalar { impl Add<&DualScalar> for DualScalar { type Output = DualScalar; fn add(self, rhs: &Self) -> Self::Output { - let r = self.val + rhs.val; + let r = self.real_part + rhs.real_part; Self { - val: r, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij), + real_part: r, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| *l_dij, + |r_dij| *r_dij, + ), } } } @@ -547,15 +575,15 @@ impl Mul for DualScalar { impl Mul<&DualScalar> for DualScalar { type Output = DualScalar; fn mul(self, rhs: &Self) -> Self::Output { - 
let r = self.val * rhs.val; + let r = self.real_part * rhs.real_part; Self { - val: r, - dij_val: Self::binary_dij( - &self.dij_val, - &rhs.dij_val, - |l_dij| (*l_dij) * rhs.val, - |r_dij| (*r_dij) * self.val, + real_part: r, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| (*l_dij) * rhs.real_part, + |r_dij| (*r_dij) * self.real_part, ), } } @@ -571,14 +599,14 @@ impl Div for DualScalar { impl Div<&DualScalar> for DualScalar { type Output = DualScalar; fn div(self, rhs: &Self) -> Self::Output { - let rhs_inv = 1.0 / rhs.val; + let rhs_inv = 1.0 / rhs.real_part; Self { - val: self.val * rhs_inv, - dij_val: Self::binary_dij( - &self.dij_val, - &rhs.dij_val, + real_part: self.real_part * rhs_inv, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, |l_dij| l_dij * rhs_inv, - |r_dij| -self.val * r_dij * rhs_inv * rhs_inv, + |r_dij| -self.real_part * r_dij * rhs_inv * rhs_inv, ), } } @@ -595,8 +623,13 @@ impl Sub<&DualScalar> for DualScalar { type Output = DualScalar; fn sub(self, rhs: &Self) -> Self::Output { Self { - val: self.val - rhs.val, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| -r_dij), + real_part: self.real_part - rhs.real_part, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| *l_dij, + |r_dij| -r_dij, + ), } } } @@ -606,26 +639,26 @@ where BatchScalarF64: IsCoreScalar, LaneCount: SupportedLaneCount, { - fn new(val: BatchScalarF64) -> Self { + fn new_with_dij(val: BatchScalarF64) -> Self { let dij_val = MutTensorDD::from_shape_and_val([1, 1], BatchScalarF64::::ones()); Self { - val, - dij_val: Some(dij_val), + real_part: val, + dij_part: Some(dij_val), } } fn dij_val(self) -> Option>> { - self.dij_val + self.dij_part } - fn vector_v(val: Self::RealVector) -> Self::Vector { - DualBatchVector::::v(val) + fn vector_with_dij(val: Self::RealVector) -> Self::Vector { + DualBatchVector::::new_with_dij(val) } - fn matrix_v( + fn matrix_with_dij( val: Self::RealMatrix, ) -> Self::Matrix { - DualBatchMatrix::::v(val) + DualBatchMatrix::::new_with_dij(val) } } @@ -673,7 +706,7 @@ where } fn is_zero(&self) -> bool { - self.val == >::from_f64(0.0).real_part() + self.real_part == >::from_f64(0.0).real_part() } } @@ -730,8 +763,8 @@ where fn neg(self) -> Self { Self { - val: -self.val, - dij_val: match self.dij_val.clone() { + real_part: -self.real_part, + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |v: &BatchScalarF64| -*v); @@ -750,7 +783,7 @@ where LaneCount: SupportedLaneCount, { fn eq(&self, other: &Self) -> bool { - self.val == other.val + self.real_part == other.real_part } } @@ -767,11 +800,10 @@ where fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { for i in 0..BATCH { - if !self - .val - .extract_single(i) - .abs_diff_eq(&other.val.extract_single(i), epsilon.extract_single(i)) - { + if !self.real_part.extract_single(i).abs_diff_eq( + &other.real_part.extract_single(i), + epsilon.extract_single(i), + ) { return false; } } @@ -795,8 +827,8 @@ where max_relative: Self::Epsilon, ) -> bool { for i in 0..BATCH { - if !self.val.extract_single(i).relative_eq( - &other.val.extract_single(i), + if !self.real_part.extract_single(i).relative_eq( + &other.real_part.extract_single(i), epsilon.extract_single(i), max_relative.extract_single(i), ) { @@ -823,14 +855,14 @@ where LaneCount: SupportedLaneCount, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.dij_val.is_some() { + if 
self.dij_part.is_some() { f.debug_struct("DualScalar") - .field("val", &self.val) - .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) + .field("val", &self.real_part) + .field("dij_val", &self.dij_part.as_ref().unwrap().elem_view()) .finish() } else { f.debug_struct("DualScalar") - .field("val", &self.val) + .field("val", &self.real_part) .finish() } } @@ -854,7 +886,10 @@ where type Mask = Mask; fn from_real_scalar(val: BatchScalarF64) -> Self { - Self { val, dij_val: None } + Self { + real_part: val, + dij_part: None, + } } fn scalar_examples() -> Vec { @@ -865,14 +900,14 @@ where Self::from_real_scalar(BatchScalarF64::::from_real_array(arr)) } - fn real_array(&self) -> [f64; BATCH] { - self.val.real_array() + fn to_real_array(&self) -> [f64; BATCH] { + self.real_part.to_real_array() } fn extract_single(&self, i: usize) -> Self::SingleScalar { Self::SingleScalar { - val: self.val.extract_single(i), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.extract_single(i), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { @@ -891,12 +926,12 @@ where LaneCount: SupportedLaneCount, { Self { - val: self.val.cos(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.cos(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { - -*dij * self.val.sin() + -*dij * self.real_part.sin() }); Some(dyn_mat) } @@ -907,8 +942,8 @@ where fn signum(&self) -> Self { Self { - val: self.val.signum(), - dij_val: None, + real_part: self.real_part.signum(), + dij_part: None, } } @@ -918,12 +953,12 @@ where LaneCount: SupportedLaneCount, { Self { - val: self.val.sin(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.sin(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { - *dij * self.val.cos() + *dij * self.real_part.cos() }); Some(dyn_mat) } @@ -934,12 +969,12 @@ where fn abs(self) -> Self { Self { - val: self.val.abs(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.abs(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { - *dij * self.val.signum() + *dij * self.real_part.signum() }); Some(dyn_mat) } @@ -949,28 +984,28 @@ where } fn atan2(self, rhs: Self) -> Self { - let inv_sq_nrm: BatchScalarF64 = - BatchScalarF64::ones() / (self.val * self.val + rhs.val * rhs.val); + let inv_sq_nrm: BatchScalarF64 = BatchScalarF64::ones() + / (self.real_part * self.real_part + rhs.real_part * rhs.real_part); Self { - val: self.val.atan2(rhs.val), - dij_val: Self::binary_dij( - &self.dij_val, - &rhs.dij_val, - |l_dij| inv_sq_nrm * ((*l_dij) * rhs.val), - |r_dij| -inv_sq_nrm * (self.val * (*r_dij)), + real_part: self.real_part.atan2(rhs.real_part), + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| inv_sq_nrm * ((*l_dij) * rhs.real_part), + |r_dij| -inv_sq_nrm * (self.real_part * (*r_dij)), ), } } fn real_part(&self) -> BatchScalarF64 { - self.val + self.real_part } fn sqrt(self) -> Self { - let sqrt = self.val.sqrt(); + let sqrt = self.real_part.sqrt(); Self { - val: sqrt, - dij_val: match self.dij_val { + real_part: sqrt, + dij_part: match self.dij_part { Some(dij) => { let out_dij = MutTensorDD::from_map(&dij.view(), |dij: &BatchScalarF64| { @@ -986,8 +1021,8 @@ where fn 
to_vec(self) -> DualBatchVector<1, BATCH> { DualBatchVector::<1, BATCH> { - val: self.val.real_part().to_vec(), - dij_val: match self.dij_val { + real_part: self.real_part.real_part().to_vec(), + dij_part: match self.dij_part { Some(dij) => { let tmp = dij.inner_scalar_to_vec(); Some(tmp) @@ -999,10 +1034,10 @@ where fn tan(self) -> Self { Self { - val: self.val.tan(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.tan(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { - let c = self.val.cos(); + let c = self.real_part.cos(); let sec_squared = BatchScalarF64::::ones() / (c * c); let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { @@ -1017,11 +1052,12 @@ where fn acos(self) -> Self { Self { - val: self.val.acos(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.acos(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dval = -BatchScalarF64::::ones() - / (BatchScalarF64::::ones() - self.val * self.val).sqrt(); + / (BatchScalarF64::::ones() - self.real_part * self.real_part) + .sqrt(); let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { *dij * dval @@ -1035,11 +1071,12 @@ where fn asin(self) -> Self { Self { - val: self.val.asin(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.asin(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dval = BatchScalarF64::::ones() - / (BatchScalarF64::::ones() - self.val * self.val).sqrt(); + / (BatchScalarF64::::ones() - self.real_part * self.real_part) + .sqrt(); let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { *dij * dval @@ -1053,11 +1090,11 @@ where fn atan(self) -> Self { Self { - val: self.val.atan(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.atan(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dval = BatchScalarF64::::ones() - / (BatchScalarF64::::ones() + self.val * self.val); + / (BatchScalarF64::::ones() + self.real_part * self.real_part); let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { *dij * dval @@ -1071,8 +1108,8 @@ where fn fract(self) -> Self { Self { - val: self.val.fract(), - dij_val: match self.dij_val.clone() { + real_part: self.real_part.fract(), + dij_part: match self.dij_part.clone() { Some(dij_val) => { let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| *dij); @@ -1084,7 +1121,7 @@ where } fn floor(&self) -> BatchScalarF64 { - self.val.floor() + self.real_part.floor() } fn from_f64(val: f64) -> Self { @@ -1093,10 +1130,6 @@ where type DualScalar = Self; - fn scalar(self) -> Self { - self - } - fn ones() -> Self { Self::from_f64(1.0) } @@ -1120,7 +1153,7 @@ where } fn less_equal(&self, rhs: &Self) -> Self::Mask { - self.val.less_equal(&rhs.val) + self.real_part.less_equal(&rhs.real_part) } fn to_dual(self) -> Self::DualScalar { @@ -1129,8 +1162,8 @@ where fn select(self, mask: &Self::Mask, other: Self) -> Self { Self { - val: self.val.select(mask, other.val), - dij_val: match (self.dij_val, other.dij_val) { + real_part: self.real_part.select(mask, other.real_part), + dij_part: match (self.dij_part, other.dij_part) { (Some(lhs), Some(rhs)) => { let dyn_mat = MutTensorDD::from_map2( &lhs.view(), @@ -1145,7 +1178,7 @@ where } fn greater_equal(&self, rhs: &Self) -> Self::Mask { - self.val.greater_equal(&rhs.val) + self.real_part.greater_equal(&rhs.real_part) } } @@ -1186,11 +1219,16 @@ where { type Output = DualBatchScalar; fn add(self, rhs: &Self) -> 
Self::Output { - let r = self.val + rhs.val; + let r = self.real_part + rhs.real_part; Self { - val: r, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij), + real_part: r, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| *l_dij, + |r_dij| *r_dij, + ), } } } @@ -1213,15 +1251,15 @@ where { type Output = DualBatchScalar; fn mul(self, rhs: &Self) -> Self::Output { - let r = self.val * rhs.val; + let r = self.real_part * rhs.real_part; Self { - val: r, - dij_val: Self::binary_dij( - &self.dij_val, - &rhs.dij_val, - |l_dij| (*l_dij) * rhs.val, - |r_dij| (*r_dij) * self.val, + real_part: r, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| (*l_dij) * rhs.real_part, + |r_dij| (*r_dij) * self.real_part, ), } } @@ -1245,14 +1283,14 @@ where { type Output = DualBatchScalar; fn div(self, rhs: &Self) -> Self::Output { - let rhs_inv = BatchScalarF64::::ones() / rhs.val; + let rhs_inv = BatchScalarF64::::ones() / rhs.real_part; Self { - val: self.val * rhs_inv, - dij_val: Self::binary_dij( - &self.dij_val, - &rhs.dij_val, + real_part: self.real_part * rhs_inv, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, |l_dij| *l_dij * rhs_inv, - |r_dij| -self.val * (*r_dij) * rhs_inv * rhs_inv, + |r_dij| -self.real_part * (*r_dij) * rhs_inv * rhs_inv, ), } } @@ -1277,10 +1315,10 @@ where type Output = DualBatchScalar; fn sub(self, rhs: &Self) -> Self::Output { Self { - val: self.val - rhs.val, - dij_val: Self::binary_dij( - &self.dij_val, - &rhs.dij_val, + real_part: self.real_part - rhs.real_part, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, |l_dij| *l_dij, |r_dij| -(*r_dij), ), diff --git a/crates/sophus_core/src/calculus/dual/dual_vector.rs b/crates/sophus_core/src/calculus/dual/dual_vector.rs index 5ad83b0b..40bdc05c 100644 --- a/crates/sophus_core/src/calculus/dual/dual_vector.rs +++ b/crates/sophus_core/src/calculus/dual/dual_vector.rs @@ -1,25 +1,16 @@ -use approx::AbsDiffEq; -use approx::RelativeEq; - use super::dual_matrix::DualMatrix; use super::dual_scalar::DualBatchScalar; use super::dual_scalar::DualScalar; -use super::dual_scalar::IsDual; -use super::dual_scalar::IsDualScalar; -use crate::calculus::dual::dual_matrix::DualBatchMatrix; -use crate::linalg::matrix::IsMatrix; -use crate::linalg::scalar::IsCoreScalar; -use crate::linalg::scalar::IsScalar; -use crate::linalg::vector::IsSingleVector; -use crate::linalg::vector::IsVector; +use crate::calculus::dual::DualBatchMatrix; use crate::linalg::BatchScalarF64; use crate::linalg::BatchVecF64; use crate::linalg::VecF64; +use crate::prelude::*; use crate::tensor::mut_tensor::InnerVecToMat; use crate::tensor::mut_tensor::MutTensorDD; use crate::tensor::mut_tensor::MutTensorDDR; -use crate::tensor::mut_tensor_view::IsMutTensorLike; -use crate::tensor::tensor_view::IsTensorLike; +use approx::AbsDiffEq; +use approx::RelativeEq; use std::fmt::Debug; use std::ops::Add; use std::ops::Neg; @@ -31,18 +22,24 @@ use std::simd::SupportedLaneCount; /// Dual vector #[derive(Clone)] pub struct DualVector { - /// value - real vector - pub val: VecF64, - /// derivative - infinitesimal vector - pub dij_val: Option>, + /// real part + pub real_part: VecF64, + /// infinitesimal part - represents derivative + pub dij_part: Option>, } /// Trait for scalar dual numbers pub trait IsDualVector, const ROWS: usize, const BATCH: usize>: IsVector + IsDual { - /// Create a new dual number - fn new(val: S::RealVector) -> Self; + /// Create a new dual vector from 
a real vector for auto-differentiation with respect to self + /// + /// Typically this is not called directly, but through using a map auto-differentiation call: + /// + /// - ScalarValuedMapFromVector::fw_autodiff(...); + /// - VectorValuedMapFromVector::fw_autodiff(...); + /// - MatrixValuedMapFromVector::fw_autodiff(...); + fn new_with_dij(val: S::RealVector) -> Self; /// Get the derivative fn dij_val(self) -> Option>; @@ -55,34 +52,42 @@ where BatchScalarF64: IsCoreScalar, LaneCount: SupportedLaneCount, { - /// value - real vector - pub val: BatchVecF64, - /// derivative - infinitesimal vector - pub dij_val: Option, ROWS>>, + /// real part + pub real_part: BatchVecF64, + /// infinitesimal part - represents derivative + pub dij_part: Option, ROWS>>, } impl IsDual for DualVector {} impl IsDualVector for DualVector { - fn new(val: VecF64) -> Self { - DualVector::v(val) + fn new_with_dij(val: VecF64) -> Self { + let mut dij_val = MutTensorDDR::::from_shape([ROWS, 1]); + for i in 0..ROWS { + dij_val.mut_view().get_mut([i, 0])[(i, 0)] = 1.0; + } + + Self { + real_part: val, + dij_part: Some(dij_val), + } } fn dij_val(self) -> Option> { - self.dij_val + self.dij_part } } impl num_traits::Zero for DualVector { fn zero() -> Self { DualVector { - val: VecF64::zeros(), - dij_val: None, + real_part: VecF64::zeros(), + dij_part: None, } } fn is_zero(&self) -> bool { - self.val == VecF64::::zeros() + self.real_part == VecF64::::zeros() } } @@ -91,7 +96,7 @@ where DualVector: IsVector, { fn set_real_scalar(&mut self, idx: usize, v: f64) { - self.val[idx] = v; + self.real_part[idx] = v; } } @@ -107,19 +112,6 @@ impl DijPair { } impl DualVector { - /// create a dual vector - pub fn v(val: VecF64) -> Self { - let mut dij_val = MutTensorDDR::::from_shape([ROWS, 1]); - for i in 0..ROWS { - dij_val.mut_view().get_mut([i, 0])[(i, 0)] = 1.0; - } - - Self { - val, - dij_val: Some(dij_val), - } - } - fn binary_dij< const R0: usize, const R1: usize, @@ -214,9 +206,9 @@ impl Neg for DualVector { fn neg(self) -> Self::Output { DualVector { - val: -self.val, - dij_val: self - .dij_val + real_part: -self.real_part, + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| -v)), } @@ -228,8 +220,13 @@ impl Sub for DualVector { fn sub(self, rhs: Self) -> Self::Output { DualVector { - val: self.val - rhs.val, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| -r_dij), + real_part: self.real_part - rhs.real_part, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| *l_dij, + |r_dij| -r_dij, + ), } } } @@ -239,22 +236,27 @@ impl Add for DualVector { fn add(self, rhs: Self) -> Self::Output { DualVector { - val: self.val + rhs.val, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij), + real_part: self.real_part + rhs.real_part, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| *l_dij, + |r_dij| *r_dij, + ), } } } impl Debug for DualVector { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.dij_val.is_some() { + if self.dij_part.is_some() { f.debug_struct("DualScalarLike") - .field("val", &self.val) - .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) + .field("val", &self.real_part) + .field("dij_val", &self.dij_part.as_ref().unwrap().elem_view()) .finish() } else { f.debug_struct("DualScalarLike") - .field("val", &self.val) + .field("val", &self.real_part) .finish() } } @@ -262,7 +264,7 @@ impl Debug for DualVector { impl 
PartialEq for DualVector { fn eq(&self, other: &Self) -> bool { - self.val == other.val && self.dij_val == other.dij_val + self.real_part == other.real_part && self.dij_part == other.dij_part } } @@ -274,7 +276,7 @@ impl AbsDiffEq for DualVector { } fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.val.abs_diff_eq(&other.val, epsilon) + self.real_part.abs_diff_eq(&other.real_part, epsilon) } } @@ -289,27 +291,16 @@ impl RelativeEq for DualVector { epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.val.relative_eq(&other.val, epsilon, max_relative) + self.real_part + .relative_eq(&other.real_part, epsilon, max_relative) } } impl IsVector for DualVector { fn from_f64(val: f64) -> Self { DualVector { - val: VecF64::::from_scalar(val), - dij_val: None, - } - } - - fn set_real_elem(&mut self, idx: usize, v: f64) { - self.val[idx] = v; - if self.dij_val.is_some() { - let dij = &mut self.dij_val.as_mut().unwrap(); - for i in 0..dij.dims()[0] { - for j in 0..dij.dims()[1] { - dij.mut_view().get_mut([i, j])[idx] = 0.0; - } - } + real_part: VecF64::::from_scalar(val), + dij_part: None, } } @@ -323,9 +314,9 @@ impl IsVector for DualVector { fn get_elem(&self, idx: usize) -> DualScalar { DualScalar { - val: self.val[idx], - dij_val: self - .dij_val + real_part: self.real_part[idx], + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[idx])), } @@ -337,16 +328,16 @@ impl IsVector for DualVector { for i in 0..duals.len() { let d = duals.clone()[i].clone(); - val_v[i] = d.val; - if d.dij_val.is_some() { - shape = Some(d.dij_val.clone().unwrap().dims()); + val_v[i] = d.real_part; + if d.dij_part.is_some() { + shape = Some(d.dij_part.clone().unwrap().dims()); } } if shape.is_none() { return DualVector { - val: val_v, - dij_val: None, + real_part: val_v, + dij_part: None, }; } let shape = shape.unwrap(); @@ -355,49 +346,43 @@ impl IsVector for DualVector { for i in 0..duals.len() { let d = duals.clone()[i].clone(); - if d.dij_val.is_some() { + if d.dij_part.is_some() { for d0 in 0..shape[0] { for d1 in 0..shape[1] { r.mut_view().get_mut([d0, d1])[(i, 0)] = - d.dij_val.clone().unwrap().get([d0, d1]); + d.dij_part.clone().unwrap().get([d0, d1]); } } } } DualVector { - val: val_v, - dij_val: Some(r), + real_part: val_v, + dij_part: Some(r), } } fn from_real_array(vals: [f64; ROWS]) -> Self { DualVector { - val: VecF64::from_real_array(vals), - dij_val: None, + real_part: VecF64::from_real_array(vals), + dij_part: None, } } fn from_real_vector(val: VecF64) -> Self { - Self { val, dij_val: None } + Self { + real_part: val, + dij_part: None, + } } fn real_vector(&self) -> &VecF64 { - &self.val - } - - fn get_fixed_rows(&self, start: usize) -> DualVector { - DualVector { - val: self.val.fixed_rows::(start).into(), - dij_val: self.dij_val.clone().map(|dij_val| { - MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start).into()) - }), - } + &self.real_part } fn to_mat(self) -> DualMatrix { DualMatrix { - val: self.val, - dij_val: self.dij_val.map(|dij| dij.inner_vec_to_mat()), + real_part: self.real_part, + dij_part: self.dij_part.map(|dij| dij.inner_vec_to_mat()), } } @@ -407,10 +392,10 @@ impl IsVector for DualVector { ) -> Self { assert_eq!(R0 + R1, ROWS); - let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); + let maybe_dij = Self::two_dx(top_row.dij_part, bot_row.dij_part); Self { - val: VecF64::::block_vec2(top_row.val, bot_row.val), - dij_val: match maybe_dij { + real_part: 
VecF64::::block_vec2(top_row.real_part, bot_row.real_part), + dij_part: match maybe_dij { Some(dij_val) => { let mut r = MutTensorDDR::::from_shape(dij_val.shape()); for d0 in 0..dij_val.shape()[0] { @@ -430,12 +415,12 @@ impl IsVector for DualVector { fn scaled(&self, s: DualScalar) -> Self { DualVector { - val: self.val * s.val, - dij_val: Self::binary_vs_dij( - &self.dij_val, - &s.dij_val, - |l_dij| l_dij * s.val, - |r_dij| self.val * *r_dij, + real_part: self.real_part * s.real_part, + dij_part: Self::binary_vs_dij( + &self.dij_part, + &s.dij_part, + |l_dij| l_dij * s.real_part, + |r_dij| self.real_part * *r_dij, ), } } @@ -457,8 +442,8 @@ impl IsVector for DualVector { fn from_f64_array(vals: [f64; ROWS]) -> Self { DualVector { - val: VecF64::from_f64_array(vals), - dij_val: None, + real_part: VecF64::from_f64_array(vals), + dij_part: None, } } @@ -468,16 +453,16 @@ impl IsVector for DualVector { for i in 0..vals.len() { let d = vals.clone()[i].clone(); - val_v[i] = d.val; - if d.dij_val.is_some() { - shape = Some(d.dij_val.clone().unwrap().dims()); + val_v[i] = d.real_part; + if d.dij_part.is_some() { + shape = Some(d.dij_part.clone().unwrap().dims()); } } if shape.is_none() { return DualVector { - val: val_v, - dij_val: None, + real_part: val_v, + dij_part: None, }; } let shape = shape.unwrap(); @@ -486,37 +471,33 @@ impl IsVector for DualVector { for i in 0..vals.len() { let d = vals.clone()[i].clone(); - if d.dij_val.is_some() { + if d.dij_part.is_some() { for d0 in 0..shape[0] { for d1 in 0..shape[1] { r.mut_view().get_mut([d0, d1])[(i, 0)] = - d.dij_val.clone().unwrap().get([d0, d1]); + d.dij_part.clone().unwrap().get([d0, d1]); } } } } DualVector { - val: val_v, - dij_val: Some(r), + real_part: val_v, + dij_part: Some(r), } } fn set_elem(&mut self, idx: usize, v: DualScalar) { - self.val[idx] = v.val; - if self.dij_val.is_some() { - let dij = &mut self.dij_val.as_mut().unwrap(); + self.real_part[idx] = v.real_part; + if self.dij_part.is_some() { + let dij = &mut self.dij_part.as_mut().unwrap(); for i in 0..dij.dims()[0] { for j in 0..dij.dims()[1] { - dij.mut_view().get_mut([i, j])[idx] = v.dij_val.clone().unwrap().get([i, j]); + dij.mut_view().get_mut([i, j])[idx] = v.dij_part.clone().unwrap().get([i, j]); } } } } - fn vector(self) -> Self { - self.clone() - } - fn to_dual(self) -> >::DualVector { self } @@ -544,8 +525,8 @@ impl IsVector for DualVector { fn get_fixed_subvec(&self, start_r: usize) -> DualVector { DualVector { - val: self.val.fixed_rows::(start_r).into(), - dij_val: self.dij_val.clone().map(|dij_val| { + real_part: self.real_part.fixed_rows::(start_r).into(), + dij_part: self.dij_part.clone().map(|dij_val| { MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start_r).into()) }), } @@ -558,13 +539,13 @@ where { fn zero() -> Self { DualBatchVector { - val: BatchVecF64::::zeros(), - dij_val: None, + real_part: BatchVecF64::::zeros(), + dij_part: None, } } fn is_zero(&self) -> bool { - self.val == BatchVecF64::::zeros() + self.real_part == BatchVecF64::::zeros() } } @@ -578,7 +559,7 @@ where LaneCount: SupportedLaneCount, { fn eq(&self, other: &Self) -> bool { - self.val == other.val && self.dij_val == other.dij_val + self.real_part == other.real_part && self.dij_part == other.dij_part } } @@ -593,7 +574,7 @@ where } fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.val.abs_diff_eq(&other.val, epsilon) + self.real_part.abs_diff_eq(&other.real_part, epsilon) } } @@ -611,7 +592,8 @@ where epsilon: Self::Epsilon, max_relative: 
Self::Epsilon, ) -> bool { - self.val.relative_eq(&other.val, epsilon, max_relative) + self.real_part + .relative_eq(&other.real_part, epsilon, max_relative) } } @@ -620,12 +602,20 @@ impl IsDualVector, where LaneCount: SupportedLaneCount, { - fn new(val: BatchVecF64) -> Self { - DualBatchVector::::v(val) + fn new_with_dij(val: BatchVecF64) -> Self { + let mut dij_val = MutTensorDDR::, ROWS>::from_shape([ROWS, 1]); + for i in 0..ROWS { + dij_val.mut_view().get_mut([i, 0])[(i, 0)] = BatchScalarF64::::ones(); + } + + Self { + real_part: val, + dij_part: Some(dij_val), + } } fn dij_val(self) -> Option, ROWS>> { - self.dij_val + self.dij_part } } @@ -633,19 +623,6 @@ impl DualBatchVector where LaneCount: SupportedLaneCount, { - /// create a dual vector - pub fn v(val: BatchVecF64) -> Self { - let mut dij_val = MutTensorDDR::, ROWS>::from_shape([ROWS, 1]); - for i in 0..ROWS { - dij_val.mut_view().get_mut([i, 0])[(i, 0)] = BatchScalarF64::::ones(); - } - - Self { - val, - dij_val: Some(dij_val), - } - } - fn binary_dij< const R0: usize, const R1: usize, @@ -743,9 +720,9 @@ where fn neg(self) -> Self::Output { DualBatchVector { - val: -self.val, - dij_val: self - .dij_val + real_part: -self.real_part, + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| -v)), } @@ -760,8 +737,13 @@ where fn sub(self, rhs: Self) -> Self::Output { DualBatchVector { - val: self.val - rhs.val, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| -r_dij), + real_part: self.real_part - rhs.real_part, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| *l_dij, + |r_dij| -r_dij, + ), } } } @@ -774,8 +756,13 @@ where fn add(self, rhs: Self) -> Self::Output { DualBatchVector { - val: self.val + rhs.val, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij), + real_part: self.real_part + rhs.real_part, + dij_part: Self::binary_dij( + &self.dij_part, + &rhs.dij_part, + |l_dij| *l_dij, + |r_dij| *r_dij, + ), } } } @@ -788,11 +775,11 @@ where { fn from_f64(val: f64) -> Self { DualBatchVector { - val: + real_part: as IsVector, ROWS, BATCH>>::from_f64( val, ), - dij_val: None, + dij_part: None, } } @@ -809,18 +796,6 @@ where result } - fn set_real_elem(&mut self, idx: usize, v: BatchScalarF64) { - self.val[idx] = v; - if self.dij_val.is_some() { - let dij = &mut self.dij_val.as_mut().unwrap(); - for i in 0..dij.dims()[0] { - for j in 0..dij.dims()[1] { - dij.mut_view().get_mut([i, j])[idx] = BatchScalarF64::::from_f64(0.0); - } - } - } - } - fn norm(&self) -> DualBatchScalar { self.clone().dot(self.clone()).sqrt() } @@ -831,9 +806,9 @@ where fn get_elem(&self, idx: usize) -> DualBatchScalar { DualBatchScalar { - val: self.val[idx], - dij_val: self - .dij_val + real_part: self.real_part[idx], + dij_part: self + .dij_part .clone() .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[idx])), } @@ -845,16 +820,16 @@ where for i in 0..duals.len() { let d = duals.clone()[i].clone(); - val_v[i] = d.val; - if d.dij_val.is_some() { - shape = Some(d.dij_val.clone().unwrap().dims()); + val_v[i] = d.real_part; + if d.dij_part.is_some() { + shape = Some(d.dij_part.clone().unwrap().dims()); } } if shape.is_none() { return DualBatchVector { - val: val_v, - dij_val: None, + real_part: val_v, + dij_part: None, }; } let shape = shape.unwrap(); @@ -863,49 +838,43 @@ where for i in 0..duals.len() { let d = duals.clone()[i].clone(); - if d.dij_val.is_some() { + if d.dij_part.is_some() { for d0 in 0..shape[0] { for 
d1 in 0..shape[1] { r.mut_view().get_mut([d0, d1])[(i, 0)] = - d.dij_val.clone().unwrap().get([d0, d1]); + d.dij_part.clone().unwrap().get([d0, d1]); } } } } DualBatchVector { - val: val_v, - dij_val: Some(r), + real_part: val_v, + dij_part: Some(r), } } fn from_real_array(vals: [BatchScalarF64; ROWS]) -> Self { DualBatchVector { - val: BatchVecF64::from_real_array(vals), - dij_val: None, + real_part: BatchVecF64::from_real_array(vals), + dij_part: None, } } fn from_real_vector(val: BatchVecF64) -> Self { - Self { val, dij_val: None } + Self { + real_part: val, + dij_part: None, + } } fn real_vector(&self) -> &BatchVecF64 { - &self.val - } - - fn get_fixed_rows(&self, start: usize) -> DualBatchVector { - DualBatchVector { - val: self.val.fixed_rows::(start).into(), - dij_val: self.dij_val.clone().map(|dij_val| { - MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start).into()) - }), - } + &self.real_part } fn to_mat(self) -> DualBatchMatrix { DualBatchMatrix { - val: self.val, - dij_val: self.dij_val.map(|dij| dij.inner_vec_to_mat()), + real_part: self.real_part, + dij_part: self.dij_part.map(|dij| dij.inner_vec_to_mat()), } } @@ -915,10 +884,10 @@ where ) -> Self { assert_eq!(R0 + R1, ROWS); - let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); + let maybe_dij = Self::two_dx(top_row.dij_part, bot_row.dij_part); Self { - val: BatchVecF64::::block_vec2(top_row.val, bot_row.val), - dij_val: match maybe_dij { + real_part: BatchVecF64::::block_vec2(top_row.real_part, bot_row.real_part), + dij_part: match maybe_dij { Some(dij_val) => { let mut r = MutTensorDDR::, ROWS>::from_shape(dij_val.shape()); @@ -940,12 +909,12 @@ where fn scaled(&self, s: DualBatchScalar) -> Self { DualBatchVector { - val: self.val * s.val, - dij_val: Self::binary_vs_dij( - &self.dij_val, - &s.dij_val, - |l_dij| l_dij * s.val, - |r_dij| self.val * *r_dij, + real_part: self.real_part * s.real_part, + dij_part: Self::binary_vs_dij( + &self.dij_part, + &s.dij_part, + |l_dij| l_dij * s.real_part, + |r_dij| self.real_part * *r_dij, ), } } @@ -967,8 +936,8 @@ where fn from_f64_array(vals: [f64; ROWS]) -> Self { DualBatchVector { - val: BatchVecF64::from_f64_array(vals), - dij_val: None, + real_part: BatchVecF64::from_f64_array(vals), + dij_part: None, } } @@ -978,16 +947,16 @@ where for i in 0..vals.len() { let d = vals.clone()[i].clone(); - val_v[i] = d.val; - if d.dij_val.is_some() { - shape = Some(d.dij_val.clone().unwrap().dims()); + val_v[i] = d.real_part; + if d.dij_part.is_some() { + shape = Some(d.dij_part.clone().unwrap().dims()); } } if shape.is_none() { return DualBatchVector { - val: val_v, - dij_val: None, + real_part: val_v, + dij_part: None, }; } let shape = shape.unwrap(); @@ -996,47 +965,43 @@ where for i in 0..vals.len() { let d = vals.clone()[i].clone(); - if d.dij_val.is_some() { + if d.dij_part.is_some() { for d0 in 0..shape[0] { for d1 in 0..shape[1] { r.mut_view().get_mut([d0, d1])[(i, 0)] = - d.dij_val.clone().unwrap().get([d0, d1]); + d.dij_part.clone().unwrap().get([d0, d1]); } } } } DualBatchVector { - val: val_v, - dij_val: Some(r), + real_part: val_v, + dij_part: Some(r), } } fn set_elem(&mut self, idx: usize, v: DualBatchScalar) { - self.val[idx] = v.val; - if self.dij_val.is_some() { - let dij = &mut self.dij_val.as_mut().unwrap(); + self.real_part[idx] = v.real_part; + if self.dij_part.is_some() { + let dij = &mut self.dij_part.as_mut().unwrap(); for i in 0..dij.dims()[0] { for j in 0..dij.dims()[1] { - dij.mut_view().get_mut([i, j])[idx] = 
v.dij_val.clone().unwrap().get([i, j]); + dij.mut_view().get_mut([i, j])[idx] = v.dij_part.clone().unwrap().get([i, j]); } } } } - fn vector(self) -> Self { - self.clone() - } - fn to_dual(self) -> as IsScalar>::DualVector { self } fn select(self, mask: &Mask, other: Self) -> Self { - let maybe_dij = Self::two_dx(self.dij_val, other.dij_val); + let maybe_dij = Self::two_dx(self.dij_part, other.dij_part); Self { - val: IsVector::select(self.val, mask, other.val), - dij_val: match maybe_dij { + real_part: IsVector::select(self.real_part, mask, other.real_part), + dij_part: match maybe_dij { Some(dij) => { let mut r = MutTensorDDR::, ROWS>::from_shape(dij.shape()); @@ -1055,8 +1020,8 @@ where fn get_fixed_subvec(&self, start_r: usize) -> DualBatchVector { DualBatchVector { - val: self.val.fixed_rows::(start_r).into(), - dij_val: self.dij_val.clone().map(|dij_val| { + real_part: self.real_part.fixed_rows::(start_r).into(), + dij_part: self.dij_part.clone().map(|dij_val| { MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start_r).into()) }), } diff --git a/crates/sophus_core/src/calculus/maps.rs b/crates/sophus_core/src/calculus/maps.rs index 5765dd74..80b596a9 100644 --- a/crates/sophus_core/src/calculus/maps.rs +++ b/crates/sophus_core/src/calculus/maps.rs @@ -1,8 +1,20 @@ /// curve - a function from ℝ to M, where M is a manifold pub mod curves; +pub use crate::calculus::maps::curves::MatrixValuedCurve; +pub use crate::calculus::maps::curves::ScalarValuedCurve; +pub use crate::calculus::maps::curves::VectorValuedCurve; + /// matrix-valued map - a function from M to ℝʳ x ℝᶜ, where M is a manifold pub mod matrix_valued_maps; +pub use crate::calculus::maps::matrix_valued_maps::MatrixValuedMapFromMatrix; +pub use crate::calculus::maps::matrix_valued_maps::MatrixValuedMapFromVector; + /// scalar-valued map - a function from M to ℝ, where M is a manifold pub mod scalar_valued_maps; +pub use crate::calculus::maps::scalar_valued_maps::ScalarValuedMapFromMatrix; +pub use crate::calculus::maps::scalar_valued_maps::ScalarValuedMapFromVector; + /// vector-valued map - a function from M to ℝⁿ, where M is a manifold pub mod vector_valued_maps; +pub use crate::calculus::maps::vector_valued_maps::VectorValuedMapFromMatrix; +pub use crate::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; diff --git a/crates/sophus_core/src/calculus/maps/curves.rs b/crates/sophus_core/src/calculus/maps/curves.rs index 2d45a623..151200fd 100644 --- a/crates/sophus_core/src/calculus/maps/curves.rs +++ b/crates/sophus_core/src/calculus/maps/curves.rs @@ -1,11 +1,5 @@ -use crate::calculus::dual::dual_matrix::IsDualMatrix; -use crate::calculus::dual::dual_scalar::IsDualScalar; -use crate::calculus::dual::dual_vector::IsDualVector; -use crate::linalg::matrix::IsMatrix; -use crate::linalg::scalar::IsScalar; -use crate::linalg::vector::IsVector; use crate::linalg::SMat; -use crate::tensor::tensor_view::IsTensorLike; +use crate::prelude::*; use nalgebra::SVector; /// A smooth curve in ℝ. 
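Note on the mechanism behind the renames above: `real_part`/`dij_part` plus `new_with_dij` spell out plain forward-mode autodiff. The derivative slot is seeded with one, and every elementary operation (the `sin`, `cos`, `sqrt`, `atan2`, ... impls above) propagates it by the chain rule; the curve and map helpers below then just read the slot back out. A minimal, self-contained sketch of that idea, using plain `f64` fields and illustrative names rather than the `sophus_core` types:

```rust
/// Minimal forward-mode dual number: f(a + b*eps) = f(a) + f'(a)*b*eps, with eps^2 = 0.
#[derive(Clone, Copy, Debug)]
struct Dual {
    real_part: f64, // value
    dij_part: f64,  // derivative with respect to the seeded input
}

impl Dual {
    /// Seed the derivative slot with 1, analogous to `new_with_dij`.
    fn new_with_dij(val: f64) -> Self {
        Dual { real_part: val, dij_part: 1.0 }
    }

    fn mul(self, rhs: Self) -> Self {
        Dual {
            real_part: self.real_part * rhs.real_part,
            // product rule
            dij_part: self.dij_part * rhs.real_part + rhs.dij_part * self.real_part,
        }
    }

    fn sin(self) -> Self {
        Dual {
            real_part: self.real_part.sin(),
            // chain rule, same shape as the DualScalar::sin impl above
            dij_part: self.dij_part * self.real_part.cos(),
        }
    }
}

fn main() {
    // d/dx [x * sin(x)] at x = 2 is sin(2) + 2 * cos(2)
    let x = Dual::new_with_dij(2.0);
    let y = x.mul(x.sin());
    assert!((y.dij_part - (2.0f64.sin() + 2.0 * 2.0f64.cos())).abs() < 1e-12);
    println!("value = {}, derivative = {}", y.real_part, y.dij_part);
}
```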
@@ -36,7 +30,11 @@ impl, const BATCH: usize> ScalarValuedCurve { where TFn: Fn(D) -> D, { - curve(D::new(a)).dij_val().clone().unwrap().get([0, 0]) + curve(D::new_with_dij(a)) + .dij_val() + .clone() + .unwrap() + .get([0, 0]) } } @@ -72,7 +70,7 @@ impl, const BATCH: usize> VectorValuedCurve { TFn: Fn(D) -> D::Vector, D::Vector: IsDualVector, { - curve(D::new(a)).dij_val().unwrap().get([0, 0]) + curve(D::new_with_dij(a)).dij_val().unwrap().get([0, 0]) } } @@ -111,7 +109,7 @@ impl, const BATCH: usize> MatrixValuedCurve { TFn: Fn(D) -> D::Matrix, D::Matrix: IsDualMatrix, { - curve(D::new(a)).dij_val().unwrap().get([0, 0]) + curve(D::new_with_dij(a)).dij_val().unwrap().get([0, 0]) } } @@ -131,7 +129,6 @@ fn curve_test() { ) => { impl CurveTest for $dual_scalar { fn run_curve_test() { - use crate::linalg::matrix::IsMatrix; use crate::linalg::vector::IsVector; for i in 0..10 { diff --git a/crates/sophus_core/src/calculus/maps/matrix_valued_maps.rs b/crates/sophus_core/src/calculus/maps/matrix_valued_maps.rs index 184cfa5c..cbbc596d 100644 --- a/crates/sophus_core/src/calculus/maps/matrix_valued_maps.rs +++ b/crates/sophus_core/src/calculus/maps/matrix_valued_maps.rs @@ -1,15 +1,8 @@ -use crate::calculus::dual::dual_matrix::IsDualMatrix; -use crate::calculus::dual::dual_scalar::IsDualScalar; -use crate::linalg::matrix::IsMatrix; -use crate::linalg::matrix::IsRealMatrix; -use crate::linalg::scalar::IsRealScalar; -use crate::linalg::scalar::IsScalar; use crate::linalg::SMat; +use crate::prelude::*; use crate::tensor::mut_tensor::MutTensorDDRC; use crate::tensor::mut_tensor::MutTensorDRC; -use crate::tensor::mut_tensor_view::IsMutTensorLike; use nalgebra::SMatrix; - use std::marker::PhantomData; /// Matrix-valued map on a vector space. @@ -71,7 +64,7 @@ impl, const BATCH: usize> TFn: Fn(D::DualVector) -> D::DualMatrix, { MutTensorDRC { - mut_array: matrix_valued(D::vector_v(a)) + mut_array: matrix_valued(D::vector_with_dij(a)) .dij_val() .unwrap() .mut_array @@ -156,7 +149,7 @@ impl, const BATCH: usize> where TFn: Fn(D::DualMatrix) -> D::DualMatrix, { - matrix_valued(D::matrix_v(a)).dij_val().unwrap() + matrix_valued(D::matrix_with_dij(a)).dij_val().unwrap() } } @@ -165,7 +158,7 @@ fn matrix_valued_map_from_vector_tests() { use crate::calculus::dual::dual_scalar::DualBatchScalar; use crate::calculus::dual::dual_scalar::DualScalar; use crate::calculus::maps::matrix_valued_maps::MatrixValuedMapFromVector; - use crate::linalg::matrix::IsMatrix; + use crate::linalg::scalar::IsScalar; use crate::linalg::vector::IsVector; use crate::linalg::BatchScalarF64; diff --git a/crates/sophus_core/src/calculus/maps/scalar_valued_maps.rs b/crates/sophus_core/src/calculus/maps/scalar_valued_maps.rs index 65424b0b..ddee6d79 100644 --- a/crates/sophus_core/src/calculus/maps/scalar_valued_maps.rs +++ b/crates/sophus_core/src/calculus/maps/scalar_valued_maps.rs @@ -1,10 +1,5 @@ -use crate::calculus::dual::dual_scalar::IsDualScalar; -use crate::linalg::matrix::IsMatrix; -use crate::linalg::scalar::IsRealScalar; -use crate::linalg::scalar::IsScalar; -use crate::linalg::vector::IsVector; +use crate::prelude::*; use crate::tensor::mut_tensor::MutTensorDD; -use crate::tensor::tensor_view::IsTensorLike; /// Scalar-valued map on a vector space. 
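The `vector_with_dij` / `matrix_with_dij` seeding used by the `fw_autodiff` functions in these map types generalizes the scalar case: each input coordinate gets its own derivative slot, seeded with an identity pattern, so a single forward evaluation yields the full gradient or Jacobian. A hedged, self-contained illustration of that seeding pattern, with a plain array standing in for the `MutTensorDDR` tangent (names are illustrative, not the crate API):

```rust
/// Forward-mode dual with N derivative slots, one per input coordinate.
#[derive(Clone, Copy, Debug)]
struct DualN<const N: usize> {
    real_part: f64,
    dij_part: [f64; N],
}

impl<const N: usize> DualN<N> {
    /// i-th input coordinate: derivative slot i is seeded with 1.
    fn input(val: f64, i: usize) -> Self {
        let mut dij_part = [0.0; N];
        dij_part[i] = 1.0;
        DualN { real_part: val, dij_part }
    }

    fn add(self, rhs: Self) -> Self {
        let mut dij_part = [0.0; N];
        for k in 0..N {
            dij_part[k] = self.dij_part[k] + rhs.dij_part[k];
        }
        DualN { real_part: self.real_part + rhs.real_part, dij_part }
    }

    fn mul(self, rhs: Self) -> Self {
        let mut dij_part = [0.0; N];
        for k in 0..N {
            // product rule, slot by slot
            dij_part[k] = self.dij_part[k] * rhs.real_part + rhs.dij_part[k] * self.real_part;
        }
        DualN { real_part: self.real_part * rhs.real_part, dij_part }
    }
}

fn main() {
    // Gradient of f(x, y) = x*y + y*y at (3, 2): grad f = [y, x + 2y] = [2, 7]
    let x = DualN::<2>::input(3.0, 0);
    let y = DualN::<2>::input(2.0, 1);
    let f = x.mul(y).add(y.mul(y));
    assert_eq!(f.real_part, 10.0);
    assert_eq!(f.dij_part, [2.0, 7.0]); // exact for these inputs
}
```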
/// @@ -58,8 +53,10 @@ impl, const BATCH: usize> ScalarValuedMapFromVector) -> D, { - let jacobian: MutTensorDD = - scalar_valued(D::vector_v(a)).dij_val().unwrap().clone(); + let jacobian: MutTensorDD = scalar_valued(D::vector_with_dij(a)) + .dij_val() + .unwrap() + .clone(); assert_eq!(jacobian.dims(), [INROWS, 1]); let mut out = D::RealVector::::zeros(); @@ -117,8 +114,10 @@ impl, const BATCH: usize> ScalarValuedMapFromMatrix) -> D, { - let jacobian: MutTensorDD = - scalar_valued(D::matrix_v(a)).dij_val().unwrap().clone(); + let jacobian: MutTensorDD = scalar_valued(D::matrix_with_dij(a)) + .dij_val() + .unwrap() + .clone(); assert_eq!(jacobian.dims(), [INROWS, INCOLS]); let mut out = D::RealMatrix::::zeros(); @@ -148,7 +147,6 @@ fn scalar_valued_map_tests() { #[cfg(test)] impl Test for $scalar { fn run() { - use crate::linalg::matrix::IsMatrix; use crate::linalg::vector::IsVector; let a = <$scalar as IsScalar<$batch>>::Vector::<2>::new( diff --git a/crates/sophus_core/src/calculus/maps/vector_valued_maps.rs b/crates/sophus_core/src/calculus/maps/vector_valued_maps.rs index 4497f3df..f63bf080 100644 --- a/crates/sophus_core/src/calculus/maps/vector_valued_maps.rs +++ b/crates/sophus_core/src/calculus/maps/vector_valued_maps.rs @@ -1,14 +1,7 @@ -use crate::calculus::dual::dual_scalar::IsDualScalar; -use crate::calculus::dual::dual_vector::IsDualVector; -use crate::linalg::matrix::IsMatrix; -use crate::linalg::scalar::IsRealScalar; -use crate::linalg::scalar::IsScalar; -use crate::linalg::vector::IsVector; use crate::linalg::SVec; +use crate::prelude::*; use crate::tensor::mut_tensor::MutTensorDDR; use crate::tensor::mut_tensor::MutTensorDR; -use crate::tensor::mut_tensor_view::IsMutTensorLike; -use crate::tensor::tensor_view::IsTensorLike; use std::marker::PhantomData; /// Vector-valued map on a vector space. 
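A common sanity check for these `fw_autodiff` results is a central-difference quotient; the sketch below is a generic standalone helper under that assumption, not one of the crate's own test utilities:

```rust
/// Central-difference Jacobian of f: R^N -> R^M, useful for cross-checking
/// forward-mode derivatives such as the ones computed via *_with_dij seeding.
fn num_jacobian<const N: usize, const M: usize>(
    f: impl Fn([f64; N]) -> [f64; M],
    x: [f64; N],
    eps: f64,
) -> [[f64; N]; M] {
    let mut jac = [[0.0; N]; M];
    for j in 0..N {
        let mut xp = x;
        let mut xm = x;
        xp[j] += eps;
        xm[j] -= eps;
        let (fp, fm) = (f(xp), f(xm));
        for i in 0..M {
            // (f(x + eps*e_j) - f(x - eps*e_j)) / (2*eps) approximates df_i/dx_j
            jac[i][j] = (fp[i] - fm[i]) / (2.0 * eps);
        }
    }
    jac
}

fn main() {
    // f(x, y) = (x*y, x + sin(y)); analytic Jacobian: [[y, x], [1, cos(y)]]
    let f = |v: [f64; 2]| [v[0] * v[1], v[0] + v[1].sin()];
    let jac = num_jacobian::<2, 2>(f, [3.0, 2.0], 1e-6);
    assert!((jac[0][0] - 2.0).abs() < 1e-6);
    assert!((jac[1][1] - 2.0f64.cos()).abs() < 1e-6);
    println!("{:?}", jac);
}
```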
@@ -98,7 +91,7 @@ impl, const BATCH: usize> where TFn: Fn(D::DualVector) -> D::DualVector, { - let v = vector_valued(D::vector_v(a)); + let v = vector_valued(D::vector_with_dij(a)); let d = v.dij_val(); if d.is_none() { return MutTensorDR::from_shape([INROWS]); @@ -193,7 +186,7 @@ impl, const BATCH: usize> where TFn: Fn(D::DualMatrix) -> D::DualVector, { - vector_valued(D::matrix_v(a)).dij_val().unwrap() + vector_valued(D::matrix_with_dij(a)).dij_val().unwrap() } } @@ -203,7 +196,7 @@ fn vector_valued_map_from_vector_tests() { use crate::calculus::dual::dual_scalar::DualScalar; use crate::calculus::maps::vector_valued_maps::VectorValuedMapFromMatrix; use crate::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; - use crate::linalg::matrix::IsMatrix; + use crate::linalg::vector::IsVector; use crate::linalg::BatchScalarF64; use crate::tensor::tensor_view::IsTensorLike; diff --git a/crates/sophus_core/src/calculus/region.rs b/crates/sophus_core/src/calculus/region.rs index 52e0df69..2b7d8f4e 100644 --- a/crates/sophus_core/src/calculus/region.rs +++ b/crates/sophus_core/src/calculus/region.rs @@ -1,5 +1,6 @@ use nalgebra::SVector; -use num_traits::Bounded; + +use crate::IsPoint; /// Floating-point interval #[derive(Debug, Copy, Clone)] @@ -43,84 +44,6 @@ impl IRegion { } } -/// Traits for points -pub trait IsPoint: Copy + Bounded { - /// Point type - type Point: Bounded; - - /// smallest point - fn smallest() -> Self::Point { - Bounded::min_value() - } - - /// largest point - fn largest() -> Self::Point { - Bounded::max_value() - } - - /// clamp point - fn clamp(&self, min: Self, max: Self) -> Self::Point; - - /// check if point is less or equal to another point - fn is_less_equal(&self, rhs: Self) -> bool; -} - -impl IsPoint<1> for f64 { - type Point = f64; - - fn clamp(&self, min: f64, max: f64) -> f64 { - f64::clamp(*self, min, max) - } - - fn is_less_equal(&self, rhs: f64) -> bool { - self <= &rhs - } -} - -impl IsPoint<1> for i64 { - type Point = i64; - - fn clamp(&self, min: i64, max: i64) -> i64 { - Ord::clamp(*self, min, max) - } - - fn is_less_equal(&self, rhs: i64) -> bool { - self <= &rhs - } -} - -impl IsPoint for SVector { - type Point = Self; - - fn clamp(&self, min: Self, max: Self) -> Self::Point { - let mut p: Self::Point = Self::Point::zeros(); - for i in 0..D { - p[i] = self[i].clamp(min[i], max[i]); - } - p - } - - fn is_less_equal(&self, _rhs: Self::Point) -> bool { - todo!() - } -} - -impl IsPoint for SVector { - type Point = Self; - - fn clamp(&self, min: Self::Point, max: Self::Point) -> Self::Point { - let mut p: Self::Point = Self::Point::zeros(); - for i in 0..D { - p[i] = self[i].clamp(min[i], max[i]); - } - p - } - - fn is_less_equal(&self, _rhs: Self::Point) -> bool { - todo!() - } -} - /// Traits for regions pub trait IsRegion> { /// Region type diff --git a/crates/sophus_core/src/calculus/spline.rs b/crates/sophus_core/src/calculus/spline.rs index 5e60aa1b..15f044dd 100644 --- a/crates/sophus_core/src/calculus/spline.rs +++ b/crates/sophus_core/src/calculus/spline.rs @@ -3,9 +3,7 @@ pub mod spline_segment; use crate::calculus::spline::spline_segment::CubicBSplineSegment; use crate::calculus::spline::spline_segment::SegmentCase; -use crate::linalg::bool_mask::BoolMask; -use crate::linalg::matrix::IsMatrix; -use crate::linalg::scalar::IsSingleScalar; +use crate::prelude::*; /// Cubic B-Spline implementation pub struct CubicBSplineImpl { diff --git a/crates/sophus_core/src/calculus/spline/spline_segment.rs 
b/crates/sophus_core/src/calculus/spline/spline_segment.rs index 5e488fbf..188a6211 100644 --- a/crates/sophus_core/src/calculus/spline/spline_segment.rs +++ b/crates/sophus_core/src/calculus/spline/spline_segment.rs @@ -1,6 +1,4 @@ -use crate::linalg::matrix::IsMatrix; -use crate::linalg::scalar::IsSingleScalar; -use crate::linalg::vector::IsVector; +use crate::prelude::*; use std::marker::PhantomData; /// cubic basis function diff --git a/crates/sophus_core/src/lib.rs b/crates/sophus_core/src/lib.rs index c46f4648..54ff6b55 100644 --- a/crates/sophus_core/src/lib.rs +++ b/crates/sophus_core/src/lib.rs @@ -1,12 +1,55 @@ #![feature(portable_simd)] +#![deny(missing_docs)] +#![allow(clippy::needless_range_loop)] +//! sophus core crate - part of the sophus-rs projectx -/// calculus +/// calculus - differentiation, splines, and more pub mod calculus; + /// linear algebra types pub mod linalg; + +/// manifolds +pub mod manifold; +pub use crate::manifold::*; + /// params pub mod params; +pub use crate::params::*; + /// points pub mod points; +pub use crate::points::*; + /// tensors pub mod tensor; +pub use crate::tensor::arc_tensor::*; +pub use crate::tensor::mut_tensor::*; +pub use crate::tensor::mut_tensor_view::*; +pub use crate::tensor::tensor_view::*; + +/// sophus_core prelude +pub mod prelude { + pub use crate::calculus::dual::dual_matrix::IsDualMatrix; + pub use crate::calculus::dual::dual_scalar::IsDual; + pub use crate::calculus::dual::dual_scalar::IsDualScalar; + pub use crate::calculus::dual::dual_vector::IsDualVector; + pub use crate::calculus::region::IsRegion; + pub use crate::linalg::bool_mask::IsBoolMask; + pub use crate::linalg::matrix::IsMatrix; + pub use crate::linalg::matrix::IsRealMatrix; + pub use crate::linalg::matrix::IsSingleMatrix; + pub use crate::linalg::scalar::IsCoreScalar; + pub use crate::linalg::scalar::IsRealScalar; + pub use crate::linalg::scalar::IsScalar; + pub use crate::linalg::scalar::IsSingleScalar; + pub use crate::linalg::vector::IsRealVector; + pub use crate::linalg::vector::IsSingleVector; + pub use crate::linalg::vector::IsVector; + pub use crate::manifold::traits::IsManifold; + pub use crate::params::HasParams; + pub use crate::tensor::element::IsStaticTensor; + pub use crate::tensor::mut_tensor_view::IsMutTensorLike; + pub use crate::tensor::tensor_view::IsTensorLike; + pub use crate::tensor::tensor_view::IsTensorView; +} diff --git a/crates/sophus_core/src/linalg.rs b/crates/sophus_core/src/linalg.rs index cd5c7027..d4c1ff14 100644 --- a/crates/sophus_core/src/linalg.rs +++ b/crates/sophus_core/src/linalg.rs @@ -1,7 +1,15 @@ +/// Boolean mask - generalization of bool to SIMD pub mod bool_mask; + +/// Matrix types pub mod matrix; + +/// Scalar types pub mod scalar; + +/// Vector types pub mod vector; + use std::ops::Add; use std::simd::cmp::SimdPartialEq; use std::simd::num::SimdFloat; @@ -17,7 +25,7 @@ pub type SVec = nalgebra::SVector = nalgebra::SMatrix; -/// Batch scalar +/// Batch of scalar #[derive(Clone, Debug, PartialEq, Copy)] pub struct BatchScalar( Simd, @@ -25,9 +33,11 @@ pub struct BatchScalar( where LaneCount: SupportedLaneCount; +/// Batch of vectors pub type BatchVec = nalgebra::SVector, ROWS>; +/// Batch of matrices pub type BatchMat = nalgebra::SMatrix, ROWS, COLS>; diff --git a/crates/sophus_core/src/linalg/bool_mask.rs b/crates/sophus_core/src/linalg/bool_mask.rs index 7c2404ae..9448909c 100644 --- a/crates/sophus_core/src/linalg/bool_mask.rs +++ b/crates/sophus_core/src/linalg/bool_mask.rs @@ -3,16 +3,28 @@ use 
std::simd::Mask; use std::simd::MaskElement; use std::simd::SupportedLaneCount; -pub trait BoolMask { +/// Boolean mask - generalization of boolean comparison to SIMDs +pub trait IsBoolMask { + /// Mask with all lanes set to true fn all_true() -> Self; + + /// Mask with all lanes set to false fn all_false() -> Self; + + /// Returns true if all lanes are true fn all(&self) -> bool; + + /// Returns true if any lane is true fn any(&self) -> bool; + + /// Returns the number of lanes that are true fn count(&self) -> usize; + + /// Returns the number of lanes fn lanes(&self) -> usize; } -impl BoolMask for bool { +impl IsBoolMask for bool { fn all_true() -> bool { true } @@ -40,7 +52,7 @@ impl BoolMask for bool { } } -impl BoolMask for Mask +impl IsBoolMask for Mask where T: MaskElement, LaneCount: SupportedLaneCount, diff --git a/crates/sophus_core/src/linalg/matrix.rs b/crates/sophus_core/src/linalg/matrix.rs index 12f71578..6c7cd2f5 100644 --- a/crates/sophus_core/src/linalg/matrix.rs +++ b/crates/sophus_core/src/linalg/matrix.rs @@ -1,16 +1,13 @@ -use super::scalar::IsRealScalar; -use super::scalar::IsSingleScalar; -use crate::calculus::dual::dual_matrix::DualBatchMatrix; -use crate::calculus::dual::dual_matrix::DualMatrix; -use crate::linalg::scalar::IsScalar; +use crate::calculus::dual::DualBatchMatrix; +use crate::calculus::dual::DualMatrix; use crate::linalg::BatchMatF64; use crate::linalg::BatchScalarF64; use crate::linalg::BatchVecF64; use crate::linalg::MatF64; use crate::linalg::VecF64; +use crate::prelude::*; use approx::AbsDiffEq; use approx::RelativeEq; -use num_traits::Zero; use std::fmt::Debug; use std::ops::Add; use std::ops::Index; @@ -22,7 +19,9 @@ use std::simd::LaneCount; use std::simd::Mask; use std::simd::SupportedLaneCount; -/// Matrix - either a real (f64) or a dual number matrix +/// Matrix trait +/// - either a real (f64) or a dual number matrix +/// - either a single matrix or a batch matrix pub trait IsMatrix< S: IsScalar, const ROWS: usize, @@ -40,40 +39,47 @@ pub trait IsMatrix< + AbsDiffEq + RelativeEq { - /// create 1x2 block matrix + /// creates matrix from a left and right block columns fn block_mat1x2( left_col: S::Matrix, righ_col: S::Matrix, ) -> Self; - fn set_elem(&mut self, idx: [usize; 2], val: S); - - /// create 2x1 block matrix + /// creates matrix from a top and bottom block rows fn block_mat2x1( top_row: S::Matrix, bot_row: S::Matrix, ) -> Self; - /// create 2x2 block matrix + /// creates matrix from a 2x2 block of matrices fn block_mat2x2( top_row: (S::Matrix, S::Matrix), bot_row: (S::Matrix, S::Matrix), ) -> Self; - fn select(self, mask: &S::Mask, other: Self) -> Self; - - /// create from 2d array + /// creates matrix from a 2d array of scalars fn from_array2(vals: [[S; COLS]; ROWS]) -> Self; - /// create from constant 2d array - fn from_real_array2(vals: [[S::RealScalar; COLS]; ROWS]) -> Self; + /// creates matrix with all real elements (and lanes) set to the given value + /// + /// (for dual numbers, the infinitesimal part is set to zero) + fn from_f64(val: f64) -> Self; - /// create from constant 2d array + /// creates matrix from a 2d array of real values + /// + /// - all lanes are set to the same value + /// - for dual numbers, the infinitesimal part is set to zero fn from_f64_array2(vals: [[f64; COLS]; ROWS]) -> Self; + /// creates matrix from a 2d array of real scalars + /// + /// (for dual numbers, the infinitesimal part is set to zero) + fn from_real_scalar_array2(vals: [[S::RealScalar; COLS]; ROWS]) -> Self; + /// create a constant 
matrix fn from_real_matrix(val: S::RealMatrix) -> Self; + /// create a constant scalar fn from_scalar(val: S) -> Self; /// extract column vector @@ -92,29 +98,39 @@ pub trait IsMatrix< start_c: usize, ) -> S::Matrix; - fn to_dual(self) -> S::DualMatrix; - /// create an identity matrix fn identity() -> Self; /// matrix multiplication fn mat_mul(&self, other: S::Matrix) -> S::Matrix; - /// return the real part + /// ones + fn ones() -> Self { + Self::from_f64(1.0) + } + + /// Return a real matrix fn real_matrix(&self) -> &S::RealMatrix; /// return scaled matrix fn scaled(&self, v: S) -> Self; - /// create a constant scalar - fn from_f64(val: f64) -> Self; + /// Returns self if mask is true, otherwise returns other + /// + /// For batch matrices, this is a lane-wise operation + fn select(self, mask: &S::Mask, other: Self) -> Self; + + /// set i-th element + fn set_elem(&mut self, idx: [usize; 2], val: S); + /// set column vectors fn set_col_vec(&mut self, c: usize, v: S::Vector); - /// ones - fn ones() -> Self { - Self::from_f64(1.0) - } + /// Return dual matrix + /// + /// If self is a real matrix, this will return a dual matrix with the infinitesimal part set to + /// zero: (self, 0ϵ) + fn to_dual(self) -> S::DualMatrix; /// zeros fn zeros() -> Self { @@ -122,7 +138,7 @@ pub trait IsMatrix< } } -/// is real vector like +/// Is real matrix? pub trait IsRealMatrix< S: IsRealScalar + IsScalar, const ROWS: usize, @@ -136,7 +152,7 @@ pub trait IsRealMatrix< { } -/// Matrix - either a real (f64) or a dual number matrix +/// Is single matrix? (not batch) pub trait IsSingleMatrix: IsMatrix + Mul, Output = S::SingleVector> { @@ -166,7 +182,7 @@ impl IsMatrix for MatF m } - fn from_real_array2(vals: [[f64; COLS]; ROWS]) -> Self { + fn from_real_scalar_array2(vals: [[f64; COLS]; ROWS]) -> Self { let mut m = MatF64::::zeros(); for c in 0..COLS { for r in 0..ROWS { @@ -197,7 +213,7 @@ impl IsMatrix for MatF bot_row: MatF64, ) -> Self { assert_eq!(ROWS, R0 + R1); - let mut m = Self::zero(); + let mut m = Self::zeros(); m.fixed_view_mut::(0, 0).copy_from(&top_row); m.fixed_view_mut::(R0, 0).copy_from(&bot_row); @@ -210,7 +226,7 @@ impl IsMatrix for MatF ) -> Self { assert_eq!(ROWS, R0 + R1); assert_eq!(COLS, C0 + C1); - let mut m = Self::zero(); + let mut m = Self::zeros(); m.fixed_view_mut::(0, 0).copy_from(&top_row.0); m.fixed_view_mut::(0, C0).copy_from(&top_row.1); @@ -225,7 +241,7 @@ impl IsMatrix for MatF righ_col: MatF64, ) -> Self { assert_eq!(COLS, C0 + C1); - let mut m = Self::zero(); + let mut m = Self::zeros(); m.fixed_view_mut::(0, 0).copy_from(&left_col); m.fixed_view_mut::(0, C0).copy_from(&righ_col); @@ -317,7 +333,7 @@ where Self::from_fn(|r, c| vals[r][c]) } - fn from_real_array2(vals: [[BatchScalarF64; COLS]; ROWS]) -> Self { + fn from_real_scalar_array2(vals: [[BatchScalarF64; COLS]; ROWS]) -> Self { Self::from_fn(|r, c| vals[r][c]) } diff --git a/crates/sophus_core/src/linalg/scalar.rs b/crates/sophus_core/src/linalg/scalar.rs index 1e072cbb..b9613f69 100644 --- a/crates/sophus_core/src/linalg/scalar.rs +++ b/crates/sophus_core/src/linalg/scalar.rs @@ -1,25 +1,16 @@ -use super::bool_mask::BoolMask; -use super::matrix::IsRealMatrix; -use super::vector::IsRealVector; -use crate::calculus::dual::dual_matrix::DualBatchMatrix; -use crate::calculus::dual::dual_matrix::DualMatrix; -use crate::calculus::dual::dual_matrix::IsDualMatrix; -use crate::calculus::dual::dual_scalar::DualBatchScalar; -use crate::calculus::dual::dual_scalar::DualScalar; -use 
crate::calculus::dual::dual_scalar::IsDualScalar; -use crate::calculus::dual::dual_vector::DualBatchVector; -use crate::calculus::dual::dual_vector::DualVector; -use crate::calculus::dual::dual_vector::IsDualVector; -use crate::linalg::matrix::IsMatrix; -use crate::linalg::matrix::IsSingleMatrix; -use crate::linalg::vector::IsSingleVector; -use crate::linalg::vector::IsVector; +use crate::calculus::dual::DualBatchMatrix; +use crate::calculus::dual::DualBatchScalar; +use crate::calculus::dual::DualBatchVector; +use crate::calculus::dual::DualMatrix; +use crate::calculus::dual::DualScalar; +use crate::calculus::dual::DualVector; use crate::linalg::BatchMatF64; use crate::linalg::BatchScalar; use crate::linalg::BatchScalarF64; use crate::linalg::BatchVecF64; use crate::linalg::MatF64; use crate::linalg::VecF64; +use crate::prelude::*; use approx::assert_abs_diff_eq; use approx::AbsDiffEq; use approx::RelativeEq; @@ -95,7 +86,10 @@ where } } -/// Scalar - either a real (f64) or a dual number +/// Scalar trait +/// +/// - either a real (f64) or a dual number +/// - either a single scalar or a batch scalar pub trait IsScalar: PartialEq + Debug @@ -124,7 +118,8 @@ pub trait IsScalar: /// Dual scalar type type DualScalar: IsDualScalar; - type Mask: BoolMask; + /// Mask type + type Mask: IsBoolMask; /// Vector type type RealMatrix: IsRealMatrix< @@ -154,12 +149,6 @@ pub trait IsScalar: BATCH_SIZE, >; - fn select(self, mask: &Self::Mask, other: Self) -> Self; - - fn less_equal(&self, rhs: &Self) -> Self::Mask; - - fn greater_equal(&self, rhs: &Self) -> Self::Mask; - /// absolute value fn abs(self) -> Self; @@ -175,28 +164,63 @@ pub trait IsScalar: /// arctangent2 fn atan2(self, x: Self) -> Self; - /// signum - fn signum(&self) -> Self; - /// cosine fn cos(self) -> Self; + /// Returns value of single lane + fn extract_single(&self, i: usize) -> Self::SingleScalar; + /// floor fn floor(&self) -> Self::RealScalar; /// fractional part fn fract(self) -> Self; - /// create a constant scalar + /// Creates a scalar with all real lanes set the given value + /// + /// If self is a dual number, the infinitesimal part is set to zero + fn from_f64(val: f64) -> Self; + + /// Creates a scalar from an array of real values + /// + /// - If self is a single scalar, the array must have one element + /// - If self is a batch scalar, the array must have BATCH_SIZE elements + /// - If self is a dual number, the infinitesimal part is set to zero + fn from_real_array(arr: [f64; BATCH_SIZE]) -> Self; + + /// creates scalar from real scalar + /// + /// for dual numbers, the infinitesimal part is set to zero fn from_real_scalar(val: Self::RealScalar) -> Self; - /// create a constant scalar - fn from_f64(val: f64) -> Self; + /// Greater or equal comparison + fn greater_equal(&self, rhs: &Self) -> Self::Mask; + + /// Less or equal comparison + fn less_equal(&self, rhs: &Self) -> Self::Mask; + + /// ones + fn ones() -> Self { + Self::from_f64(1.0) + } /// return the real part fn real_part(&self) -> Self::RealScalar; - fn to_dual(self) -> Self::DualScalar; + /// return examples of scalar values + fn scalar_examples() -> Vec; + + /// Return the self if the mask is true, otherwise the other value + /// + /// This is a lane-wise operation + fn select(self, mask: &Self::Mask, other: Self) -> Self; + + /// Return the sign of the scalar + /// + /// -1 if negative including -0, + /// 1 if positive, including +0, + /// NaN if NaN + fn signum(&self) -> Self; /// sine fn sin(self) -> Self; @@ -204,32 +228,22 @@ pub trait IsScalar: /// 
square root fn sqrt(self) -> Self; + /// Returns dual number representation + /// + /// If self is a real number, the infinitesimal part is zero: (self, 0ϵ) + fn to_dual(self) -> Self::DualScalar; + + /// Return as a real array + /// + /// If self is a dual number, the infinitesimal part is omitted + fn to_real_array(&self) -> [f64; BATCH_SIZE]; + /// tangent fn tan(self) -> Self; /// return as a vector fn to_vec(self) -> Self::Vector<1>; - /// value - fn scalar(self) -> Self { - self - } - - fn from_real_array(arr: [f64; BATCH_SIZE]) -> Self; - - fn real_array(&self) -> [f64; BATCH_SIZE]; - - /// value - fn scalar_examples() -> Vec; - - /// get item - fn extract_single(&self, i: usize) -> Self::SingleScalar; - - /// ones - fn ones() -> Self { - Self::from_f64(1.0) - } - /// zeros fn zeros() -> Self { Self::from_f64(0.0) @@ -333,7 +347,7 @@ impl IsScalar<1> for f64 { arr[0] } - fn real_array(&self) -> [Self::RealScalar; 1] { + fn to_real_array(&self) -> [Self::RealScalar; 1] { [*self] } @@ -607,7 +621,7 @@ where } } - fn real_array(&self) -> [f64; BATCH] { + fn to_real_array(&self) -> [f64; BATCH] { self.0.to_array() } diff --git a/crates/sophus_core/src/linalg/vector.rs b/crates/sophus_core/src/linalg/vector.rs index 09cad77d..a2682d98 100644 --- a/crates/sophus_core/src/linalg/vector.rs +++ b/crates/sophus_core/src/linalg/vector.rs @@ -1,16 +1,13 @@ -use approx::AbsDiffEq; -use approx::RelativeEq; - -use super::scalar::IsRealScalar; -use super::scalar::IsScalar; -use super::scalar::IsSingleScalar; -use crate::calculus::dual::dual_vector::DualBatchVector; -use crate::calculus::dual::dual_vector::DualVector; +use crate::calculus::dual::DualBatchVector; +use crate::calculus::dual::DualVector; use crate::linalg::BatchMatF64; use crate::linalg::BatchScalarF64; use crate::linalg::BatchVecF64; use crate::linalg::MatF64; use crate::linalg::VecF64; +use crate::prelude::*; +use approx::AbsDiffEq; +use approx::RelativeEq; use std::fmt::Debug; use std::ops::Add; use std::ops::Index; @@ -32,28 +29,18 @@ pub trait IsVector, const ROWS: usize, const BATCH_SIZE: + AbsDiffEq + RelativeEq { - fn vector(self) -> Self; - - /// create a block vector + /// creates vector from a block of two vectors fn block_vec2( top_row: S::Vector, bot_row: S::Vector, ) -> Self; - fn to_dual( - self, - ) -> <>::DualScalar as IsScalar>::Vector; - /// dot product fn dot(self, rhs: Self) -> S; - fn outer(self, rhs: S::Vector) -> S::Matrix; - /// create a vector from an array fn from_array(vals: [S; ROWS]) -> Self; - fn select(self, mask: &S::Mask, other: Self) -> Self; - /// create a constant vector from an array fn from_real_array(vals: [S::RealScalar; ROWS]) -> Self; @@ -66,14 +53,14 @@ pub trait IsVector, const ROWS: usize, const BATCH_SIZE: /// create a constant vector from an array fn from_f64_array(vals: [f64; ROWS]) -> Self; - /// create a constant vector from an array - fn from_scalar_array(vals: [S; ROWS]) -> Self; - /// get ith element fn get_elem(&self, idx: usize) -> S; - /// get fixed rows - fn get_fixed_rows(&self, start: usize) -> S::Vector; + /// Returns a fixed-size subvector starting at the given row + fn get_fixed_subvec(&self, start_r: usize) -> S::Vector; + + /// create a constant vector from an array + fn from_scalar_array(vals: [S; ROWS]) -> Self; /// norm fn norm(&self) -> S; @@ -81,22 +68,35 @@ pub trait IsVector, const ROWS: usize, const BATCH_SIZE: /// return normalized vector fn normalized(&self) -> Self; + /// outer product + fn outer(self, rhs: S::Vector) -> S::Matrix; + /// return the real part fn 
real_vector(&self) -> &S::RealVector; + /// Returns self if mask is true, otherwise returns other + /// + /// For batch vectors, this is a lane-wise operation + fn select(self, mask: &S::Mask, other: Self) -> Self; + /// return scaled vector fn scaled(&self, v: S) -> Self; - /// set ith element as constant + /// set ith element to given scalar fn set_elem(&mut self, idx: usize, v: S); - /// set ith element as constant - fn set_real_elem(&mut self, idx: usize, v: S::RealScalar); - /// squared norm fn squared_norm(&self) -> S; - /// return the matrix representation + /// Return dual vector + /// + /// If self is a real vector, this will return a dual vector with the infinitesimal part set to + /// zero: (self, 0ϵ) + fn to_dual( + self, + ) -> <>::DualScalar as IsScalar>::Vector; + + /// return the matrix representation - in self as a column vector fn to_mat(self) -> S::Matrix; /// ones @@ -108,9 +108,6 @@ pub trait IsVector, const ROWS: usize, const BATCH_SIZE: fn zeros() -> Self { Self::from_f64(0.0) } - - /// get fixed submatrix - fn get_fixed_subvec(&self, start_r: usize) -> S::Vector; } /// is real vector like @@ -144,10 +141,6 @@ impl IsSingleVector for VecF64 { impl IsRealVector for VecF64 {} impl IsVector for VecF64 { - fn vector(self) -> Self { - self - } - fn block_vec2( top_row: VecF64, bot_row: VecF64, @@ -184,10 +177,6 @@ impl IsVector for VecF64 { self[idx] } - fn get_fixed_rows(&self, start: usize) -> VecF64 { - self.fixed_rows::(start).into() - } - fn norm(&self) -> f64 { self.norm() } @@ -200,10 +189,6 @@ impl IsVector for VecF64 { self[idx] = v; } - fn set_real_elem(&mut self, idx: usize, v: f64) { - self[idx] = v; - } - fn squared_norm(&self) -> f64 { self.norm_squared() } @@ -286,12 +271,8 @@ where m } - fn vector(self) -> Self { - self - } - fn dot(self, rhs: Self) -> BatchScalarF64 { - (self.transpose() * &rhs)[0] + (self.transpose() * rhs)[0] } fn from_array(vals: [BatchScalarF64; ROWS]) -> Self { @@ -318,17 +299,13 @@ where self[idx] } - fn get_fixed_rows(&self, start: usize) -> BatchVecF64 { - self.fixed_rows::(start).into() - } - fn norm(&self) -> BatchScalarF64 { self.squared_norm().sqrt() } fn normalized(&self) -> Self { let norm = self.norm(); - if norm == BatchScalarF64::::zeros() { + if norm == as IsScalar>::zeros() { return *self; } let factor = BatchScalarF64::::ones() / norm; @@ -347,14 +324,10 @@ where self[idx] = v; } - fn set_real_elem(&mut self, idx: usize, v: BatchScalarF64) { - self[idx] = v; - } - fn squared_norm(&self) -> BatchScalarF64 { - let mut squared_norm = BatchScalarF64::::zeros(); + let mut squared_norm = as IsScalar>::zeros(); for i in 0..ROWS { - let val = self.get_elem(i); + let val = IsVector::get_elem(self, i); squared_norm += val * val; } squared_norm diff --git a/crates/sophus_core/src/manifold.rs b/crates/sophus_core/src/manifold.rs new file mode 100644 index 00000000..a8dae449 --- /dev/null +++ b/crates/sophus_core/src/manifold.rs @@ -0,0 +1,2 @@ +/// manifolds +pub mod traits; diff --git a/crates/sophus_core/src/calculus/manifold/traits.rs b/crates/sophus_core/src/manifold/traits.rs similarity index 95% rename from crates/sophus_core/src/calculus/manifold/traits.rs rename to crates/sophus_core/src/manifold/traits.rs index cffeab3e..4849cb64 100644 --- a/crates/sophus_core/src/calculus/manifold/traits.rs +++ b/crates/sophus_core/src/manifold/traits.rs @@ -1,8 +1,6 @@ use crate::linalg::VecF64; - -use crate::linalg::scalar::IsScalar; -use crate::params::HasParams; use crate::params::ParamsImpl; +use crate::prelude::*; /// A tangent 
implementation. pub trait TangentImpl, const DOF: usize, const BATCH_SIZE: usize> { diff --git a/crates/sophus_core/src/params.rs b/crates/sophus_core/src/params.rs index 70c37181..9a34b3ce 100644 --- a/crates/sophus_core/src/params.rs +++ b/crates/sophus_core/src/params.rs @@ -1,6 +1,6 @@ -use crate::linalg::scalar::IsScalar; use crate::linalg::VecF64; use crate::points::example_points; +use crate::prelude::*; /// Parameter implementation. pub trait ParamsImpl, const PARAMS: usize, const BATCH_SIZE: usize> { diff --git a/crates/sophus_core/src/points.rs b/crates/sophus_core/src/points.rs index 457a067d..870a891c 100644 --- a/crates/sophus_core/src/points.rs +++ b/crates/sophus_core/src/points.rs @@ -1,5 +1,88 @@ -use crate::linalg::scalar::IsScalar; -use crate::linalg::vector::IsVector; +use crate::prelude::*; +use nalgebra::SVector; +use num_traits::Bounded; + +/// Traits for points +pub trait IsPoint: Copy + Bounded { + /// Point type + type Point: Bounded; + + /// smallest point + fn smallest() -> Self::Point { + Bounded::min_value() + } + + /// largest point + fn largest() -> Self::Point { + Bounded::max_value() + } + + /// clamp point + fn clamp(&self, min: Self, max: Self) -> Self::Point; + + /// check if point is less or equal to another point + fn is_less_equal(&self, rhs: Self) -> bool; +} + +impl IsPoint<1> for f64 { + type Point = f64; + + fn clamp(&self, min: f64, max: f64) -> f64 { + f64::clamp(*self, min, max) + } + + fn is_less_equal(&self, rhs: f64) -> bool { + self <= &rhs + } +} + +impl IsPoint<1> for i64 { + type Point = i64; + + fn clamp(&self, min: i64, max: i64) -> i64 { + Ord::clamp(*self, min, max) + } + + fn is_less_equal(&self, rhs: i64) -> bool { + self <= &rhs + } +} + +impl IsPoint for SVector { + type Point = Self; + + fn clamp(&self, min: Self, max: Self) -> Self::Point { + let mut p: Self::Point = Self::Point::zeros(); + for i in 0..D { + p[i] = self[i].clamp(min[i], max[i]); + } + p + } + + fn is_less_equal(&self, rhs: Self::Point) -> bool { + self.iter() + .zip(rhs.iter()) + .all(|(a, b)| a.is_less_equal(*b)) + } +} + +impl IsPoint for SVector { + type Point = Self; + + fn clamp(&self, min: Self::Point, max: Self::Point) -> Self::Point { + let mut p: Self::Point = Self::Point::zeros(); + for i in 0..D { + p[i] = self[i].clamp(min[i], max[i]); + } + p + } + + fn is_less_equal(&self, _rhs: Self::Point) -> bool { + self.iter() + .zip(_rhs.iter()) + .all(|(a, b)| a.is_less_equal(*b)) + } +} /// Example points pub fn example_points, const POINT: usize, const BATCH: usize>( diff --git a/crates/sophus_core/src/tensor.rs b/crates/sophus_core/src/tensor.rs index 1f100eef..efc4b352 100644 --- a/crates/sophus_core/src/tensor.rs +++ b/crates/sophus_core/src/tensor.rs @@ -3,11 +3,19 @@ /// Arc tensor pub mod arc_tensor; +pub use crate::tensor::arc_tensor::ArcTensor; + /// Tensor element pub mod element; + /// Mutable tensor pub mod mut_tensor; +pub use crate::tensor::mut_tensor::MutTensor; + /// Mutable tensor view pub mod mut_tensor_view; +pub use crate::tensor::mut_tensor_view::MutTensorView; + /// Tensor view pub mod tensor_view; +pub use crate::tensor::tensor_view::TensorView; diff --git a/crates/sophus_core/src/tensor/arc_tensor.rs b/crates/sophus_core/src/tensor/arc_tensor.rs index 9e98b00d..da71a27f 100644 --- a/crates/sophus_core/src/tensor/arc_tensor.rs +++ b/crates/sophus_core/src/tensor/arc_tensor.rs @@ -1,15 +1,11 @@ -use ndarray::Dimension; - -use crate::linalg::scalar::IsCoreScalar; use crate::linalg::SMat; use crate::linalg::SVec; -use 
crate::tensor::element::IsStaticTensor; +use crate::prelude::*; use crate::tensor::mut_tensor::InnerScalarToVec; use crate::tensor::mut_tensor::InnerVecToMat; -use crate::tensor::mut_tensor::MutTensor; -use crate::tensor::tensor_view::IsTensorLike; -use crate::tensor::tensor_view::IsTensorView; -use crate::tensor::tensor_view::TensorView; +use crate::tensor::MutTensor; +use crate::tensor::TensorView; +use ndarray::Dimension; use std::marker::PhantomData; diff --git a/crates/sophus_core/src/tensor/element.rs b/crates/sophus_core/src/tensor/element.rs index fae726b5..4ae17a46 100644 --- a/crates/sophus_core/src/tensor/element.rs +++ b/crates/sophus_core/src/tensor/element.rs @@ -1,8 +1,7 @@ -use crate::linalg::scalar::IsCoreScalar; use crate::linalg::scalar::NumberCategory; use crate::linalg::SMat; use crate::linalg::SVec; - +use crate::prelude::*; use std::fmt::Debug; pub use typenum::generic_const_mappings::Const; @@ -14,11 +13,6 @@ pub trait IsStaticTensor< const COLS: usize, >: Clone + Debug + num_traits::Zero { - /// Set zeros - fn zeros() -> Self { - Self::from_slice(&vec![Scalar::zero(); Self::num_scalars()]) - } - /// Returns ith scalar element fn scalar(&self, idx: [usize; SRANK]) -> &Scalar; diff --git a/crates/sophus_core/src/tensor/layout.rs b/crates/sophus_core/src/tensor/layout.rs deleted file mode 100644 index 57233402..00000000 --- a/crates/sophus_core/src/tensor/layout.rs +++ /dev/null @@ -1,303 +0,0 @@ -use std::ops::Range; - -use crate::sophus_calculus::linalg::M; - -use crate::tensor::element::IsScalar; -use crate::tensor::element::IsStaticTensor; - -pub trait HasShape { - fn dims(&self) -> TensorShape; - fn num_elements(&self) -> usize { - self.dims().iter().product() - } -} - -impl HasShape<1> for TensorShape<1> { - fn dims(&self) -> TensorShape<1> { - *self - } -} - -impl HasShape<2> for TensorShape<2> { - fn dims(&self) -> TensorShape<2> { - *self - } -} - -impl HasShape<3> for TensorShape<3> { - fn dims(&self) -> TensorShape<3> { - *self - } -} - -impl HasShape<4> for TensorShape<4> { - fn dims(&self) -> TensorShape<4> { - *self - } -} - -pub type TensorShape = [usize; RANK]; - -/// -/// -/// Memory layout: monotonically decreasing -/// [D1xD2xD3x.., D1xD2x.., ..., 1] -/// -/// Data presentation: ROW-first indexing -/// [D0= , ..., D{i}=ROW, D{i+1}=COL,...] 
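For reference, the row-major stride and indexing rule documented above (and removed together with `layout.rs` below) can be restated as a small free function; this is illustrative only and no longer part of the crate:

```rust
// Strides are [d1*d2*.., .., 1] and the flat index is the dot product of the
// index tuple with the strides.
fn row_major_index<const RANK: usize>(dims: [usize; RANK], idx: [usize; RANK]) -> usize {
    let mut strides = [1usize; RANK];
    for d in (0..RANK - 1).rev() {
        strides[d] = strides[d + 1] * dims[d + 1];
    }
    idx.iter().zip(strides.iter()).map(|(i, s)| i * s).sum()
}

fn main() {
    // dims [2, 3]: strides are [3, 1], so element (1, 2) lives at flat index 5.
    assert_eq!(row_major_index([2, 3], [1, 2]), 5);
    // dims [4, 2, 3]: strides are [6, 3, 1].
    assert_eq!(row_major_index([4, 2, 3], [1, 0, 1]), 7);
}
```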
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct TensorLayout { - pub dims: TensorShape, - row_major_strides: [usize; RANK], -} - -pub type TensorLayout1 = TensorLayout<1>; -pub type TensorLayout2 = TensorLayout<2>; -pub type TensorLayout3 = TensorLayout<3>; -pub type TensorLayout4 = TensorLayout<4>; - -impl TensorLayout<1> { - pub fn rank(dims: [usize; 1]) -> Self { - Self { - dims, - row_major_strides: [1], - } - } -} - -impl TensorLayout<2> { - pub fn rank(dims: [usize; 2]) -> Self { - Self { - dims, - row_major_strides: [dims[1], 1], - } - } -} - -impl TensorLayout<3> { - pub fn rank(dims: [usize; 3]) -> Self { - Self { - dims, - row_major_strides: [dims[2] * dims[1], dims[2], 1], - } - } -} - -impl TensorLayout<4> { - pub fn rank(dims: [usize; 4]) -> Self { - Self { - dims, - row_major_strides: [dims[3] * dims[2] * dims[1], dims[3] * dims[2], dims[3], 1], - } - } -} - -impl TensorLayout { - fn null() -> Self { - let mut strides = [0; RANK]; - strides[RANK - 1] = 1; - Self { - dims: [0; RANK], - row_major_strides: strides, - } - } -} - -impl Default for TensorLayout { - fn default() -> Self { - TensorLayout::null() - } -} - -pub trait HasTensorLayout: HasShape { - fn strides(&self) -> [usize; RANK]; - - fn padded_area(&self) -> usize { - self.dims()[0] * self.strides()[0] - } - - fn num_bytes_of_padded_area< - Scalar: IsScalar + 'static, - const BATCH_SIZE: usize, - const ROWS: usize, - const COLS: usize, - >( - &self, - ) -> usize { - Self::padded_area(self) * std::mem::size_of::() * BATCH_SIZE * ROWS * COLS - } - - fn layout(&self) -> TensorLayout; - - fn index(&self, idx_tuple: [usize; RANK]) -> usize; - - fn dim0_index(&self, idx_tuple: [usize; RANK]) -> usize; - fn dim0_range(&self, idx_tuple: [usize; RANK]) -> std::ops::Range; - - fn is_empty(&self) -> bool { - *self.dims().iter().min().unwrap_or(&0) == 0 - } -} - -macro_rules! 
tensor_shape { - ($drank:literal) => { - impl HasShape<$drank> for TensorLayout<$drank> { - fn dims(&self) -> TensorShape<$drank> { - self.dims - } - fn num_elements(&self) -> usize { - self.dims.num_elements() - } - } - - impl HasTensorLayout<$drank> for TensorLayout<$drank> { - fn layout(&self) -> TensorLayout<$drank> { - *self - } - - fn strides(&self) -> [usize; $drank] { - debug_assert_eq!(self.row_major_strides[$drank - 1], 1); - self.row_major_strides - } - - fn index(&self, idx_tuple: [usize; $drank]) -> usize { - self.dim0_index(idx_tuple) + idx_tuple[$drank - 1] - } - - fn dim0_index(&self, idx_tuple: [usize; $drank]) -> usize { - let all_idx_but_last = *array_ref![idx_tuple, 0, $drank - 1]; - let strides = self.strides(); - let all_strides_but_last = *array_ref![strides, 0, $drank - 1]; - - all_idx_but_last - .iter() - .zip(all_strides_but_last.iter()) - .map(|(x, y)| x * y) - .sum() - } - - fn dim0_range(&self, idx_tuple: [usize; $drank]) -> Range { - let idx0 = self.dim0_index(idx_tuple); - std::ops::Range { - start: idx0, - end: idx0 + self.dims[0], - } - } - } - }; -} -tensor_shape!(1); -tensor_shape!(2); -tensor_shape!(3); -tensor_shape!(4); - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn tensor_shape() { - let rank1_shape = [8]; - assert_eq!(rank1_shape.dims()[0], 8); - - let rank2_shape = [8, 7]; - assert_eq!(rank2_shape.dims()[0], 8); - assert_eq!(rank2_shape.dims()[1], 7); - - let rank3_shape = [8, 7, 6]; - assert_eq!(rank3_shape.dims()[0], 8); - assert_eq!(rank3_shape.dims()[1], 7); - assert_eq!(rank3_shape.dims()[2], 6); - - let rank3_shape = [8, 7, 6, 8]; - assert_eq!(rank3_shape.dims()[0], 8); - assert_eq!(rank3_shape.dims()[1], 7); - assert_eq!(rank3_shape.dims()[2], 6); - assert_eq!(rank3_shape.dims()[3], 8); - } - - #[test] - fn tensor_layout() { - { - let rank1_shape = [3]; - - let rank1_layout = TensorLayout1::rank(rank1_shape); - - let arr = [4, 6, 7]; - - assert_eq!(rank1_layout.num_elements(), 3); - assert_eq!(rank1_layout.num_elements(), rank1_layout.padded_area()); - assert_eq!(arr.len(), rank1_layout.padded_area()); - - for i in 0..rank1_shape.dims()[0] { - assert_eq!(arr[i], arr[rank1_layout.index([i])]); - } - } - { - let rank2_shape = [2, 3]; - let rank2_layout = TensorLayout2::rank(rank2_shape); - let arr = [ - 4, 6, 7, // - 8, 9, 10, - ]; - - assert_eq!(rank2_layout.num_elements(), 6); - assert_eq!(rank2_layout.num_elements(), rank2_layout.padded_area()); - assert_eq!(arr.len(), rank2_layout.padded_area()); - - let row_arr = [ - [4, 6, 7], // - [8, 9, 10], - ]; - - for d1 in 0..rank2_shape.dims()[1] { - for d0 in 0..rank2_shape.dims()[0] { - assert_eq!(row_arr[d0][d1], arr[rank2_layout.index([d0, d1])]); - } - } - } - - { - let rank3_shape = [4, 2, 3]; - let rank3_layout = TensorLayout3::rank(rank3_shape); - let arr = [ - 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, - ]; - - assert_eq!(rank3_layout.num_elements(), 24); - assert_eq!(rank3_layout.num_elements(), rank3_layout.padded_area()); - assert_eq!(arr.len(), rank3_layout.padded_area()); - - let row_col_arr = [ - [ - [4, 6, 7], // - [8, 9, 10], - ], - [ - [11, 12, 13], // - [14, 15, 16], - ], - [ - [17, 18, 19], // - [20, 21, 22], - ], - [ - [23, 24, 25], // - [26, 27, 28], - ], - ]; - - for d2 in 0..rank3_shape.dims()[2] { - for d1 in 0..rank3_shape.dims()[1] { - for d0 in 0..rank3_shape.dims()[0] { - assert_eq!( - row_col_arr[d0][d1][d2], - arr[rank3_layout.index([d0, d1, d2])] - ); - } - } - } - } - } -} diff --git 
a/crates/sophus_core/src/tensor/mut_tensor.rs b/crates/sophus_core/src/tensor/mut_tensor.rs index 4a969f6f..39d6acc3 100644 --- a/crates/sophus_core/src/tensor/mut_tensor.rs +++ b/crates/sophus_core/src/tensor/mut_tensor.rs @@ -1,13 +1,9 @@ -use crate::linalg::scalar::IsCoreScalar; use crate::linalg::SMat; use crate::linalg::SVec; -use crate::tensor::arc_tensor::ArcTensor; -use crate::tensor::element::IsStaticTensor; -use crate::tensor::mut_tensor_view::IsMutTensorLike; -use crate::tensor::mut_tensor_view::MutTensorView; -use crate::tensor::tensor_view::IsTensorLike; -use crate::tensor::tensor_view::IsTensorView; -use crate::tensor::tensor_view::TensorView; +use crate::prelude::*; +use crate::tensor::ArcTensor; +use crate::tensor::MutTensorView; +use crate::tensor::TensorView; use ndarray::Dim; use ndarray::Ix; use std::fmt::Debug; diff --git a/crates/sophus_core/src/tensor/mut_tensor_view.rs b/crates/sophus_core/src/tensor/mut_tensor_view.rs index 05fc4fa3..ff4d3748 100644 --- a/crates/sophus_core/src/tensor/mut_tensor_view.rs +++ b/crates/sophus_core/src/tensor/mut_tensor_view.rs @@ -1,9 +1,6 @@ -use crate::linalg::scalar::IsCoreScalar; -use crate::tensor::element::IsStaticTensor; -use crate::tensor::mut_tensor::MutTensor; -use crate::tensor::tensor_view::IsTensorLike; -use crate::tensor::tensor_view::IsTensorView; -use crate::tensor::tensor_view::TensorView; +use crate::prelude::*; +use crate::tensor::MutTensor; +use crate::tensor::TensorView; use concat_arrays::concat_arrays; use std::marker::PhantomData; @@ -91,6 +88,7 @@ macro_rules! mut_view_is_view { >, ) -> Self { let dims: [usize; $drank] = elem_view_mut.shape().try_into().unwrap(); + #[allow(clippy::drop_non_drop)] let shape: [usize; $scalar_rank] = concat_arrays!(dims, STensor::sdims()); let dstrides: [isize; $drank] = elem_view_mut.strides().try_into().unwrap(); @@ -99,6 +97,7 @@ macro_rules! mut_view_is_view { for d in dstrides.iter_mut() { *d *= num_scalars; } + #[allow(clippy::drop_non_drop)] let strides = concat_arrays!(dstrides, STensor::strides()); let ptr = elem_view_mut.as_ptr() as *mut Scalar; diff --git a/crates/sophus_core/src/tensor/tensor_view.rs b/crates/sophus_core/src/tensor/tensor_view.rs index 40cf98e0..661e5ea7 100644 --- a/crates/sophus_core/src/tensor/tensor_view.rs +++ b/crates/sophus_core/src/tensor/tensor_view.rs @@ -1,9 +1,7 @@ -use crate::linalg::scalar::IsCoreScalar; use crate::linalg::SMat; use crate::linalg::SVec; -use crate::tensor::element::IsStaticTensor; -use crate::tensor::mut_tensor::MutTensor; - +use crate::prelude::*; +use crate::tensor::MutTensor; use concat_arrays::concat_arrays; use std::marker::PhantomData; @@ -179,6 +177,7 @@ macro_rules! tensor_view_is_view { elem_view: ndarray::ArrayView<'a, STensor, ndarray::Dim<[ndarray::Ix; $drank]>>, ) -> Self { let dims: [usize; $drank] = elem_view.shape().try_into().unwrap(); + #[allow(clippy::drop_non_drop)] let shape: [usize; $scalar_rank] = concat_arrays!(dims, STensor::sdims()); let dstrides: [isize; $drank] = elem_view.strides().try_into().unwrap(); @@ -187,6 +186,7 @@ macro_rules! 
tensor_view_is_view { for d in dstrides.iter_mut() { *d *= num_scalars; } + #[allow(clippy::drop_non_drop)] let strides = concat_arrays!(dstrides, STensor::strides()); let ptr = elem_view.as_ptr() as *const Scalar; diff --git a/crates/sophus_image/src/arc_image.rs b/crates/sophus_image/src/arc_image.rs index 468ce8cc..708be0c4 100644 --- a/crates/sophus_image/src/arc_image.rs +++ b/crates/sophus_image/src/arc_image.rs @@ -1,15 +1,10 @@ use crate::image_view::GenImageView; -use crate::image_view::ImageSize; -use crate::image_view::IsImageView; use crate::mut_image::GenMutImage; - -use sophus_core::linalg::scalar::IsCoreScalar; +use crate::prelude::*; +use crate::ImageSize; use sophus_core::linalg::SVec; -use sophus_core::tensor::arc_tensor::ArcTensor; -use sophus_core::tensor::element::IsStaticTensor; -use sophus_core::tensor::tensor_view::IsTensorLike; -use sophus_core::tensor::tensor_view::IsTensorView; use sophus_core::tensor::tensor_view::TensorView; +use sophus_core::tensor::ArcTensor; /// Image of static tensors with shared ownership #[derive(Debug, Clone)] @@ -249,7 +244,7 @@ macro_rules! arc_image { self.tensor.get([v, u]) } - fn image_size(&self) -> super::image_view::ImageSize { + fn image_size(&self) -> ImageSize { self.image_view().image_size() } } diff --git a/crates/sophus_image/src/image_view.rs b/crates/sophus_image/src/image_view.rs index ece6a0af..da4dc632 100644 --- a/crates/sophus_image/src/image_view.rs +++ b/crates/sophus_image/src/image_view.rs @@ -1,50 +1,8 @@ -use sophus_core::linalg::scalar::IsCoreScalar; +use crate::prelude::*; +use crate::ImageSize; use sophus_core::linalg::SVec; -use sophus_core::tensor::element::IsStaticTensor; -use sophus_core::tensor::tensor_view::IsTensorLike; -use sophus_core::tensor::tensor_view::IsTensorView; use sophus_core::tensor::tensor_view::TensorView; -/// Image size -#[derive(Debug, Copy, Clone, Default)] -pub struct ImageSize { - /// Width of the image - number of columns - pub width: usize, - /// Height of the image - number of rows - pub height: usize, -} - -impl ImageSize { - /// Create a new image size from width and height - pub fn new(width: usize, height: usize) -> Self { - Self { width, height } - } - - /// Get the area of the image - width * height - pub fn area(&self) -> usize { - self.width * self.height - } -} - -impl From<[usize; 2]> for ImageSize { - /// We are converting from Tensor (and matrix) convention (d0 = rows, d1 = cols) - /// to Matrix convention (d0 = width = cols, d1 = height = rows) - fn from(rows_cols: [usize; 2]) -> Self { - ImageSize { - width: rows_cols[1], - height: rows_cols[0], - } - } -} - -impl From for [usize; 2] { - /// We are converting from Image Indexing Convention (d0 = width = cols, d1 = height = rows) - /// to tensor (and matrix) convention (d0 = rows, d1 = cols). 
- fn from(image_size: ImageSize) -> Self { - [image_size.height, image_size.width] - } -} - /// Image view of static tensors #[derive(Debug, Clone, PartialEq)] pub struct GenImageView< diff --git a/crates/sophus_image/src/intensity_image.rs b/crates/sophus_image/src/intensity_image.rs index fe1c9617..092e86fa 100644 --- a/crates/sophus_image/src/intensity_image.rs +++ b/crates/sophus_image/src/intensity_image.rs @@ -12,7 +12,6 @@ use crate::arc_image::ArcImageF32; use crate::arc_image::ArcImageR; use crate::arc_image::ArcImageU16; use crate::arc_image::ArcImageU8; -use crate::image_view::ImageSize; use crate::image_view::ImageView2F32; use crate::image_view::ImageView2U16; use crate::image_view::ImageView2U8; @@ -25,7 +24,6 @@ use crate::image_view::ImageView4U8; use crate::image_view::ImageViewF32; use crate::image_view::ImageViewU16; use crate::image_view::ImageViewU8; -use crate::image_view::IsImageView; use crate::mut_image::MutImage; use crate::mut_image::MutImage2F32; use crate::mut_image::MutImage2U16; @@ -40,10 +38,9 @@ use crate::mut_image::MutImageF32; use crate::mut_image::MutImageR; use crate::mut_image::MutImageU16; use crate::mut_image::MutImageU8; -use sophus_core::linalg::scalar::IsCoreScalar; - +use crate::prelude::*; +use crate::ImageSize; use sophus_core::linalg::SVec; -use sophus_core::tensor::element::IsStaticTensor; /// dynamic mutable intensity image of unsigned integer values pub enum DynIntensityMutImageU { @@ -221,32 +218,34 @@ impl DynIntensityArcImage { pub fn to_grayscale_u8(&self) -> ArcImageU8 { match self { DynIntensityArcImage::GrayscaleU8(image) => image.clone(), - DynIntensityArcImage::GrayscaleAlphaU8(image) => IntensityArcImage::to_grayscale(image), - DynIntensityArcImage::RgbU8(image) => IntensityArcImage::to_grayscale(image), - DynIntensityArcImage::RgbaU8(image) => IntensityArcImage::to_grayscale(image), + DynIntensityArcImage::GrayscaleAlphaU8(image) => { + IsIntensityArcImage::to_grayscale(image) + } + DynIntensityArcImage::RgbU8(image) => IsIntensityArcImage::to_grayscale(image), + DynIntensityArcImage::RgbaU8(image) => IsIntensityArcImage::to_grayscale(image), DynIntensityArcImage::GrayscaleU16(image) => { - IntensityArcImage::cast_u8(&IntensityArcImage::to_grayscale(image)) + IsIntensityArcImage::cast_u8(&IsIntensityArcImage::to_grayscale(image)) } DynIntensityArcImage::GrayscaleAlphaU16(image) => { - IntensityArcImage::cast_u8(&IntensityArcImage::to_grayscale(image)) + IsIntensityArcImage::cast_u8(&IsIntensityArcImage::to_grayscale(image)) } DynIntensityArcImage::RgbU16(image) => { - IntensityArcImage::cast_u8(&IntensityArcImage::to_grayscale(image)) + IsIntensityArcImage::cast_u8(&IsIntensityArcImage::to_grayscale(image)) } DynIntensityArcImage::RgbaU16(image) => { - IntensityArcImage::cast_u8(&IntensityArcImage::to_grayscale(image)) + IsIntensityArcImage::cast_u8(&IsIntensityArcImage::to_grayscale(image)) } DynIntensityArcImage::GrayscaleF32(image) => { - IntensityArcImage::cast_u8(&IntensityArcImage::to_grayscale(image)) + IsIntensityArcImage::cast_u8(&IsIntensityArcImage::to_grayscale(image)) } DynIntensityArcImage::GrayscaleAlphaF32(image) => { - IntensityArcImage::cast_u8(&IntensityArcImage::to_grayscale(image)) + IsIntensityArcImage::cast_u8(&IsIntensityArcImage::to_grayscale(image)) } DynIntensityArcImage::RgbF32(image) => { - IntensityArcImage::cast_u8(&IntensityArcImage::to_grayscale(image)) + IsIntensityArcImage::cast_u8(&IsIntensityArcImage::to_grayscale(image)) } DynIntensityArcImage::RgbaF32(image) => { - 
IntensityArcImage::cast_u8(&IntensityArcImage::to_grayscale(image)) + IsIntensityArcImage::cast_u8(&IsIntensityArcImage::to_grayscale(image)) } } } @@ -296,7 +295,7 @@ pub enum DynIntensityImageView<'a> { /// If the f32 is outside this range, conversion results may be surprising. /// /// These are image type which typically used for computer vision and graphics applications. -pub trait IntensityMutImage< +pub trait IsIntensityMutImage< const TOTAL_RANK: usize, const SRANK: usize, Scalar: IsCoreScalar + 'static, @@ -346,7 +345,7 @@ pub trait IntensityMutImage< fn try_into_dyn_image_view_u(img: Self) -> Option; } -impl<'a> IntensityMutImage<2, 0, u8, u8, 1, 1> for MutImageU8 { +impl IsIntensityMutImage<2, 0, u8, u8, 1, 1> for MutImageU8 { type Pixel = S; fn pixel_to_grayscale(pixel: &u8) -> u8 { @@ -398,7 +397,7 @@ impl<'a> IntensityMutImage<2, 0, u8, u8, 1, 1> for MutImageU8 { } } -impl IntensityMutImage<2, 0, u16, u16, 1, 1> for MutImageU16 { +impl IsIntensityMutImage<2, 0, u16, u16, 1, 1> for MutImageU16 { type Pixel = S; fn pixel_to_grayscale(pixel: &u16) -> u16 { @@ -450,7 +449,7 @@ impl IntensityMutImage<2, 0, u16, u16, 1, 1> for MutImageU16 { } } -impl IntensityMutImage<2, 0, f32, f32, 1, 1> for MutImageF32 { +impl IsIntensityMutImage<2, 0, f32, f32, 1, 1> for MutImageF32 { type Pixel = S; fn pixel_to_grayscale(pixel: &f32) -> f32 { @@ -502,7 +501,7 @@ impl IntensityMutImage<2, 0, f32, f32, 1, 1> for MutImageF32 { } } -impl IntensityMutImage<3, 1, u8, SVec, 4, 1> for MutImage4U8 { +impl IsIntensityMutImage<3, 1, u8, SVec, 4, 1> for MutImage4U8 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u8 { @@ -562,7 +561,7 @@ impl IntensityMutImage<3, 1, u8, SVec, 4, 1> for MutImage4U8 { } /// Trait for "intensity" images with shared ownership. 
-pub trait IntensityArcImage< +pub trait IsIntensityArcImage< const TOTAL_RANK: usize, const SRANK: usize, Scalar: IsCoreScalar + 'static, @@ -615,7 +614,7 @@ pub trait IntensityArcImage< fn try_into_dyn_image_view_u(img: &Self) -> Option; } -impl IntensityArcImage<2, 0, u8, u8, 1, 1> for ArcImageU8 { +impl IsIntensityArcImage<2, 0, u8, u8, 1, 1> for ArcImageU8 { type Pixel = S; fn pixel_to_grayscale(pixel: &u8) -> u8 { @@ -667,7 +666,7 @@ impl IntensityArcImage<2, 0, u8, u8, 1, 1> for ArcImageU8 { } } -impl IntensityArcImage<2, 0, u16, u16, 1, 1> for ArcImageU16 { +impl IsIntensityArcImage<2, 0, u16, u16, 1, 1> for ArcImageU16 { type Pixel = S; fn pixel_to_grayscale(pixel: &u16) -> u16 { @@ -721,7 +720,7 @@ impl IntensityArcImage<2, 0, u16, u16, 1, 1> for ArcImageU16 { } } -impl IntensityArcImage<2, 0, f32, f32, 1, 1> for ArcImageF32 { +impl IsIntensityArcImage<2, 0, f32, f32, 1, 1> for ArcImageF32 { type Pixel = S; fn pixel_to_grayscale(pixel: &f32) -> f32 { @@ -773,7 +772,7 @@ impl IntensityArcImage<2, 0, f32, f32, 1, 1> for ArcImageF32 { } } -impl IntensityArcImage<3, 1, u8, SVec, 2, 1> for ArcImage2U8 { +impl IsIntensityArcImage<3, 1, u8, SVec, 2, 1> for ArcImage2U8 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u8 { @@ -827,7 +826,7 @@ impl IntensityArcImage<3, 1, u8, SVec, 2, 1> for ArcImage2U8 { } } -impl IntensityArcImage<3, 1, u8, SVec, 3, 1> for ArcImage3U8 { +impl IsIntensityArcImage<3, 1, u8, SVec, 3, 1> for ArcImage3U8 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u8 { @@ -885,7 +884,7 @@ impl IntensityArcImage<3, 1, u8, SVec, 3, 1> for ArcImage3U8 { } } -impl IntensityArcImage<3, 1, u8, SVec, 4, 1> for ArcImage4U8 { +impl IsIntensityArcImage<3, 1, u8, SVec, 4, 1> for ArcImage4U8 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u8 { @@ -944,7 +943,7 @@ impl IntensityArcImage<3, 1, u8, SVec, 4, 1> for ArcImage4U8 { } } -impl IntensityArcImage<3, 1, u16, SVec, 2, 1> for ArcImage2U16 { +impl IsIntensityArcImage<3, 1, u16, SVec, 2, 1> for ArcImage2U16 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u16 { @@ -1002,7 +1001,7 @@ impl IntensityArcImage<3, 1, u16, SVec, 2, 1> for ArcImage2U16 { } } -impl IntensityArcImage<3, 1, u16, SVec, 3, 1> for ArcImage3U16 { +impl IsIntensityArcImage<3, 1, u16, SVec, 3, 1> for ArcImage3U16 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u16 { @@ -1065,7 +1064,7 @@ impl IntensityArcImage<3, 1, u16, SVec, 3, 1> for ArcImage3U16 { } } -impl IntensityArcImage<3, 1, u16, SVec, 4, 1> for ArcImage4U16 { +impl IsIntensityArcImage<3, 1, u16, SVec, 4, 1> for ArcImage4U16 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u16 { @@ -1130,7 +1129,7 @@ impl IntensityArcImage<3, 1, u16, SVec, 4, 1> for ArcImage4U16 { } } -impl IntensityArcImage<3, 1, f32, SVec, 2, 1> for ArcImage2F32 { +impl IsIntensityArcImage<3, 1, f32, SVec, 2, 1> for ArcImage2F32 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> f32 { @@ -1191,7 +1190,7 @@ impl IntensityArcImage<3, 1, f32, SVec, 2, 1> for ArcImage2F32 { } } -impl IntensityArcImage<3, 1, f32, SVec, 3, 1> for ArcImage3F32 { +impl IsIntensityArcImage<3, 1, f32, SVec, 3, 1> for ArcImage3F32 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> f32 { @@ -1254,7 +1253,7 @@ impl IntensityArcImage<3, 1, f32, SVec, 3, 1> for ArcImage3F32 { } } -impl IntensityArcImage<3, 1, f32, SVec, 4, 1> for ArcImage4F32 { +impl IsIntensityArcImage<3, 1, f32, SVec, 4, 1> for ArcImage4F32 { type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> f32 { @@ 
-1320,7 +1319,7 @@ impl IntensityArcImage<3, 1, f32, SVec, 4, 1> for ArcImage4F32 { } /// Intensity image view of unsigned integer values. -pub trait IntensityViewImageU<'a> { +pub trait IsIntensityViewImageU<'a> { /// Color type of the image const COLOR_TYPE: png::ColorType; /// Bit depth of the image @@ -1333,7 +1332,7 @@ pub trait IntensityViewImageU<'a> { fn raw_u8_slice(&self) -> &[u8]; } -impl<'a> IntensityViewImageU<'a> for ImageViewU8<'a> { +impl<'a> IsIntensityViewImageU<'a> for ImageViewU8<'a> { const COLOR_TYPE: png::ColorType = png::ColorType::Grayscale; const BIT_DEPTH: png::BitDepth = png::BitDepth::Eight; @@ -1346,7 +1345,7 @@ impl<'a> IntensityViewImageU<'a> for ImageViewU8<'a> { } } -impl<'a> IntensityViewImageU<'a> for ImageView2U8<'a> { +impl<'a> IsIntensityViewImageU<'a> for ImageView2U8<'a> { const COLOR_TYPE: png::ColorType = png::ColorType::GrayscaleAlpha; const BIT_DEPTH: png::BitDepth = png::BitDepth::Eight; @@ -1359,7 +1358,7 @@ impl<'a> IntensityViewImageU<'a> for ImageView2U8<'a> { } } -impl<'a> IntensityViewImageU<'a> for ImageView3U8<'a> { +impl<'a> IsIntensityViewImageU<'a> for ImageView3U8<'a> { const COLOR_TYPE: png::ColorType = png::ColorType::Rgb; const BIT_DEPTH: png::BitDepth = png::BitDepth::Eight; @@ -1372,7 +1371,7 @@ impl<'a> IntensityViewImageU<'a> for ImageView3U8<'a> { } } -impl<'a> IntensityViewImageU<'a> for ImageView4U8<'a> { +impl<'a> IsIntensityViewImageU<'a> for ImageView4U8<'a> { const COLOR_TYPE: png::ColorType = png::ColorType::Rgba; const BIT_DEPTH: png::BitDepth = png::BitDepth::Eight; @@ -1385,7 +1384,7 @@ impl<'a> IntensityViewImageU<'a> for ImageView4U8<'a> { } } -impl<'a> IntensityViewImageU<'a> for ImageViewU16<'a> { +impl<'a> IsIntensityViewImageU<'a> for ImageViewU16<'a> { const COLOR_TYPE: png::ColorType = png::ColorType::Grayscale; const BIT_DEPTH: png::BitDepth = png::BitDepth::Sixteen; @@ -1398,7 +1397,7 @@ impl<'a> IntensityViewImageU<'a> for ImageViewU16<'a> { } } -impl<'a> IntensityViewImageU<'a> for ImageView2U16<'a> { +impl<'a> IsIntensityViewImageU<'a> for ImageView2U16<'a> { const COLOR_TYPE: png::ColorType = png::ColorType::GrayscaleAlpha; const BIT_DEPTH: png::BitDepth = png::BitDepth::Sixteen; @@ -1411,7 +1410,7 @@ impl<'a> IntensityViewImageU<'a> for ImageView2U16<'a> { } } -impl<'a> IntensityViewImageU<'a> for ImageView3U16<'a> { +impl<'a> IsIntensityViewImageU<'a> for ImageView3U16<'a> { const COLOR_TYPE: png::ColorType = png::ColorType::Rgb; const BIT_DEPTH: png::BitDepth = png::BitDepth::Sixteen; @@ -1424,7 +1423,7 @@ impl<'a> IntensityViewImageU<'a> for ImageView3U16<'a> { } } -impl<'a> IntensityViewImageU<'a> for ImageView4U16<'a> { +impl<'a> IsIntensityViewImageU<'a> for ImageView4U16<'a> { const COLOR_TYPE: png::ColorType = png::ColorType::Rgba; const BIT_DEPTH: png::BitDepth = png::BitDepth::Sixteen; diff --git a/crates/sophus_image/src/interpolation.rs b/crates/sophus_image/src/interpolation.rs index eb9a4d47..a7d88bfd 100644 --- a/crates/sophus_image/src/interpolation.rs +++ b/crates/sophus_image/src/interpolation.rs @@ -1,4 +1,4 @@ -use crate::image_view::IsImageView; +use crate::prelude::*; use sophus_core::linalg::SVec; /// Bilinear interpolated image lookup diff --git a/crates/sophus_image/src/lib.rs b/crates/sophus_image/src/lib.rs index 61c27c39..7723c275 100644 --- a/crates/sophus_image/src/lib.rs +++ b/crates/sophus_image/src/lib.rs @@ -1,6 +1,6 @@ #![feature(portable_simd)] #![deny(missing_docs)] -//! # image module +//! 
image crate - part of the sophus-rs project /// image with shared ownership pub mod arc_image; @@ -16,3 +16,59 @@ pub mod mut_image; pub mod mut_image_view; /// png image io pub mod png; + +pub use crate::arc_image::ArcImage; +pub use crate::image_view::ImageView; +pub use crate::interpolation::interpolate; +pub use crate::mut_image::MutImage; +pub use crate::mut_image_view::MutImageView; + +/// Image size +#[derive(Debug, Copy, Clone, Default)] +pub struct ImageSize { + /// Width of the image - number of columns + pub width: usize, + /// Height of the image - number of rows + pub height: usize, +} + +impl ImageSize { + /// Create a new image size from width and height + pub fn new(width: usize, height: usize) -> Self { + Self { width, height } + } + + /// Get the area of the image - width * height + pub fn area(&self) -> usize { + self.width * self.height + } +} + +impl From<[usize; 2]> for ImageSize { + /// We are converting from Tensor (and matrix) convention (d0 = rows, d1 = cols) + /// to Matrix convention (d0 = width = cols, d1 = height = rows) + fn from(rows_cols: [usize; 2]) -> Self { + ImageSize { + width: rows_cols[1], + height: rows_cols[0], + } + } +} + +impl From for [usize; 2] { + /// We are converting from Image Indexing Convention (d0 = width = cols, d1 = height = rows) + /// to tensor (and matrix) convention (d0 = rows, d1 = cols). + fn from(image_size: ImageSize) -> Self { + [image_size.height, image_size.width] + } +} + +/// sophus_image prelude +pub mod prelude { + pub use crate::image_view::IsImageView; + pub use crate::intensity_image::IsIntensityArcImage; + pub use crate::intensity_image::IsIntensityMutImage; + pub use crate::intensity_image::IsIntensityViewImageU; + pub use crate::mut_image_view::IsMutImageView; + pub use sophus_core::prelude::*; +} diff --git a/crates/sophus_image/src/mut_image.rs b/crates/sophus_image/src/mut_image.rs index 8bd53e69..eb2e1633 100644 --- a/crates/sophus_image/src/mut_image.rs +++ b/crates/sophus_image/src/mut_image.rs @@ -1,16 +1,10 @@ use crate::arc_image::GenArcImage; use crate::image_view::GenImageView; -use crate::image_view::ImageSize; -use crate::image_view::IsImageView; -use crate::mut_image_view::IsMutImageView; - -use sophus_core::linalg::scalar::IsCoreScalar; +use crate::prelude::*; +use crate::ImageSize; use sophus_core::linalg::SVec; -use sophus_core::tensor::element::IsStaticTensor; -use sophus_core::tensor::mut_tensor::MutTensor; -use sophus_core::tensor::mut_tensor_view::IsMutTensorLike; -use sophus_core::tensor::tensor_view::IsTensorView; -use sophus_core::tensor::tensor_view::TensorView; +use sophus_core::tensor::MutTensor; +use sophus_core::tensor::TensorView; /// Mutable image of static tensors #[derive(Debug, Clone, Default)] @@ -111,7 +105,7 @@ macro_rules! mut_image { self.mut_tensor.mut_array[[v, u]].clone() } - fn image_size(&self) -> crate::image_view::ImageSize { + fn image_size(&self) -> ImageSize { self.image_view().image_size() } } @@ -126,7 +120,7 @@ macro_rules! mut_image { > GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> { /// creates a mutable image view from image size - pub fn from_image_size(size: crate::image_view::ImageSize) -> Self { + pub fn from_image_size(size: ImageSize) -> Self { Self { mut_tensor: MutTensor::< $scalar_rank, @@ -142,7 +136,7 @@ macro_rules! 
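Usage sketch for the relocated `ImageSize` type and its conversions, assuming `sophus_image` at this revision as a dependency; it shows the width/height versus rows/cols convention spelled out in the `From` impls above:

```rust
use sophus_image::ImageSize;

fn main() {
    let size = ImageSize::new(640, 480);
    assert_eq!(size.area(), 640 * 480);

    // ImageSize -> [rows, cols] follows the tensor convention: [height, width].
    let rows_cols: [usize; 2] = size.into();
    assert_eq!(rows_cols, [480, 640]);

    // ...and [rows, cols] -> ImageSize converts back the same way.
    let roundtrip = ImageSize::from(rows_cols);
    assert_eq!((roundtrip.width, roundtrip.height), (640, 480));
}
```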
mut_image { } /// creates a mutable image from image size and value - pub fn from_image_size_and_val(size: crate::image_view::ImageSize, val: STensor) -> Self { + pub fn from_image_size_and_val(size: ImageSize, val: STensor) -> Self { Self { mut_tensor: MutTensor::< $scalar_rank, diff --git a/crates/sophus_image/src/mut_image_view.rs b/crates/sophus_image/src/mut_image_view.rs index 550274f8..8c717703 100644 --- a/crates/sophus_image/src/mut_image_view.rs +++ b/crates/sophus_image/src/mut_image_view.rs @@ -1,12 +1,7 @@ use crate::image_view::GenImageView; -use crate::image_view::ImageSize; -use crate::image_view::IsImageView; - -use sophus_core::linalg::scalar::IsCoreScalar; -use sophus_core::tensor::element::IsStaticTensor; -use sophus_core::tensor::mut_tensor_view::IsMutTensorLike; +use crate::prelude::*; +use crate::ImageSize; use sophus_core::tensor::mut_tensor_view::MutTensorView; -use sophus_core::tensor::tensor_view::IsTensorLike; /// Mutable image view of a static tensors #[derive(Debug, PartialEq)] @@ -25,6 +20,9 @@ pub struct GenMutImageView< pub mut_tensor_view: MutTensorView<'a, TOTAL_RANK, 2, SRANK, Scalar, STensor, ROWS, COLS>, } +/// Mutable image view of scalar values +pub type MutImageView<'a, Scalar> = GenMutImageView<'a, 2, 0, Scalar, Scalar, 1, 1>; + macro_rules! mut_image_view { ($scalar_rank:literal, $srank:literal) => { impl< diff --git a/crates/sophus_image/src/png.rs b/crates/sophus_image/src/png.rs index fbee9e7d..bdd8464a 100644 --- a/crates/sophus_image/src/png.rs +++ b/crates/sophus_image/src/png.rs @@ -1,11 +1,6 @@ -use std::fs::File; -use std::io::BufWriter; - -use crate::mut_image::MutImage2U16; - -use crate::image_view::ImageSize; use crate::intensity_image::DynIntensityMutImage; -use crate::intensity_image::IntensityViewImageU; +use crate::intensity_image::IsIntensityViewImageU; +use crate::mut_image::MutImage2U16; use crate::mut_image::MutImage2U8; use crate::mut_image::MutImage3U16; use crate::mut_image::MutImage3U8; @@ -13,9 +8,12 @@ use crate::mut_image::MutImage4U16; use crate::mut_image::MutImage4U8; use crate::mut_image::MutImageU16; use crate::mut_image::MutImageU8; +use crate::ImageSize; +use std::fs::File; +use std::io::BufWriter; /// Save an image of unsigned integers as a PNG file -pub fn save_as_png<'a, GenImageView: IntensityViewImageU<'a>>( +pub fn save_as_png<'a, GenImageView: IsIntensityViewImageU<'a>>( image_u: &'a GenImageView, path: &std::path::Path, ) { diff --git a/crates/sophus_lie/src/factor_lie_group.rs b/crates/sophus_lie/src/factor_lie_group.rs index 55b1fd90..9dab2933 100644 --- a/crates/sophus_lie/src/factor_lie_group.rs +++ b/crates/sophus_lie/src/factor_lie_group.rs @@ -1,14 +1,14 @@ -use crate::groups::rotation2::Rotation2; -use crate::groups::rotation3::Rotation3; use crate::lie_group::LieGroup; +use crate::prelude::*; use crate::traits::IsRealLieFactorGroupImpl; +use crate::Rotation2; +use crate::Rotation3; use approx::assert_relative_eq; -use sophus_core::calculus::dual::dual_scalar::DualBatchScalar; -use sophus_core::calculus::dual::dual_scalar::DualScalar; -use sophus_core::calculus::maps::matrix_valued_maps::MatrixValuedMapFromVector; -use sophus_core::linalg::matrix::IsMatrix; -use sophus_core::linalg::scalar::IsRealScalar; +use sophus_core::calculus::dual::DualBatchScalar; +use sophus_core::calculus::dual::DualScalar; +use sophus_core::calculus::maps::MatrixValuedMapFromVector; use sophus_core::linalg::BatchScalarF64; +use sophus_core::manifold::traits::TangentImpl; impl< S: IsRealScalar, @@ -69,7 +69,6 @@ 
macro_rules! def_real_group_test_template { impl RealFactorLieGroupTest for $group { fn mat_v_test() { use crate::traits::IsLieGroup; - use sophus_core::calculus::manifold::traits::TangentImpl; use sophus_core::linalg::scalar::IsScalar; const POINT: usize = <$group>::POINT; @@ -88,8 +87,6 @@ macro_rules! def_real_group_test_template { fn test_mat_v_jacobian() { use crate::traits::IsLieGroup; - use sophus_core::calculus::dual::dual_scalar::IsDualScalar; - use sophus_core::calculus::manifold::traits::TangentImpl; use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; use sophus_core::linalg::scalar::IsScalar; use sophus_core::linalg::vector::IsVector; @@ -132,8 +129,6 @@ macro_rules! def_real_group_test_template { } for p in example_points::<$scalar, POINT, $batch>() { for a in Self::element_examples() { - let dual_params_a = <$dual_scalar>::vector_v(*a.clone().params()); - let _dual_a = <$dual_group>::from_params(&dual_params_a); let dual_p = <$dual_scalar as IsScalar<$batch>>::Vector::from_real_vector(p.clone()); diff --git a/crates/sophus_lie/src/groups/isometry2.rs b/crates/sophus_lie/src/groups/isometry2.rs index decb7635..61825da8 100644 --- a/crates/sophus_lie/src/groups/isometry2.rs +++ b/crates/sophus_lie/src/groups/isometry2.rs @@ -1,9 +1,8 @@ -use super::rotation2::Rotation2Impl; -use super::translation_product_product::TranslationProductGroupImpl; -use crate::groups::rotation2::Rotation2; +use crate::groups::rotation2::Rotation2Impl; +use crate::groups::translation_product_product::TranslationProductGroupImpl; use crate::lie_group::LieGroup; -use crate::traits::IsTranslationProductGroup; -use sophus_core::linalg::scalar::IsScalar; +use crate::prelude::*; +use crate::Rotation2; /// 2D isometry group implementation struct - SE(2) pub type Isometry2Impl = diff --git a/crates/sophus_lie/src/groups/isometry3.rs b/crates/sophus_lie/src/groups/isometry3.rs index 025c8769..e14cf50e 100644 --- a/crates/sophus_lie/src/groups/isometry3.rs +++ b/crates/sophus_lie/src/groups/isometry3.rs @@ -1,9 +1,8 @@ -use super::rotation3::Rotation3; use super::rotation3::Rotation3Impl; use super::translation_product_product::TranslationProductGroupImpl; use crate::lie_group::LieGroup; -use crate::traits::IsTranslationProductGroup; -use sophus_core::linalg::scalar::IsScalar; +use crate::prelude::*; +use crate::Rotation3; /// 3D isometry group implementation struct - SE(3) pub type Isometry3Impl = diff --git a/crates/sophus_lie/src/groups/rotation2.rs b/crates/sophus_lie/src/groups/rotation2.rs index 5e3e9953..7ca396c7 100644 --- a/crates/sophus_lie/src/groups/rotation2.rs +++ b/crates/sophus_lie/src/groups/rotation2.rs @@ -1,14 +1,10 @@ use crate::lie_group::LieGroup; +use crate::prelude::*; use crate::traits::IsLieFactorGroupImpl; use crate::traits::IsLieGroupImpl; use crate::traits::IsRealLieFactorGroupImpl; use crate::traits::IsRealLieGroupImpl; -use sophus_core::calculus::manifold::{self}; -use sophus_core::linalg::matrix::IsMatrix; -use sophus_core::linalg::scalar::IsRealScalar; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::vector::IsVector; -use sophus_core::params::HasParams; +use sophus_core::manifold::traits::TangentImpl; use sophus_core::params::ParamsImpl; use std::marker::PhantomData; @@ -52,8 +48,8 @@ impl, const BATCH_SIZE: usize> ParamsImpl, const BATCH_SIZE: usize> - manifold::traits::TangentImpl for Rotation2Impl +impl, const BATCH_SIZE: usize> TangentImpl + for Rotation2Impl { fn tangent_examples() -> Vec> { vec![ @@ -154,7 +150,7 @@ impl, 
const BATCH_SIZE: usize> IsRealLieGroupImpl for Rotation2Impl { fn dx_exp_x_at_0() -> S::Matrix<2, 1> { - S::Matrix::from_real_array2([[S::RealScalar::zeros()], [S::RealScalar::ones()]]) + S::Matrix::from_real_scalar_array2([[S::RealScalar::zeros()], [S::RealScalar::ones()]]) } fn dx_exp_x_times_point_at_0(point: S::Vector<2>) -> S::Matrix<2, 1> { @@ -182,7 +178,7 @@ impl, const BATCH_SIZE: usize> } fn has_shortest_path_ambiguity(params: &S::Vector<2>) -> S::Mask { - (Self::log(params).vector().get_elem(0).abs() - S::from_f64(std::f64::consts::PI)) + (Self::log(params).get_elem(0).abs() - S::from_f64(std::f64::consts::PI)) .abs() .less_equal(&S::from_f64(1e-5)) } diff --git a/crates/sophus_lie/src/groups/rotation3.rs b/crates/sophus_lie/src/groups/rotation3.rs index bb19e3f4..5554b76e 100644 --- a/crates/sophus_lie/src/groups/rotation3.rs +++ b/crates/sophus_lie/src/groups/rotation3.rs @@ -1,15 +1,10 @@ use crate::lie_group::LieGroup; +use crate::prelude::*; use crate::traits::IsLieGroupImpl; use crate::traits::IsRealLieFactorGroupImpl; use crate::traits::IsRealLieGroupImpl; -use sophus_core::calculus::manifold::{self}; -use sophus_core::linalg::bool_mask::BoolMask; -use sophus_core::linalg::matrix::IsMatrix; -use sophus_core::linalg::scalar::IsRealScalar; -use sophus_core::linalg::scalar::IsScalar; use sophus_core::linalg::vector::cross; -use sophus_core::linalg::vector::IsVector; -use sophus_core::params::HasParams; +use sophus_core::manifold::traits::TangentImpl; use sophus_core::params::ParamsImpl; use std::marker::PhantomData; @@ -21,29 +16,20 @@ pub struct Rotation3Impl, const BATCH: usize> { impl, const BATCH: usize> ParamsImpl for Rotation3Impl { fn params_examples() -> Vec> { - let mut params = vec![]; - - params.push( + vec![ Rotation3::::exp(&S::Vector::<3>::from_f64_array([0.0, 0.0, 0.0])) .params() .clone(), - ); - params.push( Rotation3::::exp(&S::Vector::<3>::from_f64_array([0.1, 0.5, -0.1])) .params() .clone(), - ); - params.push( Rotation3::::exp(&S::Vector::<3>::from_f64_array([0.0, 0.2, 1.0])) .params() .clone(), - ); - params.push( Rotation3::::exp(&S::Vector::<3>::from_f64_array([-0.2, 0.0, 0.8])) .params() .clone(), - ); - params + ] } fn invalid_params_examples() -> Vec> { @@ -62,9 +48,7 @@ impl, const BATCH: usize> ParamsImpl for Rotatio } } -impl, const BATCH: usize> manifold::traits::TangentImpl - for Rotation3Impl -{ +impl, const BATCH: usize> TangentImpl for Rotation3Impl { fn tangent_examples() -> Vec> { vec![ S::Vector::<3>::from_f64_array([0.0, 0.0, 0.0]), @@ -123,7 +107,7 @@ impl, const BATCH: usize> IsLieGroupImpl) -> S::Vector<3> { const EPS: f64 = 1e-8; - let ivec: S::Vector<3> = params.get_fixed_rows::<3>(1); + let ivec: S::Vector<3> = params.get_fixed_subvec::<3>(1); let squared_n = ivec.squared_norm(); let w = params.get_elem(0); @@ -188,7 +172,7 @@ impl, const BATCH: usize> IsLieGroupImpl) -> S::Matrix<3, 3> { - let ivec = params.get_fixed_rows::<3>(1); + let ivec = params.get_fixed_subvec::<3>(1); let re = params.get_elem(0); let unit_x = S::Vector::from_f64_array([1.0, 0.0, 0.0]); @@ -228,8 +212,8 @@ impl, const BATCH: usize> IsLieGroupImpl(1); - let rhs_ivec = rhs_params.get_fixed_rows::<3>(1); + let lhs_ivec = lhs_params.get_fixed_subvec::<3>(1); + let rhs_ivec = rhs_params.get_fixed_subvec::<3>(1); let re = lhs_re.clone() * rhs_re.clone() - lhs_ivec.clone().dot(rhs_ivec.clone()); let ivec = rhs_ivec.scaled(lhs_re) @@ -334,7 +318,7 @@ impl, const BATCH: usize> IsRealLieGroupImpl) -> S::Matrix<3, 4> { - let ivec: S::Vector<3> = 
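The reworked `has_shortest_path_ambiguity` above reduces, for a single-lane `f64` rotation angle, to a proximity test against plus or minus pi. A plain-`f64` restatement of that check:

```rust
// A 2D rotation has an ambiguous shortest path when its log (the angle)
// is within 1e-5 of +/- pi.
fn has_shortest_path_ambiguity(theta: f64) -> bool {
    (theta.abs() - std::f64::consts::PI).abs() <= 1e-5
}

fn main() {
    assert!(has_shortest_path_ambiguity(std::f64::consts::PI));
    assert!(has_shortest_path_ambiguity(-std::f64::consts::PI + 1e-6));
    assert!(!has_shortest_path_ambiguity(1.0));
}
```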
params.get_fixed_rows::<3>(1); + let ivec: S::Vector<3> = params.get_fixed_subvec::<3>(1); let w = params.get_elem(0); let squared_n = ivec.squared_norm(); @@ -592,10 +576,11 @@ impl, const BATCH: usize> IsRealLieFactorGroupImpl S::Matrix<3, 3> { - let t: &S::Matrix<3, 3> = &dt_mat_omega_sq[i]; - let foo: S::Matrix<3, 3> = - t.scaled(c) + mat_omega_sq.scaled(domega_theta.get_elem(i) * dt_c); - let mut l_i: S::Matrix<3, 3> = S::Matrix::zeros().select(&near_zero, foo); + let dt_mat_omega_sq_i: &S::Matrix<3, 3> = &dt_mat_omega_sq[i]; + let mut l_i: S::Matrix<3, 3> = S::Matrix::zeros().select( + &near_zero, + dt_mat_omega_sq_i.scaled(c) + mat_omega_sq.scaled(domega_theta.get_elem(i) * dt_c), + ); let pos_idx = dt_mat_omega_pos_idx[i]; l_i.set_elem(pos_idx, S::from_f64(-0.5) + l_i.get_elem(pos_idx)); diff --git a/crates/sophus_lie/src/groups/translation_product_product.rs b/crates/sophus_lie/src/groups/translation_product_product.rs index b1f4f350..ca9c8cf0 100644 --- a/crates/sophus_lie/src/groups/translation_product_product.rs +++ b/crates/sophus_lie/src/groups/translation_product_product.rs @@ -1,17 +1,10 @@ -use std::vec; - use crate::lie_group::LieGroup; +use crate::prelude::*; use crate::traits::IsLieFactorGroupImpl; use crate::traits::IsLieGroupImpl; use crate::traits::IsRealLieFactorGroupImpl; use crate::traits::IsRealLieGroupImpl; -use crate::traits::IsTranslationProductGroup; -use sophus_core::calculus::manifold; -use sophus_core::linalg::matrix::IsMatrix; -use sophus_core::linalg::scalar::IsRealScalar; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::vector::IsVector; -use sophus_core::params::HasParams; +use sophus_core::manifold::traits::TangentImpl; use sophus_core::params::ParamsImpl; use sophus_core::points::example_points; @@ -47,12 +40,12 @@ impl< { /// translation part of the group parameters pub fn translation(params: &S::Vector) -> S::Vector { - params.get_fixed_rows::(0) + params.get_fixed_subvec::(0) } /// factor part of the group parameters pub fn factor_params(params: &S::Vector) -> S::Vector { - params.get_fixed_rows::(POINT) + params.get_fixed_subvec::(POINT) } /// create group parameters from translation and factor parameters @@ -65,12 +58,12 @@ impl< /// translation part of the tangent vector fn translation_tangent(tangent: &S::Vector) -> S::Vector { - tangent.get_fixed_rows::(0) + tangent.get_fixed_subvec::(0) } /// factor part of the tangent vector fn factor_tangent(tangent: &S::Vector) -> S::Vector { - tangent.get_fixed_rows::(POINT) + tangent.get_fixed_subvec::(POINT) } /// create tangent vector from translation and factor tangent @@ -131,7 +124,7 @@ impl< const SPARAMS: usize, const BATCH: usize, F: IsLieFactorGroupImpl, - > manifold::traits::TangentImpl + > TangentImpl for TranslationProductGroupImpl { fn tangent_examples() -> Vec> { diff --git a/crates/sophus_lie/src/lib.rs b/crates/sophus_lie/src/lib.rs index a8bb3a97..8e6e5fa6 100644 --- a/crates/sophus_lie/src/lib.rs +++ b/crates/sophus_lie/src/lib.rs @@ -1,12 +1,18 @@ #![feature(portable_simd)] #![deny(missing_docs)] -//! # Lie groups module +#![allow(clippy::needless_range_loop)] +//! 
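The `get_fixed_rows` to `get_fixed_subvec` renames above all extract a fixed-size sub-vector of the parameter or tangent vector, for example the imaginary part of a quaternion stored as [w, x, y, z]. The equivalent operation with plain nalgebra, following the pattern the removed `get_fixed_rows` impl used:

```rust
use nalgebra::SVector;

fn main() {
    // Quaternion parameters stored as [w, x, y, z].
    let params = SVector::<f64, 4>::new(1.0, 0.1, 0.2, 0.3);
    let w = params[0];
    // get_fixed_subvec::<3>(1) corresponds to taking rows 1..4 as an owned 3-vector.
    let ivec: SVector<f64, 3> = params.fixed_rows::<3>(1).into();
    assert_eq!(w, 1.0);
    assert_eq!(ivec, SVector::<f64, 3>::new(0.1, 0.2, 0.3));
}
```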
Lie groups crate - part of the sophus-rs project /// Lie groups pub mod groups; +pub use crate::groups::isometry2::Isometry2; +pub use crate::groups::isometry3::Isometry3; +pub use crate::groups::rotation2::Rotation2; +pub use crate::groups::rotation3::Rotation3; /// Lie groups pub mod lie_group; +pub use crate::lie_group::LieGroup; /// Lie groups pub mod factor_lie_group; @@ -19,3 +25,10 @@ pub mod traits; /// Real lie group pub mod real_lie_group; + +/// sophus_lie prelude +pub mod prelude { + pub use crate::traits::IsLieGroup; + pub use crate::traits::IsTranslationProductGroup; + pub use sophus_core::prelude::*; +} diff --git a/crates/sophus_lie/src/lie_group.rs b/crates/sophus_lie/src/lie_group.rs index f2a1f7b1..8f1e6c2d 100644 --- a/crates/sophus_lie/src/lie_group.rs +++ b/crates/sophus_lie/src/lie_group.rs @@ -1,13 +1,8 @@ use super::traits::IsLieGroupImpl; -use crate::traits::IsLieGroup; +use crate::prelude::*; use approx::assert_relative_eq; use assertables::assert_le_as_result; -use sophus_core::calculus::manifold::traits::TangentImpl; -use sophus_core::linalg::bool_mask::BoolMask; -use sophus_core::linalg::matrix::IsMatrix; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::vector::IsVector; -use sophus_core::params::HasParams; +use sophus_core::manifold::traits::TangentImpl; use sophus_core::params::ParamsImpl; use std::fmt::Debug; diff --git a/crates/sophus_lie/src/lie_group_manifold.rs b/crates/sophus_lie/src/lie_group_manifold.rs index d206dd41..bfefb55e 100644 --- a/crates/sophus_lie/src/lie_group_manifold.rs +++ b/crates/sophus_lie/src/lie_group_manifold.rs @@ -1,13 +1,12 @@ use crate::lie_group::LieGroup; +use crate::prelude::*; use crate::traits::IsLieGroupImpl; -use sophus_core::calculus::manifold::traits::IsManifold; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::params::HasParams; use sophus_core::params::ParamsImpl; use std::fmt::Debug; +/// Left group manifold #[derive(Debug, Clone)] -struct LeftGroupManifold< +pub struct LeftGroupManifold< S: IsScalar, const DOF: usize, const PARAMS: usize, diff --git a/crates/sophus_lie/src/real_lie_group.rs b/crates/sophus_lie/src/real_lie_group.rs index e139bb60..2b7dbfbe 100644 --- a/crates/sophus_lie/src/real_lie_group.rs +++ b/crates/sophus_lie/src/real_lie_group.rs @@ -1,24 +1,19 @@ use super::traits::IsLieGroupImpl; use super::traits::IsRealLieGroupImpl; -use crate::groups::isometry2::Isometry2; -use crate::groups::isometry3::Isometry3; -use crate::groups::rotation2::Rotation2; -use crate::groups::rotation3::Rotation3; use crate::lie_group::LieGroup; +use crate::prelude::*; +use crate::Isometry2; +use crate::Isometry3; +use crate::Rotation2; +use crate::Rotation3; use approx::assert_relative_eq; use nalgebra::SVector; -use sophus_core::calculus::dual::dual_scalar::DualBatchScalar; -use sophus_core::calculus::dual::dual_scalar::DualScalar; -use sophus_core::calculus::maps::matrix_valued_maps::MatrixValuedMapFromVector; -use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromMatrix; -use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; -use sophus_core::linalg::matrix::IsMatrix; -use sophus_core::linalg::scalar::IsRealScalar; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::vector::IsVector; +use sophus_core::calculus::dual::DualBatchScalar; +use sophus_core::calculus::dual::DualScalar; +use sophus_core::calculus::maps::MatrixValuedMapFromVector; +use sophus_core::calculus::maps::VectorValuedMapFromMatrix; +use 
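Import-style sketch enabled by the new crate-root re-exports and prelude above. This assumes `sophus_lie` at this revision as a dependency; the `exp`, `inverse`, and `params` calls follow the Lie-group API used elsewhere in this diff, and the `<f64, 1>` generic arguments for the single-lane case are an assumption:

```rust
use sophus_core::linalg::VecF64;
use sophus_lie::prelude::*;
use sophus_lie::Isometry2;

fn main() {
    // Previously: use sophus_lie::groups::isometry2::Isometry2;
    let world_from_robot = Isometry2::<f64, 1>::exp(&VecF64::<3>::new(1.0, 0.0, 0.2));
    let robot_from_world = world_from_robot.inverse();
    println!("{:?}", robot_from_world.params());
}
```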
sophus_core::calculus::maps::VectorValuedMapFromVector; use sophus_core::linalg::BatchScalarF64; -use sophus_core::params::HasParams; -use sophus_core::tensor::tensor_view::IsTensorLike; use std::fmt::Display; use std::fmt::Formatter; @@ -136,7 +131,7 @@ macro_rules! def_real_group_test_template { fn adjoint_jacobian_tests() { use crate::traits::IsLieGroup; const DOF: usize = <$group>::DOF; - use sophus_core::calculus::manifold::traits::TangentImpl; + use sophus_core::manifold::traits::TangentImpl; let tangent_examples: Vec<<$scalar as IsScalar<$batch>>::Vector> = <$group>::tangent_examples(); @@ -206,7 +201,7 @@ macro_rules! def_real_group_test_template { const POINT: usize = <$group>::POINT; const PARAMS: usize = <$group>::PARAMS; - use sophus_core::calculus::manifold::traits::TangentImpl; + use sophus_core::manifold::traits::TangentImpl; use sophus_core::points::example_points; for t in <$group>::tangent_examples() { @@ -311,7 +306,6 @@ macro_rules! def_real_group_test_template { for g in Self::element_examples() { // dx log(y) { - use sophus_core::linalg::bool_mask::BoolMask; if g.has_shortest_path_ambiguity().any() { // jacobian not uniquely defined, let's skip these cases continue; @@ -397,7 +391,7 @@ macro_rules! def_real_group_test_template { fn hat_jacobians_tests() { use crate::traits::IsLieGroup; - use sophus_core::calculus::manifold::traits::TangentImpl; + use sophus_core::manifold::traits::TangentImpl; const DOF: usize = <$group>::DOF; const AMBIENT: usize = <$group>::AMBIENT; diff --git a/crates/sophus_lie/src/traits.rs b/crates/sophus_lie/src/traits.rs index 1868816d..63579f1a 100644 --- a/crates/sophus_lie/src/traits.rs +++ b/crates/sophus_lie/src/traits.rs @@ -1,11 +1,7 @@ -use std::fmt::Debug; - -use sophus_core::calculus::manifold::traits::TangentImpl; -use sophus_core::calculus::manifold::{self}; -use sophus_core::linalg::scalar::IsRealScalar; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::params::HasParams; +use crate::prelude::*; +use sophus_core::manifold::traits::TangentImpl; use sophus_core::params::ParamsImpl; +use std::fmt::Debug; /// Lie Group implementation trait /// @@ -23,11 +19,7 @@ pub trait IsLieGroupImpl< const POINT: usize, const AMBIENT: usize, const BATCH_SIZE: usize, ->: - ParamsImpl - + manifold::traits::TangentImpl - + Clone - + Debug +>: ParamsImpl + TangentImpl + Clone + Debug { /// Generic scalar, real scalar, and dual scalar type GenG>: IsLieGroupImpl; diff --git a/crates/sophus_opt/src/cost.rs b/crates/sophus_opt/src/cost.rs index 3f0ffbea..6277f08f 100644 --- a/crates/sophus_opt/src/cost.rs +++ b/crates/sophus_opt/src/cost.rs @@ -1,12 +1,10 @@ use crate::term::Term; use crate::variables::VarKind; use crate::variables::VarPool; - +use dyn_clone::DynClone; use std::fmt::Debug; use std::ops::AddAssign; -use dyn_clone::DynClone; - /// Evaluated cost pub trait IsCost: Debug + DynClone { /// squared error diff --git a/crates/sophus_opt/src/cost_fn.rs b/crates/sophus_opt/src/cost_fn.rs index af926b5d..9aa4d6ef 100644 --- a/crates/sophus_opt/src/cost_fn.rs +++ b/crates/sophus_opt/src/cost_fn.rs @@ -1,14 +1,12 @@ -use std::marker::PhantomData; - use crate::cost::Cost; -use crate::cost_args::c_from_var_kind; -use crate::variables::VarKind; - use crate::cost::IsCost; +use crate::cost_args::c_from_var_kind; use crate::robust_kernel::RobustKernel; use crate::term::Term; use crate::variables::IsVarTuple; +use crate::variables::VarKind; use crate::variables::VarPool; +use std::marker::PhantomData; /// Signature of a term of a cost function pub 
trait IsTermSignature { @@ -99,7 +97,7 @@ where ResidualFn: IsResidualFn + 'static, { /// create a new cost function from a signature and a residual function - pub fn new( + pub fn new_box( signature: CostSignature, residual_fn: ResidualFn, ) -> Box { diff --git a/crates/sophus_opt/src/example_problems/cam_calib.rs b/crates/sophus_opt/src/example_problems/cam_calib.rs index a5f901f0..59ca19a6 100644 --- a/crates/sophus_opt/src/example_problems/cam_calib.rs +++ b/crates/sophus_opt/src/example_problems/cam_calib.rs @@ -6,18 +6,17 @@ use crate::example_problems::cost_fn::reprojection::ReprojTermSignature; use crate::example_problems::cost_fn::reprojection::ReprojectionCostFn; use crate::nlls::optimize; use crate::nlls::OptParams; +use crate::prelude::*; use crate::robust_kernel::HuberKernel; use crate::variables::VarFamily; use crate::variables::VarKind; use crate::variables::VarPoolBuilder; use sophus_core::linalg::MatF64; use sophus_core::linalg::VecF64; -use sophus_image::image_view::ImageSize; -use sophus_lie::groups::isometry3::Isometry3; -use sophus_lie::groups::rotation3::Rotation3; -use sophus_sensor::camera_enum::perspective_camera::PinholeCamera; - -use sophus_lie::traits::IsTranslationProductGroup; +use sophus_image::ImageSize; +use sophus_lie::Isometry3; +use sophus_lie::Rotation3; +use sophus_sensor::PinholeCamera; use std::collections::HashMap; /// Camera calibration problem @@ -234,8 +233,8 @@ impl CamCalibProblem { let up_var_pool = optimize( var_pool, vec![ - CostFn::new(priors.clone(), Isometry3PriorCostFn {}), - CostFn::new(reproj_obs.clone(), ReprojectionCostFn {}), + CostFn::new_box(priors.clone(), Isometry3PriorCostFn {}), + CostFn::new_box(reproj_obs.clone(), ReprojectionCostFn {}), ], OptParams { num_iter: 5, diff --git a/crates/sophus_opt/src/example_problems/cost_fn/isometry2_prior.rs b/crates/sophus_opt/src/example_problems/cost_fn/isometry2_prior.rs index a48ce977..9151f30a 100644 --- a/crates/sophus_opt/src/example_problems/cost_fn/isometry2_prior.rs +++ b/crates/sophus_opt/src/example_problems/cost_fn/isometry2_prior.rs @@ -1,18 +1,15 @@ use crate::cost_fn::IsResidualFn; use crate::cost_fn::IsTermSignature; +use crate::prelude::*; use crate::robust_kernel; use crate::term::MakeTerm; use crate::term::Term; use crate::variables::VarKind; -use sophus_core::calculus::dual::dual_scalar::DualScalar; -use sophus_core::calculus::dual::dual_vector::DualVector; -use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::scalar::IsSingleScalar; -use sophus_core::linalg::vector::IsVector; +use sophus_core::calculus::dual::DualScalar; +use sophus_core::calculus::dual::DualVector; +use sophus_core::calculus::maps::VectorValuedMapFromVector; use sophus_core::linalg::VecF64; -use sophus_core::params::HasParams; -use sophus_lie::groups::isometry2::Isometry2; +use sophus_lie::Isometry2; /// Cost function for a prior on an 2d isometry #[derive(Copy, Clone)] diff --git a/crates/sophus_opt/src/example_problems/cost_fn/isometry3_prior.rs b/crates/sophus_opt/src/example_problems/cost_fn/isometry3_prior.rs index a5ed7c53..df41e480 100644 --- a/crates/sophus_opt/src/example_problems/cost_fn/isometry3_prior.rs +++ b/crates/sophus_opt/src/example_problems/cost_fn/isometry3_prior.rs @@ -1,19 +1,16 @@ use crate::cost_fn::IsResidualFn; use crate::cost_fn::IsTermSignature; +use crate::prelude::*; use crate::robust_kernel; use crate::term::MakeTerm; use crate::term::Term; use crate::variables::VarKind; 
-use sophus_core::calculus::dual::dual_scalar::DualScalar; -use sophus_core::calculus::dual::dual_vector::DualVector; -use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::scalar::IsSingleScalar; -use sophus_core::linalg::vector::IsVector; +use sophus_core::calculus::dual::DualScalar; +use sophus_core::calculus::dual::DualVector; +use sophus_core::calculus::maps::VectorValuedMapFromVector; use sophus_core::linalg::MatF64; use sophus_core::linalg::VecF64; -use sophus_core::params::HasParams; -use sophus_lie::groups::isometry3::Isometry3; +use sophus_lie::Isometry3; /// Cost function for a prior on an 3d isometry #[derive(Copy, Clone)] diff --git a/crates/sophus_opt/src/example_problems/cost_fn/pose_graph.rs b/crates/sophus_opt/src/example_problems/cost_fn/pose_graph.rs index bed652b7..b0556bf1 100644 --- a/crates/sophus_opt/src/example_problems/cost_fn/pose_graph.rs +++ b/crates/sophus_opt/src/example_problems/cost_fn/pose_graph.rs @@ -1,11 +1,11 @@ use crate::cost_fn::IsResidualFn; use crate::cost_fn::IsTermSignature; +use crate::prelude::*; use crate::robust_kernel; use crate::term::MakeTerm; use crate::term::Term; use crate::variables::VarKind; -use sophus_core::linalg::scalar::IsSingleScalar; -use sophus_lie::groups::isometry2::Isometry2; +use sophus_lie::Isometry2; /// residual function for a pose-pose constraint pub fn res_fn( diff --git a/crates/sophus_opt/src/example_problems/cost_fn/reprojection.rs b/crates/sophus_opt/src/example_problems/cost_fn/reprojection.rs index d3491a47..86139b30 100644 --- a/crates/sophus_opt/src/example_problems/cost_fn/reprojection.rs +++ b/crates/sophus_opt/src/example_problems/cost_fn/reprojection.rs @@ -1,19 +1,17 @@ use crate::cost_fn::IsResidualFn; use crate::cost_fn::IsTermSignature; +use crate::prelude::*; use crate::robust_kernel; use crate::term::MakeTerm; use crate::term::Term; use crate::variables::IsVariable; use crate::variables::VarKind; -use sophus_core::calculus::dual::dual_scalar::DualScalar; -use sophus_core::calculus::dual::dual_vector::DualVector; -use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::scalar::IsSingleScalar; -use sophus_core::linalg::vector::IsVector; +use sophus_core::calculus::dual::DualScalar; +use sophus_core::calculus::dual::DualVector; +use sophus_core::calculus::maps::VectorValuedMapFromVector; use sophus_core::linalg::VecF64; -use sophus_lie::groups::isometry3::Isometry3; -use sophus_sensor::camera_enum::perspective_camera::PinholeCamera; +use sophus_lie::Isometry3; +use sophus_sensor::PinholeCamera; /// Camera re-projection cost function #[derive(Copy, Clone)] @@ -34,9 +32,7 @@ fn res_fn>( point_in_world: Scalar::Vector<3>, uv_in_image: Scalar::Vector<2>, ) -> Scalar::Vector<2> { - let point_in_cam = world_from_camera - .inverse() - .transform(&point_in_world.vector()); + let point_in_cam = world_from_camera.inverse().transform(&point_in_world); uv_in_image - intrinscs.cam_proj(&point_in_cam) } diff --git a/crates/sophus_opt/src/example_problems/pose_circle.rs b/crates/sophus_opt/src/example_problems/pose_circle.rs index dddd656c..1d3d5ddb 100644 --- a/crates/sophus_opt/src/example_problems/pose_circle.rs +++ b/crates/sophus_opt/src/example_problems/pose_circle.rs @@ -1,19 +1,18 @@ +use super::cost_fn::pose_graph::PoseGraphCostFn; use crate::cost_fn::CostFn; use crate::cost_fn::CostSignature; use 
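The reprojection residual in `res_fn` above is the observed pixel minus the projection of the point transformed into the camera frame. A self-contained restatement with a hard-coded pinhole projection (the `fx`, `fy`, `cx`, `cy` values are illustrative; the real code goes through `PinholeCamera::cam_proj`):

```rust
use nalgebra::{Vector2, Vector3};

// Hard-coded pinhole projection standing in for PinholeCamera::cam_proj.
fn pinhole_proj(point_in_cam: &Vector3<f64>) -> Vector2<f64> {
    let (fx, fy, cx, cy) = (600.0, 600.0, 320.0, 240.0);
    Vector2::new(
        fx * point_in_cam.x / point_in_cam.z + cx,
        fy * point_in_cam.y / point_in_cam.z + cy,
    )
}

fn main() {
    let point_in_cam = Vector3::new(0.1, -0.05, 2.0);
    let uv_observed = Vector2::new(350.0, 225.0);
    // Residual = observed pixel minus predicted pixel; zero when consistent.
    let residual = uv_observed - pinhole_proj(&point_in_cam);
    assert_eq!(residual, Vector2::new(0.0, 0.0));
}
```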
crate::example_problems::cost_fn::pose_graph::PoseGraphCostTermSignature; -use crate::nlls::*; +use crate::nlls::optimize; +use crate::nlls::OptParams; +use crate::prelude::*; use crate::variables::VarFamily; use crate::variables::VarKind; use crate::variables::VarPool; use crate::variables::VarPoolBuilder; - -use sophus_core::linalg::vector::IsVector; use sophus_core::linalg::VecF64; -use sophus_lie::groups::isometry2::Isometry2; +use sophus_lie::Isometry2; use std::collections::HashMap; -use super::cost_fn::pose_graph::PoseGraphCostFn; - /// Pose graph example problem #[derive(Debug, Clone)] pub struct PoseCircleProblem { @@ -106,7 +105,7 @@ impl PoseCircleProblem { } /// Calculate the error of the current estimate - pub fn calc_error(&self, est_world_from_robot: &Vec>) -> f64 { + pub fn calc_error(&self, est_world_from_robot: &[Isometry2]) -> f64 { let mut res_err = 0.0; for obs in self.obs_pose_a_from_pose_b_poses.terms.clone() { let residual = super::cost_fn::pose_graph::res_fn( @@ -135,7 +134,7 @@ impl PoseCircleProblem { optimize( var_pool, - vec![CostFn::new( + vec![CostFn::new_box( self.obs_pose_a_from_pose_b_poses.clone(), PoseGraphCostFn {}, )], diff --git a/crates/sophus_opt/src/example_problems/simple_prior.rs b/crates/sophus_opt/src/example_problems/simple_prior.rs index b1180ada..50ef01f4 100644 --- a/crates/sophus_opt/src/example_problems/simple_prior.rs +++ b/crates/sophus_opt/src/example_problems/simple_prior.rs @@ -6,14 +6,14 @@ use crate::example_problems::cost_fn::isometry3_prior::Isometry3PriorCostFn; use crate::example_problems::cost_fn::isometry3_prior::Isometry3PriorTermSignature; use crate::nlls::optimize; use crate::nlls::OptParams; +use crate::prelude::*; use crate::variables::VarFamily; use crate::variables::VarKind; use crate::variables::VarPoolBuilder; -use sophus_core::linalg::vector::IsVector; use sophus_core::linalg::MatF64; use sophus_core::linalg::VecF64; -use sophus_lie::groups::isometry2::Isometry2; -use sophus_lie::groups::isometry3::Isometry3; +use sophus_lie::Isometry2; +use sophus_lie::Isometry3; /// Simple 2D isometry prior problem pub struct SimpleIso2PriorProblem { @@ -65,7 +65,7 @@ impl SimpleIso2PriorProblem { let up_families = optimize( families, - vec![CostFn::new( + vec![CostFn::new_box( obs_pose_a_from_pose_b_poses.clone(), Isometry2PriorCostFn {}, )], @@ -134,7 +134,7 @@ impl SimpleIso3PriorProblem { let up_families = optimize( families, - vec![CostFn::new( + vec![CostFn::new_box( obs_pose_a_from_pose_b_poses.clone(), Isometry3PriorCostFn {}, )], diff --git a/crates/sophus_opt/src/lib.rs b/crates/sophus_opt/src/lib.rs index 3800d6ce..2396f0a6 100644 --- a/crates/sophus_opt/src/lib.rs +++ b/crates/sophus_opt/src/lib.rs @@ -1,7 +1,8 @@ #![feature(portable_simd)] #![deny(missing_docs)] +#![allow(clippy::needless_range_loop)] -//! # Non-linear least squares optimization module +//! 
# Non-linear least squares optimization crate - part of the sophus-rs project /// Block vector and matrix operations pub mod block; @@ -25,3 +26,11 @@ pub mod solvers; pub mod term; /// Decision variables pub mod variables; + +/// Sophus optimization prelude +pub mod prelude { + pub use crate::robust_kernel::IsRobustKernel; + pub use sophus_image::prelude::*; + pub use sophus_lie::prelude::*; + pub use sophus_sensor::prelude::*; +} diff --git a/crates/sophus_opt/src/nlls.rs b/crates/sophus_opt/src/nlls.rs index de848943..ed1856ae 100644 --- a/crates/sophus_opt/src/nlls.rs +++ b/crates/sophus_opt/src/nlls.rs @@ -1,9 +1,7 @@ use crate::cost::IsCost; -use crate::solvers::solve; - use crate::cost_fn::IsCostFn; +use crate::solvers::solve; use crate::variables::VarPool; - use std::fmt::Debug; /// Optimization parameters diff --git a/crates/sophus_opt/src/term.rs b/crates/sophus_opt/src/term.rs index 8911e2b8..c24c9f70 100644 --- a/crates/sophus_opt/src/term.rs +++ b/crates/sophus_opt/src/term.rs @@ -1,7 +1,7 @@ use crate::block::BlockVector; use crate::block::NewBlockMatrix; +use crate::prelude::*; use crate::robust_kernel; -use crate::robust_kernel::IsRobustKernel; use crate::variables::VarKind; use sophus_core::linalg::MatF64; use sophus_core::linalg::VecF64; @@ -161,10 +161,10 @@ impl< ) { if idx == j { if let Some(self_0) = self.0 { - if precision_mat.is_none() { - hessian.set_block(i, j, lhs.transpose() * self_0); + if let Some(precision) = precision_mat { + hessian.set_block(i, j, lhs.transpose() * precision * self_0); } else { - hessian.set_block(i, j, lhs.transpose() * precision_mat.unwrap() * self_0); + hessian.set_block(i, j, lhs.transpose() * self_0); } } } else { diff --git a/crates/sophus_opt/src/variables.rs b/crates/sophus_opt/src/variables.rs index 440625b5..3c9e5467 100644 --- a/crates/sophus_opt/src/variables.rs +++ b/crates/sophus_opt/src/variables.rs @@ -1,8 +1,8 @@ +use crate::prelude::*; use dyn_clone::DynClone; use sophus_core::linalg::VecF64; -use sophus_core::params::HasParams; -use sophus_lie::groups::isometry2::Isometry2; -use sophus_lie::groups::isometry3::Isometry3; +use sophus_lie::Isometry2; +use sophus_lie::Isometry3; use std::collections::BTreeMap; use std::collections::HashMap; use std::fmt::Debug; @@ -137,7 +137,7 @@ impl IsVariable for Isometry2 { fn update(&mut self, delta: nalgebra::DVectorView) { let mut delta_vec = VecF64::<3>::zeros(); - for d in 0..Self::DOF { + for d in 0..::DOF { delta_vec[d] = delta[d]; } self.set_params( @@ -152,7 +152,7 @@ impl IsVariable for Isometry3 { fn update(&mut self, delta: nalgebra::DVectorView) { let mut delta_vec = VecF64::<6>::zeros(); - for d in 0..Self::DOF { + for d in 0..::DOF { delta_vec[d] = delta[d]; } self.set_params( @@ -224,6 +224,11 @@ pub trait IsVarFamily: as_any::AsAny + Debug + DynClone { /// number of members in the family fn len(&self) -> usize; + /// is empty + fn is_empty(&self) -> bool { + self.len() == 0 + } + /// returns 0 if variable is conditioned, DOF otherwise fn free_or_marg_dof(&self) -> usize; diff --git a/crates/sophus_pyo3/Cargo.toml b/crates/sophus_pyo3/Cargo.toml index 02bbda7b..be6b9714 100644 --- a/crates/sophus_pyo3/Cargo.toml +++ b/crates/sophus_pyo3/Cargo.toml @@ -20,4 +20,4 @@ numpy.workspace = true [dependencies.pyo3] version = "0.21.0" # "abi3-py38" tells pyo3 (and maturin) to build using the stable ABI with minimum Python version 3.8 -features = ["abi3-py38", "multiple-pymethods"] +features = ["abi3-py38"] diff --git a/crates/sophus_pyo3/src/lib.rs b/crates/sophus_pyo3/src/lib.rs 
index 0788058a..c05b8f3e 100644 --- a/crates/sophus_pyo3/src/lib.rs +++ b/crates/sophus_pyo3/src/lib.rs @@ -1,6 +1,5 @@ -#![feature(portable_simd)] #![deny(missing_docs)] -//! # Pyo3 module +//! pyo3 bindings for sophus-rs /// python wrapper pub mod pyo3; @@ -15,7 +14,7 @@ use numpy::pyo3::prelude::*; /// the `lib.name` setting in the `Cargo.toml`, else Python will not be able to /// import the module. #[pymodule] -fn sophus_pyo3(_py: Python, m: &PyModule) -> PyResult<()> { +fn sophus_pyo3(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; m.add_class::()?; diff --git a/crates/sophus_pyo3/src/pyo3/errors.rs b/crates/sophus_pyo3/src/pyo3/errors.rs index 856600fa..891a3b35 100644 --- a/crates/sophus_pyo3/src/pyo3/errors.rs +++ b/crates/sophus_pyo3/src/pyo3/errors.rs @@ -1,5 +1,8 @@ use numpy::PyArray1; +use numpy::PyArrayMethods; +use numpy::PyUntypedArrayMethods; use pyo3::exceptions::PyOSError; +use pyo3::Bound; use pyo3::PyErr; use std::fmt; @@ -32,12 +35,12 @@ impl std::convert::From for PyErr { /// Check if array has expected dimension pub fn check_array1_dim_impl( - array: &PyArray1, + array: &Bound>, expected: usize, file: &'static str, line: u32, ) -> Result<(), PyArray1DimMismatch> { - if array.len() == expected { + if array.readonly().len() == expected { Ok(()) } else { Err(PyArray1DimMismatch { diff --git a/crates/sophus_pyo3/src/pyo3/lie_groups.rs b/crates/sophus_pyo3/src/pyo3/lie_groups.rs index 0757a866..0c12b0a0 100644 --- a/crates/sophus_pyo3/src/pyo3/lie_groups.rs +++ b/crates/sophus_pyo3/src/pyo3/lie_groups.rs @@ -2,16 +2,19 @@ use crate::pyo3::errors::check_array1_dim_impl; use crate::pyo3::errors::PyArray1DimMismatch; use numpy::PyArray1; use numpy::PyArray2; +use numpy::PyArrayMethods; use pyo3::pyclass; use pyo3::pymethods; +use pyo3::Bound; use pyo3::Py; +use pyo3::PyRef; +use pyo3::PyRefMut; use pyo3::Python; -use sophus_core::params::HasParams; -use sophus_lie::groups::isometry2::Isometry2; -use sophus_lie::groups::isometry3::Isometry3; -use sophus_lie::groups::rotation2::Rotation2; -use sophus_lie::groups::rotation3::Rotation3; -use sophus_lie::traits::IsTranslationProductGroup; +use sophus_lie::prelude::*; +use sophus_lie::Isometry2; +use sophus_lie::Isometry3; +use sophus_lie::Rotation2; +use sophus_lie::Rotation3; macro_rules! check_array1_dim { ($array:expr, $expected:expr) => { @@ -34,7 +37,7 @@ macro_rules! crate_py_lie_group_class { impl $py_group { #[staticmethod] fn ad( - tangent: &PyArray1, + tangent: &Bound>, py: Python, ) -> Result>, PyArray1DimMismatch> { check_array1_dim!(tangent, $dof)?; @@ -43,50 +46,50 @@ macro_rules! 
crate_py_lie_group_class { let tangent_vec = nalgebra::SVector::::from_column_slice(tangent_slice); Ok( - PyArray1::from_slice(py, <$rust_group>::ad(&tangent_vec).as_slice()) + PyArray1::from_slice_bound(py, <$rust_group>::ad(&tangent_vec).as_slice()) .reshape([$ambient, $ambient]) .unwrap() - .to_owned(), + .into(), ) } fn adj(&self, py: Python) -> Py> { let adj = self.inner.adj(); - PyArray1::from_slice(py, adj.as_slice()) + PyArray1::from_slice_bound(py, adj.as_slice()) .reshape([$dof, $dof]) .unwrap() - .to_owned() + .into() } fn compact(&self, py: Python) -> Py> { let compact = self.inner.compact(); - PyArray1::from_slice(py, compact.as_slice()) + PyArray1::from_slice_bound(py, compact.as_slice()) .reshape([$point, $ambient]) .unwrap() - .to_owned() + .into() } #[staticmethod] fn da_a_mul_b(a: &Self, b: &Self, py: Python) -> Py> { let result = <$rust_group>::da_a_mul_b(&a.inner, &b.inner); - PyArray1::from_slice(py, result.as_slice()) + PyArray1::from_slice_bound(py, result.as_slice()) .reshape([$params, $params]) .unwrap() - .to_owned() + .into() } #[staticmethod] fn db_a_mul_b(a: &Self, b: &Self, py: Python) -> Py> { let result = <$rust_group>::db_a_mul_b(&a.inner, &b.inner); - PyArray1::from_slice(py, result.as_slice()) + PyArray1::from_slice_bound(py, result.as_slice()) .reshape([$params, $params]) .unwrap() - .to_owned() + .into() } #[staticmethod] fn dx_exp( - tangent: &PyArray1, + tangent: &Bound>, py: Python, ) -> Result>, PyArray1DimMismatch> { check_array1_dim!(tangent, $dof)?; @@ -95,24 +98,24 @@ macro_rules! crate_py_lie_group_class { let tangent_vec = nalgebra::SVector::::from_column_slice(tangent_slice); let result = <$rust_group>::dx_exp(&tangent_vec); - Ok(PyArray1::from_slice(py, result.as_slice()) + Ok(PyArray1::from_slice_bound(py, result.as_slice()) .reshape([$params, $dof]) .unwrap() - .to_owned()) + .into()) } #[staticmethod] fn dx_exp_x_at_0(py: Python) -> Py> { let result = <$rust_group>::dx_exp_x_at_0(); - PyArray1::from_slice(py, result.as_slice()) + PyArray1::from_slice_bound(py, result.as_slice()) .reshape([$params, $dof]) .unwrap() - .to_owned() + .into() } #[staticmethod] fn dx_exp_x_times_point_at_0( - point: &PyArray1, + point: &Bound>, py: Python, ) -> Result>, PyArray1DimMismatch> { check_array1_dim!(point, $point)?; @@ -121,24 +124,24 @@ macro_rules! crate_py_lie_group_class { let point_vec = nalgebra::SVector::::from_column_slice(point_slice); let result = <$rust_group>::dx_exp_x_times_point_at_0(point_vec); - Ok(PyArray1::from_slice(py, result.as_slice()) + Ok(PyArray1::from_slice_bound(py, result.as_slice()) .reshape([$params, $point]) .unwrap() - .to_owned()) + .into()) } #[staticmethod] fn dx_log_a_exp_x_b_at_0(a: &Self, b: &Self, py: Python) -> Py> { let result = <$rust_group>::dx_log_a_exp_x_b_at_0(&a.inner, &b.inner); - PyArray1::from_slice(py, result.as_slice()) + PyArray1::from_slice_bound(py, result.as_slice()) .reshape([$dof, $dof]) .unwrap() - .to_owned() + .into() } #[staticmethod] fn dx_log_x( - params: &PyArray1, + params: &Bound>, py: Python, ) -> Result>, PyArray1DimMismatch> { check_array1_dim!(params, $params)?; @@ -146,14 +149,14 @@ macro_rules! 
crate_py_lie_group_class { let params_slice = read_only_params.as_slice().unwrap(); let params_vec = nalgebra::SVector::::from_column_slice(params_slice); let result = <$rust_group>::dx_log_x(¶ms_vec); - Ok(PyArray1::from_slice(py, result.as_slice()) + Ok(PyArray1::from_slice_bound(py, result.as_slice()) .reshape([$dof, $params]) .unwrap() - .to_owned()) + .into()) } #[staticmethod] - fn exp(tangent: &PyArray1) -> Result { + fn exp(tangent: &Bound>) -> Result { check_array1_dim!(tangent, $dof)?; let read_only_tangent = tangent.readonly(); let tangent_slice = read_only_tangent.as_slice().unwrap(); @@ -164,7 +167,7 @@ macro_rules! crate_py_lie_group_class { } #[staticmethod] - fn from_params(params: &PyArray1) -> Result { + fn from_params(params: &Bound>) -> Result { check_array1_dim!(params, $params)?; let read_only_params = params.readonly(); let params_slice = read_only_params.as_slice().unwrap(); @@ -183,7 +186,7 @@ macro_rules! crate_py_lie_group_class { #[staticmethod] fn hat( - omega: &PyArray1, + omega: &Bound>, py: Python, ) -> Result>, PyArray1DimMismatch> { check_array1_dim!(omega, $dof)?; @@ -192,10 +195,10 @@ macro_rules! crate_py_lie_group_class { let omega_vec = nalgebra::SVector::::from_column_slice(omega_slice); let result = <$rust_group>::hat(&omega_vec); - Ok(PyArray1::from_slice(py, result.as_slice()) + Ok(PyArray1::from_slice_bound(py, result.as_slice()) .reshape([$ambient, $ambient]) .unwrap() - .to_owned()) + .into()) } #[new] @@ -213,23 +216,26 @@ macro_rules! crate_py_lie_group_class { fn log(&self, py: Python) -> Py> { let log = self.inner.log(); - PyArray1::from_slice(py, log.as_slice()).to_owned() + PyArray1::from_slice_bound(py, log.as_slice()).into() } fn matrix(&self, py: Python) -> Py> { let matrix = self.inner.matrix(); - PyArray1::from_slice(py, matrix.as_slice()) + PyArray1::from_slice_bound(py, matrix.as_slice()) .reshape([$ambient, $ambient]) .unwrap() - .to_owned() + .into() } fn params(&self, py: Python) -> Py> { let params = self.inner.params(); - PyArray1::from_slice(py, params.as_slice()).to_owned() + PyArray1::from_slice_bound(py, params.as_slice()).into() } - fn set_params(&mut self, params: &PyArray1) -> Result<(), PyArray1DimMismatch> { + fn set_params( + &mut self, + params: &Bound>, + ) -> Result<(), PyArray1DimMismatch> { check_array1_dim!(params, $params)?; let read_only_params = params.readonly(); let params_slice = read_only_params.as_slice().unwrap(); @@ -242,7 +248,7 @@ macro_rules! crate_py_lie_group_class { #[staticmethod] fn to_ambient( - point: &PyArray1, + point: &Bound>, py: Python, ) -> Result>, PyArray1DimMismatch> { check_array1_dim!(point, $point)?; @@ -251,13 +257,13 @@ macro_rules! crate_py_lie_group_class { let point_vec = nalgebra::SVector::::from_column_slice(point_slice); let result = <$rust_group>::to_ambient(&point_vec); - Ok(PyArray1::from_slice(py, result.as_slice()).to_owned()) + Ok(PyArray1::from_slice_bound(py, result.as_slice()).into()) } fn transform( &self, py: Python, - point: &PyArray1, + point: &Bound>, ) -> Result>, PyArray1DimMismatch> { check_array1_dim!(point, $point)?; let read_only_point = point.readonly(); @@ -265,12 +271,15 @@ macro_rules! 
crate_py_lie_group_class { let point_vec = nalgebra::SVector::::from_column_slice(point_slice); let result = self.inner.transform(&point_vec); - Ok(PyArray1::from_slice(py, result.fixed_rows::<$point>(0).as_slice()).to_owned()) + Ok( + PyArray1::from_slice_bound(py, result.fixed_rows::<$point>(0).as_slice()) + .into(), + ) } #[staticmethod] fn vee( - omega_hat: &PyArray2, + omega_hat: &Bound>, py: Python, ) -> Result>, PyArray1DimMismatch> { let omega_hat = omega_hat.readonly(); @@ -280,7 +289,7 @@ macro_rules! crate_py_lie_group_class { ); let result = <$rust_group>::vee(&omega_hat_mat); - Ok(PyArray1::from_slice(py, result.as_slice()).to_owned()) + Ok(PyArray1::from_slice_bound(py, result.as_slice()).into()) } fn __mul__(&self, other: &$py_group) -> Self { @@ -297,67 +306,112 @@ macro_rules! crate_py_lie_group_class { } crate_py_lie_group_class!(PyRotation2, Rotation2::, "Rotation2", 1, 2, 2, 2); -crate_py_lie_group_class!(PyIsometry2, Isometry2::, "Isometry2", 3, 4, 2, 3); +crate_py_lie_group_class!( + PyBaseIsometry2, + Isometry2::, + "BaseIsometry2", + 3, + 4, + 2, + 3 +); crate_py_lie_group_class!(PyRotation3, Rotation3::, "Rotation3", 3, 4, 3, 3); -crate_py_lie_group_class!(PyIsometry3, Isometry3::, "Isometry3", 6, 7, 3, 4); +crate_py_lie_group_class!( + PyBaseIsometry3, + Isometry3::, + "BaseIsometry3", + 6, + 7, + 3, + 4 +); macro_rules! augment_py_product_group_class { - ($py_product_group: ident, $rust_group:ty, $py_factor_group: ident, $point:literal) => { - // second pymethods block, requires the "multiple-pymethods" feature + ( + $py_base: ident, + $py_product_group: ident, + $rust_group:ty, + $py_factor_group: ident, + $name: literal, + $point:literal + ) => { + /// Python wrapper for python group + #[pyclass(name = $name, extends=$py_base)] + #[derive(Debug, Clone)] + pub struct $py_product_group {} + #[pymethods] impl $py_product_group { - #[staticmethod] + #[new] fn from_translation_and_rotation( - translation: &PyArray1, + translation: &Bound>, rotation: $py_factor_group, - ) -> Result { + ) -> Result<(Self, $py_base), PyArray1DimMismatch> { check_array1_dim!(translation, $point)?; let read_only_translation = translation.readonly(); let translation_slice = read_only_translation.as_slice().unwrap(); let translation_vec = nalgebra::SVector::::from_column_slice(translation_slice); - Ok(Self { - inner: <$rust_group>::from_translation_and_rotation( - &translation_vec, - &rotation.inner, - ), - }) + Ok(( + Self {}, + $py_base { + inner: <$rust_group>::from_translation_and_rotation( + &translation_vec, + &rotation.inner, + ), + }, + )) } - fn translation(&self, py: Python) -> Py> { - let translation = self.inner.translation(); - PyArray1::from_slice(py, translation.as_slice()).to_owned() + fn translation<'a>(self_: PyRef<'_, Self>, py: Python<'a>) -> Bound<'a, PyArray1> { + let super_ = self_.as_ref(); + let translation = super_.inner.translation(); + PyArray1::from_slice_bound(py, translation.as_slice()) } - fn rotation(&self) -> $py_factor_group { + fn rotation(self_: PyRef<'_, Self>) -> $py_factor_group { + let super_ = self_.as_ref(); $py_factor_group { - inner: self.inner.rotation(), + inner: super_.inner.rotation(), } } fn set_translation( - &mut self, - translation: &PyArray1, + mut self_: PyRefMut<'_, Self>, + translation: &Bound>, ) -> Result<(), PyArray1DimMismatch> { check_array1_dim!(translation, $point)?; + let super_ = self_.as_mut(); let read_only_translation = translation.readonly(); let translation_slice = read_only_translation.as_slice().unwrap(); let 
translation_vec = nalgebra::SVector::::from_column_slice(translation_slice); - self.inner.set_translation(&translation_vec); + super_.inner.set_translation(&translation_vec); Ok(()) } - fn set_rotation(&mut self, rotation: $py_factor_group) { - self.inner.set_rotation(&rotation.inner); + fn set_rotation(mut self_: PyRefMut<'_, Self>, rotation: $py_factor_group) { + let super_ = self_.as_mut(); + super_.inner.set_rotation(&rotation.inner); } } }; } -augment_py_product_group_class!(PyIsometry2, Isometry2, PyRotation2, 2); -augment_py_product_group_class!(PyIsometry3, Isometry3, PyRotation3, 3); +augment_py_product_group_class!( + PyBaseIsometry2, + PyIsometry2, + Isometry2, + PyRotation2, + "Isometry2", + 2); +augment_py_product_group_class!( + PyBaseIsometry3, + PyIsometry3, + Isometry3, + PyRotation3, + "Isometry3",3); diff --git a/crates/sophus_sensor/src/camera.rs b/crates/sophus_sensor/src/camera.rs index c68049a2..7e34b231 100644 --- a/crates/sophus_sensor/src/camera.rs +++ b/crates/sophus_sensor/src/camera.rs @@ -1,8 +1,6 @@ use super::traits::IsCameraDistortionImpl; -use super::traits::IsProjection; -use sophus_core::linalg::bool_mask::BoolMask; -use sophus_core::linalg::scalar::IsScalar; -use sophus_image::image_view::ImageSize; +use crate::prelude::*; +use sophus_image::ImageSize; /// A generic camera model #[derive(Debug, Copy, Clone)] diff --git a/crates/sophus_sensor/src/camera_enum.rs b/crates/sophus_sensor/src/camera_enum.rs index 58bf8ac9..8a3ad99a 100644 --- a/crates/sophus_sensor/src/camera_enum.rs +++ b/crates/sophus_sensor/src/camera_enum.rs @@ -1,4 +1,7 @@ /// general camera - either perspective or orthographic pub mod general_camera; +pub use crate::camera_enum::general_camera::GeneralCameraEnum; + /// perspective camera pub mod perspective_camera; +pub use crate::camera_enum::perspective_camera::PerspectiveCameraEnum; diff --git a/crates/sophus_sensor/src/camera_enum/general_camera.rs b/crates/sophus_sensor/src/camera_enum/general_camera.rs index 82efd432..38ee194d 100644 --- a/crates/sophus_sensor/src/camera_enum/general_camera.rs +++ b/crates/sophus_sensor/src/camera_enum/general_camera.rs @@ -1,8 +1,7 @@ use crate::camera_enum::perspective_camera::PerspectiveCameraEnum; +use crate::prelude::*; use crate::projections::orthographic::OrthographicCamera; -use crate::traits::IsCameraEnum; -use sophus_core::linalg::scalar::IsScalar; -use sophus_image::image_view::ImageSize; +use sophus_image::ImageSize; /// Generalized camera enum #[derive(Debug, Clone)] diff --git a/crates/sophus_sensor/src/camera_enum/perspective_camera.rs b/crates/sophus_sensor/src/camera_enum/perspective_camera.rs index a1971b82..dfad5a3a 100644 --- a/crates/sophus_sensor/src/camera_enum/perspective_camera.rs +++ b/crates/sophus_sensor/src/camera_enum/perspective_camera.rs @@ -1,19 +1,16 @@ -use crate::camera::Camera; use crate::distortions::affine::AffineDistortionImpl; use crate::distortions::kannala_brandt::KannalaBrandtDistortionImpl; -use crate::projections::perspective::PerspectiveProjection; -use crate::traits::IsCameraEnum; -use crate::traits::IsPerspectiveCameraEnum; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::vector::IsVector; -use sophus_image::image_view::ImageSize; +use crate::prelude::*; +use crate::projections::perspective::PerspectiveProjectionImpl; +use crate::Camera; +use sophus_image::ImageSize; /// Pinhole camera pub type PinholeCamera = - Camera, PerspectiveProjection>; + Camera, PerspectiveProjectionImpl>; /// Kannala-Brandt camera pub type 
KannalaBrandtCamera = - Camera, PerspectiveProjection>; + Camera, PerspectiveProjectionImpl>; /// Perspective camera enum #[derive(Debug, Clone)] diff --git a/crates/sophus_sensor/src/distortion_table.rs b/crates/sophus_sensor/src/distortion_table.rs index 7f0feec4..074efa60 100644 --- a/crates/sophus_sensor/src/distortion_table.rs +++ b/crates/sophus_sensor/src/distortion_table.rs @@ -1,8 +1,7 @@ -use crate::dyn_camera::DynCamera; +use crate::prelude::*; +use crate::DynCamera; use nalgebra::SVector; -use sophus_core::calculus::region::IsRegion; -use sophus_core::calculus::region::Region; -use sophus_core::linalg::vector::IsVector; +use sophus_core::calculus::Region; use sophus_core::linalg::VecF64; use sophus_image::arc_image::ArcImage2F32; use sophus_image::image_view::IsImageView; diff --git a/crates/sophus_sensor/src/distortions/affine.rs b/crates/sophus_sensor/src/distortions/affine.rs index b7ab9e46..56be394d 100644 --- a/crates/sophus_sensor/src/distortions/affine.rs +++ b/crates/sophus_sensor/src/distortions/affine.rs @@ -1,8 +1,5 @@ +use crate::prelude::*; use crate::traits::IsCameraDistortionImpl; -use sophus_core::linalg::bool_mask::BoolMask; -use sophus_core::linalg::matrix::IsMatrix; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::vector::IsVector; use sophus_core::params::ParamsImpl; use std::marker::PhantomData; diff --git a/crates/sophus_sensor/src/distortions/kannala_brandt.rs b/crates/sophus_sensor/src/distortions/kannala_brandt.rs index bf412869..d3a084b5 100644 --- a/crates/sophus_sensor/src/distortions/kannala_brandt.rs +++ b/crates/sophus_sensor/src/distortions/kannala_brandt.rs @@ -1,8 +1,5 @@ +use crate::prelude::*; use crate::traits::IsCameraDistortionImpl; -use sophus_core::linalg::bool_mask::BoolMask; -use sophus_core::linalg::matrix::IsMatrix; -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::vector::IsVector; use sophus_core::params::ParamsImpl; use std::marker::PhantomData; @@ -158,7 +155,7 @@ impl, const BATCH: usize> IsCameraDistortionImpl(4); + let k = params.get_fixed_subvec::<4>(4); let radius_sq = a.clone() * a.clone() + b.clone() * b.clone(); diff --git a/crates/sophus_sensor/src/dyn_camera.rs b/crates/sophus_sensor/src/dyn_camera.rs index 7ead2090..74ca7386 100644 --- a/crates/sophus_sensor/src/dyn_camera.rs +++ b/crates/sophus_sensor/src/dyn_camera.rs @@ -1,9 +1,7 @@ -use crate::camera_enum::general_camera::GeneralCameraEnum; -use crate::camera_enum::perspective_camera::PerspectiveCameraEnum; -use crate::traits::IsCameraEnum; -use crate::traits::IsPerspectiveCameraEnum; -use sophus_core::linalg::scalar::IsScalar; -use sophus_image::image_view::ImageSize; +use crate::camera_enum::GeneralCameraEnum; +use crate::camera_enum::PerspectiveCameraEnum; +use crate::prelude::*; +use sophus_image::ImageSize; /// Dynamic camera facade #[derive(Debug, Clone)] @@ -96,10 +94,10 @@ fn dyn_camera_tests() { use approx::assert_relative_eq; use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; use sophus_core::linalg::VecF64; - use sophus_image::image_view::ImageSize; use sophus_image::image_view::IsImageView; use sophus_image::interpolation::interpolate; use sophus_image::mut_image::MutImage2F32; + use sophus_image::ImageSize; type DynCameraF64 = DynCamera; diff --git a/crates/sophus_sensor/src/lib.rs b/crates/sophus_sensor/src/lib.rs index 9116ee5f..cea326c1 100644 --- a/crates/sophus_sensor/src/lib.rs +++ b/crates/sophus_sensor/src/lib.rs @@ -1,19 +1,23 @@ #![feature(portable_simd)] 
#![deny(missing_docs)] -//! # Sensor (aka camera) module +//! Sensor (aka camera) crate - part of the sophus-rs project /// Distortion lookup table pub mod distortion_table; /// A type-erased camera struct pub mod dyn_camera; +pub use crate::dyn_camera::DynCamera; /// A generic camera model pub mod camera; +pub use crate::camera::Camera; /// Projection models pub mod camera_enum; +pub use crate::camera_enum::perspective_camera::KannalaBrandtCamera; +pub use crate::camera_enum::perspective_camera::PinholeCamera; /// Projection models pub mod projections; @@ -23,3 +27,11 @@ pub mod distortions; /// Sensor traits pub mod traits; + +/// sophus sensor prelude +pub mod prelude { + pub use crate::traits::IsCameraEnum; + pub use crate::traits::IsPerspectiveCameraEnum; + pub use crate::traits::IsProjection; + pub use sophus_core::prelude::*; +} diff --git a/crates/sophus_sensor/src/projections/orthographic.rs b/crates/sophus_sensor/src/projections/orthographic.rs index 494b90b6..544b00fa 100644 --- a/crates/sophus_sensor/src/projections/orthographic.rs +++ b/crates/sophus_sensor/src/projections/orthographic.rs @@ -7,15 +7,15 @@ use std::marker::PhantomData; /// Orthographic projection implementation #[derive(Debug, Clone)] -pub struct OrthographisProjection, const BATCH: usize> { +pub struct OrthographisProjectionImpl, const BATCH: usize> { phantom: PhantomData, } impl, const BATCH: usize> IsProjection - for OrthographisProjection + for OrthographisProjectionImpl { fn proj(point_in_camera: &S::Vector<3>) -> S::Vector<2> { - point_in_camera.get_fixed_rows::<2>(0) + point_in_camera.get_fixed_subvec::<2>(0) } fn unproj(point_in_camera: &S::Vector<2>, extension: S) -> S::Vector<3> { @@ -33,4 +33,4 @@ impl, const BATCH: usize> IsProjection /// Orthographic camera pub type OrthographicCamera = - Camera, OrthographisProjection>; + Camera, OrthographisProjectionImpl>; diff --git a/crates/sophus_sensor/src/projections/perspective.rs b/crates/sophus_sensor/src/projections/perspective.rs index 9b2d62c2..d715bb6e 100644 --- a/crates/sophus_sensor/src/projections/perspective.rs +++ b/crates/sophus_sensor/src/projections/perspective.rs @@ -7,9 +7,9 @@ use sophus_core::linalg::vector::IsVector; /// /// Projects a 3D point in the camera frame to a 2D point in the z=1 plane #[derive(Debug, Clone, Copy)] -pub struct PerspectiveProjection; +pub struct PerspectiveProjectionImpl; -impl, const BATCH: usize> IsProjection for PerspectiveProjection { +impl, const BATCH: usize> IsProjection for PerspectiveProjectionImpl { fn proj(point_in_camera: &S::Vector<3>) -> S::Vector<2> { S::Vector::<2>::from_array([ point_in_camera.get_elem(0) / point_in_camera.get_elem(2), diff --git a/crates/sophus_sensor/src/traits.rs b/crates/sophus_sensor/src/traits.rs index 820556e2..120e8be7 100644 --- a/crates/sophus_sensor/src/traits.rs +++ b/crates/sophus_sensor/src/traits.rs @@ -1,7 +1,6 @@ -use sophus_core::linalg::scalar::IsScalar; -use sophus_core::linalg::vector::IsVector; +use crate::prelude::*; use sophus_core::params::ParamsImpl; -use sophus_image::image_view::ImageSize; +use sophus_image::ImageSize; /// Camera distortion implementation trait pub trait IsCameraDistortionImpl< diff --git a/sophus-rs.code-workspace b/sophus-rs.code-workspace index 0b9beda9..dadb157c 100644 --- a/sophus-rs.code-workspace +++ b/sophus-rs.code-workspace @@ -12,5 +12,6 @@ "rust-analyzer.diagnostics.styleLints.enable": true, "rust-analyzer.imports.granularity.group": "item", "rust-analyzer.imports.prefix": "crate", + "rust-analyzer.cargo.unsetTest": [], } 
} \ No newline at end of file
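
Two of the recurring changes above are worth a closer look.

The `term.rs` hunk rewrites the Hessian-block accumulation from an `is_none()` check followed by `precision_mat.unwrap()` into an `if let Some(precision)` branch; the computed blocks are unchanged, only the `unwrap()` is gone. Below is a minimal standalone sketch of that step, assuming plain `nalgebra` fixed-size matrices and an invented 2x3 Jacobian shape (the real code operates on the crate's own block-matrix types from `crate::block`):

    // Accumulate one Gauss-Newton Hessian block, optionally weighted by a
    // per-term precision (information) matrix, mirroring the if-let rewrite above.
    use nalgebra::SMatrix;

    type Jac = SMatrix<f64, 2, 3>;   // residual dim 2, variable dof 3 (illustrative only)
    type Prec = SMatrix<f64, 2, 2>;  // per-term precision matrix
    type Block = SMatrix<f64, 3, 3>; // resulting Hessian block

    fn hessian_block(lhs: Jac, rhs: Jac, precision: Option<Prec>) -> Block {
        match precision {
            Some(p) => lhs.transpose() * p * rhs, // J_i^T * P * J_j when a precision is given
            None => lhs.transpose() * rhs,        // plain J_i^T * J_j otherwise
        }
    }

    fn main() {
        let j = Jac::new(1.0, 0.0, 0.5, 0.0, 1.0, -0.5);
        let p = Prec::identity() * 4.0; // e.g. isotropic 0.5-pixel measurement noise
        println!("{}", hessian_block(j, j, Some(p)));
        println!("{}", hessian_block(j, j, None));
    }

Most of the `sophus_pyo3` churn is the migration to the pyo3 0.21 `Bound` smart-pointer API (and the matching rust-numpy release): GIL-bound `&PyArray1<..>` arguments become `&Bound<PyArray1<..>>`, and `from_slice(..).to_owned()` becomes `from_slice_bound(..).into()`. A small sketch of the new construction pattern, with the function name `as_numpy` invented for illustration:

    use numpy::PyArray1;
    use pyo3::prelude::*;

    /// Return the input values as a 1-d numpy array, using the Bound API.
    #[pyfunction]
    fn as_numpy(py: Python<'_>, values: Vec<f64>) -> Py<PyArray1<f64>> {
        // `from_slice_bound` yields a `Bound<'_, PyArray1<f64>>`; `.into()`
        // detaches it from the GIL lifetime as an owned `Py<PyArray1<f64>>`.
        PyArray1::from_slice_bound(py, &values).into()
    }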