1use nereids_core::constants::{LM_DIAGONAL_FLOOR, PIVOT_FLOOR};
14
15use crate::error::FittingError;
16use crate::parameters::ParameterSet;
17
/// Minimal dense matrix stored as a flat `Vec<f64>` in row-major order
/// (element `(row, col)` lives at `row * ncols + col`).
#[derive(Debug, Clone)]
pub struct FlatMatrix {
    /// Row-major element storage; length is `nrows * ncols`.
    pub data: Vec<f64>,
    /// Number of rows.
    pub nrows: usize,
    /// Number of columns.
    pub ncols: usize,
}
31
32impl FlatMatrix {
33 pub fn zeros(nrows: usize, ncols: usize) -> Self {
35 let len = nrows
36 .checked_mul(ncols)
37 .expect("FlatMatrix dimensions overflow usize");
38 Self {
39 data: vec![0.0; len],
40 nrows,
41 ncols,
42 }
43 }
44
45 #[inline(always)]
47 pub fn get(&self, row: usize, col: usize) -> f64 {
48 debug_assert!(row < self.nrows && col < self.ncols);
49 self.data[row * self.ncols + col]
50 }
51
52 #[inline(always)]
54 pub fn get_mut(&mut self, row: usize, col: usize) -> &mut f64 {
55 debug_assert!(row < self.nrows && col < self.ncols);
56 &mut self.data[row * self.ncols + col]
57 }
58}
59
/// Ceiling on the LM damping parameter: once `lambda` grows past this the
/// fit is treated as stuck and the iteration loop bails out unconverged.
const LAMBDA_BREAKOUT: f64 = 1e16;
65
/// Tuning knobs for [`levenberg_marquardt`].
#[derive(Debug, Clone)]
pub struct LmConfig {
    /// Maximum number of LM iterations before giving up.
    pub max_iter: usize,
    /// Initial damping parameter lambda.
    pub lambda_init: f64,
    /// Multiplier applied to lambda after a rejected step.
    pub lambda_up: f64,
    /// Multiplier applied to lambda after an accepted step.
    pub lambda_down: f64,
    /// Relative chi-squared change below which the fit is declared converged.
    pub tol_chi2: f64,
    /// Relative parameter change below which the fit is declared converged.
    pub tol_param: f64,
    /// Relative step size used for forward-difference Jacobian columns.
    pub fd_step: f64,
    /// Whether to compute covariance / uncertainties after a converged fit.
    pub compute_covariance: bool,
}
91
92impl Default for LmConfig {
93 fn default() -> Self {
94 Self {
95 max_iter: 200,
96 lambda_init: 1e-3,
97 lambda_up: 10.0,
98 lambda_down: 0.1,
99 tol_chi2: 1e-8,
100 tol_param: 1e-8,
101 fd_step: 1e-6,
102 compute_covariance: true,
103 }
104 }
105}
106
/// Outcome of a [`levenberg_marquardt`] fit.
#[derive(Debug, Clone)]
pub struct LmResult {
    /// Final weighted chi-squared.
    pub chi_squared: f64,
    /// `chi_squared / dof`; NaN when dof == 0 or the fit failed early.
    pub reduced_chi_squared: f64,
    /// Number of LM iterations actually performed.
    pub iterations: usize,
    /// True when a convergence criterion was met (rather than hitting the
    /// iteration limit or the lambda breakout ceiling).
    pub converged: bool,
    /// Values of ALL parameters (free and fixed) at exit.
    pub params: Vec<f64>,
    /// Covariance matrix of the free parameters, when computed.
    pub covariance: Option<FlatMatrix>,
    /// 1-sigma uncertainties (sqrt of the covariance diagonal), when computed.
    pub uncertainties: Option<Vec<f64>>,
}
125
/// A model that can be fit with [`levenberg_marquardt`].
pub trait FitModel {
    /// Evaluate the model at `params` (ALL parameter values, free and
    /// fixed, in declaration order), returning one value per data point.
    fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError>;

    /// Optionally supply an analytical Jacobian (`n_data x n_free`,
    /// row-major) with respect to the free parameters only. The default
    /// returns `None`, which makes the fitter fall back to forward
    /// finite differences.
    fn analytical_jacobian(
        &self,
        _params: &[f64],
        _free_param_indices: &[usize],
        _y_current: &[f64],
    ) -> Option<FlatMatrix> {
        None
    }
}
164
/// Blanket delegation so a reference `&M` (including `&dyn FitModel`) is
/// itself a [`FitModel`]; both methods forward to the referenced model.
impl<M: FitModel + ?Sized> FitModel for &M {
    fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError> {
        (**self).evaluate(params)
    }

    fn analytical_jacobian(
        &self,
        params: &[f64],
        free_param_indices: &[usize],
        y_current: &[f64],
    ) -> Option<FlatMatrix> {
        (**self).analytical_jacobian(params, free_param_indices, y_current)
    }
}
185
/// Weighted chi-squared: the sum over all points of `w_i * r_i^2`.
fn chi_squared(residuals: &[f64], weights: &[f64]) -> f64 {
    let mut total = 0.0;
    for (&r, &w) in residuals.iter().zip(weights.iter()) {
        total += r * r * w;
    }
    total
}
194
195fn scaled_gradient_inf_norm(jtw_j: &FlatMatrix, jtw_r: &[f64], chi2: f64) -> f64 {
201 let residual_scale = chi2.sqrt().max(1.0);
202 let mut max_scaled: f64 = 0.0;
203 for (j, &grad_j) in jtw_r.iter().enumerate() {
204 let curvature = jtw_j.get(j, j).abs().sqrt();
205 let scale = curvature * residual_scale + PIVOT_FLOOR;
206 max_scaled = max_scaled.max(grad_j.abs() / scale);
207 }
208 max_scaled
209}
210
211fn has_informative_curvature(jtw_j: &FlatMatrix) -> bool {
216 (0..jtw_j.nrows).any(|j| jtw_j.get(j, j) > LM_DIAGONAL_FLOOR)
217}
218
/// Build the Jacobian (`n_data x n_free`) of the model with respect to the
/// free parameters.
///
/// Uses the model's analytical Jacobian when one is provided; otherwise
/// falls back to one-sided forward differences, temporarily perturbing each
/// free parameter in `params` (all values are restored before returning).
/// Columns whose parameter cannot be perturbed (clamped at a bound in both
/// directions) or whose perturbed evaluation errors are left as zeros.
///
/// `all_vals_buf` and `free_idx_buf` are scratch buffers reused across
/// calls to avoid per-iteration allocations.
fn compute_jacobian(
    model: &dyn FitModel,
    params: &mut ParameterSet,
    y_current: &[f64],
    fd_step: f64,
    all_vals_buf: &mut Vec<f64>,
    free_idx_buf: &mut Vec<usize>,
) -> Result<FlatMatrix, FittingError> {
    params.free_indices_into(free_idx_buf);
    let n_free = free_idx_buf.len();
    let n_data = y_current.len();

    params.all_values_into(all_vals_buf);
    // Prefer the model-supplied analytical Jacobian when available.
    if let Some(j) = model.analytical_jacobian(all_vals_buf, free_idx_buf, y_current) {
        debug_assert!(
            j.nrows == n_data && j.ncols == n_free && j.data.len() == n_data * n_free,
            "analytical_jacobian shape mismatch: got ({}x{}, len={}), expected ({}x{}, len={})",
            j.nrows,
            j.ncols,
            j.data.len(),
            n_data,
            n_free,
            n_data * n_free,
        );
        return Ok(j);
    }

    let mut jacobian = FlatMatrix::zeros(n_data, n_free);

    for (j, &idx) in free_idx_buf.iter().enumerate() {
        let original = params.params[idx].value;
        // Relative step, bounded away from zero for tiny parameter values.
        let step = fd_step * (1.0 + original.abs());

        params.params[idx].value = original + step;
        params.params[idx].clamp();
        let mut actual_step = params.params[idx].value - original;

        // If clamping swallowed the forward step, try stepping backward.
        if actual_step.abs() < PIVOT_FLOOR {
            params.params[idx].value = original - step;
            params.params[idx].clamp();
            actual_step = params.params[idx].value - original;
            if actual_step.abs() < PIVOT_FLOOR {
                // Parameter is pinned at a bound: leave its column zero.
                params.params[idx].value = original;
                continue;
            }
        }

        params.all_values_into(all_vals_buf);
        let perturbed = match model.evaluate(all_vals_buf) {
            Ok(v) => v,
            Err(_) => {
                // Evaluation failed at the perturbed point: skip the column.
                params.params[idx].value = original;
                continue;
            }
        };
        params.params[idx].value = original;

        // Forward-difference column: (f(p + h) - f(p)) / h.
        for i in 0..n_data {
            *jacobian.get_mut(i, j) = (perturbed[i] - y_current[i]) / actual_step;
        }
    }

    Ok(jacobian)
}
304
/// Solve the damped normal equations `(A + lambda * D) x = b`, where `D` is
/// the diagonal of `A` floored at `LM_DIAGONAL_FLOOR` (Marquardt-style
/// diagonal scaling).
///
/// Uses Gaussian elimination with partial pivoting on an augmented matrix.
/// Returns `None` when a pivot falls below `PIVOT_FLOOR` (singular or
/// near-singular system). An empty `b` yields an empty solution.
pub(crate) fn solve_damped_system(a: &FlatMatrix, b: &[f64], lambda: f64) -> Option<Vec<f64>> {
    let n = b.len();
    if n == 0 {
        return Some(vec![]);
    }

    // Augmented system [A + lambda*D | b]: one extra column holds b.
    let ncols = n + 1;
    let mut aug = FlatMatrix::zeros(n, ncols);
    for (i, &bi) in b.iter().enumerate() {
        for j in 0..n {
            *aug.get_mut(i, j) = a.get(i, j);
        }
        // Additive damping proportional to the (floored) diagonal.
        *aug.get_mut(i, i) += lambda * a.get(i, i).max(LM_DIAGONAL_FLOOR);
        *aug.get_mut(i, n) = bi;
    }

    // Forward elimination with partial pivoting.
    for col in 0..n {
        // Find the largest-magnitude pivot candidate in this column.
        let mut max_val = aug.get(col, col).abs();
        let mut max_row = col;
        for row in (col + 1)..n {
            if aug.get(row, col).abs() > max_val {
                max_val = aug.get(row, col).abs();
                max_row = row;
            }
        }

        if max_val < PIVOT_FLOOR {
            // Numerically singular: no reliable solution.
            return None;
        }

        if col != max_row {
            // Swap rows `col` and `max_row` inside the flat buffer.
            let (row_a, row_b) = (col * ncols, max_row * ncols);
            let (first, second) = aug.data.split_at_mut(row_b);
            first[row_a..row_a + ncols].swap_with_slice(&mut second[..ncols]);
        }

        // Eliminate entries below the pivot.
        let pivot = aug.get(col, col);
        for row in (col + 1)..n {
            let factor = aug.get(row, col) / pivot;
            for j in col..=n {
                let val = aug.get(col, j);
                *aug.get_mut(row, j) -= factor * val;
            }
        }
    }

    // Back substitution on the now upper-triangular system.
    let mut x = vec![0.0; n];
    for i in (0..n).rev() {
        let mut sum = aug.get(i, n);
        for (j, &xj) in x.iter().enumerate().skip(i + 1) {
            sum -= aug.get(i, j) * xj;
        }
        x[i] = sum / aug.get(i, i);
    }

    Some(x)
}
371
/// Invert a square matrix via Gauss-Jordan elimination with partial
/// pivoting on the augmented block `[A | I]`.
///
/// Returns `None` when a pivot falls below `PIVOT_FLOOR` (singular matrix).
/// A 0x0 input yields a 0x0 inverse.
pub(crate) fn invert_matrix(a: &FlatMatrix) -> Option<FlatMatrix> {
    let n = a.nrows;
    if n == 0 {
        return Some(FlatMatrix::zeros(0, 0));
    }

    // Augmented [A | I] with 2n columns; the right half becomes A^-1.
    let ncols = 2 * n;
    let mut aug = FlatMatrix::zeros(n, ncols);
    for i in 0..n {
        for j in 0..n {
            *aug.get_mut(i, j) = a.get(i, j);
        }
        *aug.get_mut(i, n + i) = 1.0;
    }

    for col in 0..n {
        // Partial pivoting: pick the largest-magnitude entry in the column.
        let mut max_val = aug.get(col, col).abs();
        let mut max_row = col;
        for row in (col + 1)..n {
            if aug.get(row, col).abs() > max_val {
                max_val = aug.get(row, col).abs();
                max_row = row;
            }
        }

        if max_val < PIVOT_FLOOR {
            return None;
        }

        if col != max_row {
            // Swap rows `col` and `max_row` inside the flat buffer.
            let (row_a, row_b) = (col * ncols, max_row * ncols);
            let (first, second) = aug.data.split_at_mut(row_b);
            first[row_a..row_a + ncols].swap_with_slice(&mut second[..ncols]);
        }

        // Normalize the pivot row, then zero the column in all other rows.
        let pivot = aug.get(col, col);
        for j in 0..ncols {
            *aug.get_mut(col, j) /= pivot;
        }

        for row in 0..n {
            if row != col {
                let factor = aug.get(row, col);
                for j in 0..ncols {
                    let val = aug.get(col, j);
                    *aug.get_mut(row, j) -= factor * val;
                }
            }
        }
    }

    // Copy the right half out as the inverse.
    let mut inv = FlatMatrix::zeros(n, n);
    for i in 0..n {
        for j in 0..n {
            *inv.get_mut(i, j) = aug.get(i, n + j);
        }
    }

    Some(inv)
}
439
/// Levenberg-Marquardt least-squares fit of `params` to `y_obs` with
/// per-point uncertainties `sigma`.
///
/// Iteratively solves the damped normal equations, growing `lambda` on
/// rejected steps and shrinking it on accepted ones. Terminates on
/// convergence (small relative chi2 or parameter change, or a verified
/// stationary point at a non-improving step), on `config.max_iter`, on a
/// singular damped system, or when `lambda` exceeds `LAMBDA_BREAKOUT`.
///
/// Only the free parameters in `params` are optimized; fixed ones are held
/// at their values. When the fit converges, `config.compute_covariance` is
/// set, and dof > 0, the covariance of the free parameters (scaled by the
/// reduced chi2) and their 1-sigma uncertainties are also returned.
///
/// # Errors
/// - [`FittingError::EmptyData`] when `y_obs` is empty.
/// - [`FittingError::LengthMismatch`] when `sigma.len() != y_obs.len()`.
/// - Errors from the initial model evaluation are propagated.
pub fn levenberg_marquardt(
    model: &dyn FitModel,
    y_obs: &[f64],
    sigma: &[f64],
    params: &mut ParameterSet,
    config: &LmConfig,
) -> Result<LmResult, FittingError> {
    let n_data = y_obs.len();
    if n_data == 0 {
        return Err(FittingError::EmptyData);
    }
    if sigma.len() != n_data {
        return Err(FittingError::LengthMismatch {
            expected: n_data,
            actual: sigma.len(),
            field: "sigma",
        });
    }

    let n_free = params.n_free();

    // All parameters fixed: nothing to optimize. Evaluate once and report
    // the chi2 of the fixed-parameter model.
    if n_free == 0 {
        // Invalid sigmas (non-finite or <= 0) get a vanishing weight so
        // they contribute negligibly instead of poisoning chi2.
        let weights: Vec<f64> = sigma
            .iter()
            .map(|&s| {
                if !s.is_finite() || s <= 0.0 {
                    1.0 / 1e30
                } else {
                    1.0 / (s * s)
                }
            })
            .collect();
        let y_model = model.evaluate(&params.all_values())?;

        // A non-finite model at fixed parameters cannot be assessed.
        if !y_model.iter().all(|v| v.is_finite()) {
            return Ok(LmResult {
                chi_squared: f64::NAN,
                reduced_chi_squared: f64::NAN,
                iterations: 0,
                converged: false,
                params: params.all_values(),
                covariance: None,
                uncertainties: None,
            });
        }

        let residuals: Vec<f64> = y_obs
            .iter()
            .zip(y_model.iter())
            .map(|(&obs, &mdl)| obs - mdl)
            .collect();
        let chi2 = chi_squared(&residuals, &weights);
        // n_free == 0 here, so dof == n_data.
        let dof = n_data - n_free;
        let reduced = if dof > 0 { chi2 / dof as f64 } else { f64::NAN };
        return Ok(LmResult {
            chi_squared: chi2,
            reduced_chi_squared: reduced,
            iterations: 0,
            converged: true,
            params: params.all_values(),
            // Zero free parameters: empty covariance / uncertainties.
            covariance: Some(FlatMatrix::zeros(0, 0)),
            uncertainties: Some(vec![]),
        });
    }

    // Underdetermined (fewer data points than free parameters): refuse to
    // fit rather than produce a meaningless solution.
    if n_data < n_free {
        return Ok(LmResult {
            chi_squared: f64::NAN,
            reduced_chi_squared: f64::NAN,
            iterations: 0,
            converged: false,
            params: params.all_values(),
            covariance: None,
            uncertainties: None,
        });
    }
    let dof = n_data - n_free;

    // Same sigma clamping as the fixed-parameter path above.
    let weights: Vec<f64> = sigma
        .iter()
        .map(|&s| {
            if !s.is_finite() || s <= 0.0 {
                1.0 / 1e30
            } else {
                1.0 / (s * s)
            }
        })
        .collect();

    // Scratch buffers reused across iterations to avoid reallocation.
    let mut all_vals_buf = Vec::with_capacity(params.params.len());
    let mut free_vals_buf = Vec::with_capacity(n_free);
    let mut free_idx_buf = Vec::with_capacity(n_free);

    // Initial model evaluation and chi2 at the starting parameters.
    params.all_values_into(&mut all_vals_buf);
    let mut y_current = model.evaluate(&all_vals_buf)?;
    let mut residuals: Vec<f64> = y_obs
        .iter()
        .zip(y_current.iter())
        .map(|(&obs, &mdl)| obs - mdl)
        .collect();
    let mut chi2 = chi_squared(&residuals, &weights);

    let mut lambda = config.lambda_init;
    let mut converged = false;
    let mut iter = 0;

    for _ in 0..config.max_iter {
        iter += 1;

        let jacobian = compute_jacobian(
            model,
            params,
            &y_current,
            config.fd_step,
            &mut all_vals_buf,
            &mut free_idx_buf,
        )?;

        // Accumulate the normal equations: J^T W J and J^T W r.
        let mut jtw_j = FlatMatrix::zeros(n_free, n_free);
        let mut jtw_r = vec![0.0; n_free];

        for (i, (&wi, &ri)) in weights.iter().zip(residuals.iter()).enumerate() {
            for (j, jtw_r_j) in jtw_r.iter_mut().enumerate() {
                let jij = jacobian.get(i, j);
                *jtw_r_j += jij * wi * ri;
                for k in 0..n_free {
                    *jtw_j.get_mut(j, k) += jij * wi * jacobian.get(i, k);
                }
            }
        }
        // Diagnostics used by the stationary-point convergence test below.
        let scaled_grad_inf = scaled_gradient_inf_norm(&jtw_j, &jtw_r, chi2);
        let informative_curvature = has_informative_curvature(&jtw_j);

        // Damped step; a singular system ends the iteration (unconverged).
        let delta = match solve_damped_system(&jtw_j, &jtw_r, lambda) {
            Some(d) => d,
            None => break,
        };

        params.free_values_into(&mut free_vals_buf);
        let trial_free: Vec<f64> = free_vals_buf
            .iter()
            .zip(delta.iter())
            .map(|(&v, &d)| v + d)
            .collect();
        // Relative step norm over all free parameters.
        let param_change: f64 = delta
            .iter()
            .zip(free_vals_buf.iter())
            .map(|(&d, &v)| (d / (v.abs() + PIVOT_FLOOR)).powi(2))
            .sum::<f64>()
            .sqrt();
        params.set_free_values(&trial_free);

        params.all_values_into(&mut all_vals_buf);
        let y_trial = match model.evaluate(&all_vals_buf) {
            Ok(y) => y,
            Err(_) => {
                // Trial point invalid: roll back and increase damping.
                params.set_free_values(&free_vals_buf);
                lambda *= config.lambda_up;
                if lambda > LAMBDA_BREAKOUT {
                    converged = false;
                    break;
                }
                continue;
            }
        };

        // Non-finite model values are treated like a failed evaluation.
        if y_trial.iter().any(|v| !v.is_finite()) {
            params.set_free_values(&free_vals_buf);
            lambda *= config.lambda_up;
            if lambda > LAMBDA_BREAKOUT {
                converged = false;
                break;
            }
            continue;
        }

        let trial_residuals: Vec<f64> = y_obs
            .iter()
            .zip(y_trial.iter())
            .map(|(&obs, &mdl)| obs - mdl)
            .collect();
        let trial_chi2 = chi_squared(&trial_residuals, &weights);
        let chi2_delta = (trial_chi2 - chi2).abs();
        let chi2_scale = chi2.abs().max(trial_chi2.abs()).max(1.0);
        let chi2_stagnated = chi2_delta <= config.tol_chi2 * chi2_scale;

        if trial_chi2 < chi2 {
            // Accepted step: keep the trial state and relax the damping.
            let rel_change = (chi2 - trial_chi2) / (chi2 + PIVOT_FLOOR);
            chi2 = trial_chi2;
            residuals = trial_residuals;
            y_current = y_trial;
            lambda *= config.lambda_down;

            if rel_change < config.tol_chi2 || param_change < config.tol_param {
                converged = true;
                break;
            }
        } else {
            // Rejected step. Before increasing damping, check whether we
            // are at a genuine stationary point (stagnant chi2, tiny step,
            // small scaled gradient, and real curvature) — if so, declare
            // convergence instead of escalating lambda to breakout.
            let grad_tol = config.tol_chi2.sqrt().max(config.tol_param.sqrt());
            if chi2_stagnated
                && param_change < config.tol_param
                && scaled_grad_inf < grad_tol
                && informative_curvature
            {
                params.set_free_values(&free_vals_buf);
                converged = true;
                break;
            }

            // Roll back the trial parameters and increase the damping.
            params.set_free_values(&free_vals_buf);
            lambda *= config.lambda_up;

            if lambda > LAMBDA_BREAKOUT {
                converged = false;
                break;
            }
        }
    }

    let reduced_chi2 = if dof > 0 { chi2 / dof as f64 } else { f64::NAN };

    // Covariance only for converged fits with dof > 0: invert J^T W J at
    // the solution and scale by the reduced chi2.
    let (covariance, uncertainties) = if converged && config.compute_covariance {
        let jacobian = compute_jacobian(
            model,
            params,
            &y_current,
            config.fd_step,
            &mut all_vals_buf,
            &mut free_idx_buf,
        )?;
        let mut jtw_j = FlatMatrix::zeros(n_free, n_free);
        for (i, &wi) in weights.iter().enumerate() {
            for j in 0..n_free {
                let jij = jacobian.get(i, j);
                for k in 0..n_free {
                    *jtw_j.get_mut(j, k) += jij * wi * jacobian.get(i, k);
                }
            }
        }

        if dof > 0 {
            if let Some(mut cov) = invert_matrix(&jtw_j) {
                for elem in cov.data.iter_mut() {
                    *elem *= reduced_chi2;
                }
                // 1-sigma uncertainties from the covariance diagonal;
                // non-positive or non-finite entries become NaN.
                let unc: Vec<f64> = (0..n_free)
                    .map(|i| {
                        let diag = cov.get(i, i);
                        if diag.is_finite() && diag > 0.0 {
                            diag.sqrt()
                        } else {
                            f64::NAN
                        }
                    })
                    .collect();
                (Some(cov), Some(unc))
            } else {
                (None, None)
            }
        } else {
            (None, None)
        }
    } else {
        (None, None)
    };

    Ok(LmResult {
        chi_squared: chi2,
        reduced_chi_squared: reduced_chi2,
        iterations: iter,
        converged,
        params: params.all_values(),
        covariance,
        uncertainties,
    })
}
804
#[cfg(test)]
mod tests {
    use super::*;
    use crate::parameters::{FitParameter, ParameterSet};

    /// Simple y = a*x + b model shared by several tests.
    struct LinearModel {
        x: Vec<f64>,
    }

    impl FitModel for LinearModel {
        fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError> {
            let a = params[0];
            let b = params[1];
            Ok(self.x.iter().map(|&x| a * x + b).collect())
        }
    }

    // Noise-free linear data: the fit must recover the exact coefficients
    // and a near-zero chi2.
    #[test]
    fn test_fit_linear_exact() {
        let x: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| 2.0 * xi + 3.0).collect();
        let sigma = vec![1.0; 10];

        let model = LinearModel { x };
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 1.0),
            FitParameter::unbounded("b", 1.0),
        ]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(result.converged, "Fit did not converge");
        assert!(
            (result.params[0] - 2.0).abs() < 1e-4,
            "a = {}, expected 2.0",
            result.params[0]
        );
        assert!(
            (result.params[1] - 3.0).abs() < 1e-4,
            "b = {}, expected 3.0",
            result.params[1]
        );
        assert!(result.chi_squared < 1e-6);
    }

    // With lambda_init = 0 and an exactly solvable problem, the very first
    // step lands on chi2 = 0; subsequent steps cannot improve, so the
    // stationary-point path must declare convergence rather than inflating
    // lambda to breakout.
    #[test]
    fn test_converges_on_exact_flat_bottom_without_lambda_breakout() {
        let x: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| 2.0 * xi + 3.0).collect();
        let sigma = vec![1.0; 10];

        let model = LinearModel { x };
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 0.0),
            FitParameter::unbounded("b", 0.0),
        ]);
        let config = LmConfig {
            lambda_init: 0.0,
            ..LmConfig::default()
        };

        let result = levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &config).unwrap();

        assert!(
            result.converged,
            "Fit should converge on an exact flat bottom"
        );
        assert!(result.chi_squared < 1e-20, "chi2 = {}", result.chi_squared);
        assert!(
            result.iterations < config.max_iter,
            "LM should stop by convergence, not by iteration exhaustion"
        );
    }

    // Noisy data gives a nonzero chi2 at the optimum; even with extremely
    // tight tolerances, the stationary-point check must still converge.
    #[test]
    fn test_converges_on_nonzero_chi2_stationary_point() {
        struct AffineModel {
            x: Vec<f64>,
        }
        impl FitModel for AffineModel {
            fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError> {
                let a = params[0];
                let b = params[1];
                Ok(self.x.iter().map(|&x| a * x + b).collect())
            }
        }

        let model = AffineModel {
            x: vec![0.0, 1.0, 2.0, 3.0, 4.0],
        };
        let y_obs = vec![0.1, 0.9, 2.2, 2.8, 4.1];
        let sigma = vec![1.0; y_obs.len()];
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 0.0),
            FitParameter::unbounded("b", 0.0),
        ]);
        let config = LmConfig {
            max_iter: 200,
            tol_chi2: 1e-16,
            tol_param: 1e-16,
            ..LmConfig::default()
        };

        let result = levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &config).unwrap();

        assert!(
            result.converged,
            "stationary nonzero-chi2 optimum should converge instead of lambda breakout"
        );
        assert!(
            result.reduced_chi_squared.is_finite() && result.reduced_chi_squared > 0.0,
            "expected nonzero reduced chi2 at noisy optimum, got {}",
            result.reduced_chi_squared
        );
    }

    // A fixed parameter must stay at its value while the free one is fit.
    #[test]
    fn test_fit_linear_with_fixed_param() {
        let x: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| 2.0 * xi + 3.0).collect();
        let sigma = vec![1.0; 10];

        let model = LinearModel { x };
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 1.0),
            FitParameter::fixed("b", 3.0),
        ]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(result.converged);
        assert!(
            (result.params[0] - 2.0).abs() < 1e-6,
            "a = {}",
            result.params[0]
        );
        // The fixed parameter must be returned exactly as set.
        assert_eq!(result.params[1], 3.0);
    }

    // Three-parameter quadratic on exact data.
    #[test]
    fn test_fit_quadratic() {
        struct QuadModel {
            x: Vec<f64>,
        }
        impl FitModel for QuadModel {
            fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError> {
                let (a, b, c) = (params[0], params[1], params[2]);
                Ok(self.x.iter().map(|&x| a * x * x + b * x + c).collect())
            }
        }

        let x: Vec<f64> = (0..20).map(|i| i as f64 * 0.5).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| 0.5 * xi * xi - 2.0 * xi + 1.0).collect();
        let sigma = vec![1.0; 20];

        let model = QuadModel { x };
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 1.0),
            FitParameter::unbounded("b", 0.0),
            FitParameter::unbounded("c", 0.0),
        ]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(result.converged);
        assert!(
            (result.params[0] - 0.5).abs() < 1e-5,
            "a = {}",
            result.params[0]
        );
        assert!(
            (result.params[1] - (-2.0)).abs() < 1e-5,
            "b = {}",
            result.params[1]
        );
        assert!(
            (result.params[2] - 1.0).abs() < 1e-5,
            "c = {}",
            result.params[2]
        );
    }

    // Data pull the slope negative, but the bound keeps it >= 0, so the fit
    // should settle at (or just above) the boundary.
    #[test]
    fn test_non_negative_constraint() {
        struct SlopeModel {
            x: Vec<f64>,
        }
        impl FitModel for SlopeModel {
            fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError> {
                let a = params[0];
                Ok(self.x.iter().map(|&x| a * x).collect())
            }
        }

        let x: Vec<f64> = (1..10).map(|i| i as f64).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| -2.0 * xi).collect();
        let sigma = vec![1.0; 9];

        let model = SlopeModel { x };
        let mut params = ParameterSet::new(vec![FitParameter::non_negative("a", 1.0)]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(
            result.params[0] >= 0.0 && result.params[0] < 0.1,
            "a = {}, expected ~0",
            result.params[0]
        );
    }

    // Converged fit with small sigmas should produce small, positive
    // parameter uncertainties.
    #[test]
    fn test_uncertainty_estimation() {
        let x: Vec<f64> = (0..100).map(|i| i as f64 * 0.1).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| 2.0 * xi + 3.0).collect();
        let sigma = vec![0.1; 100];
        let model = LinearModel { x };
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 1.0),
            FitParameter::unbounded("b", 1.0),
        ]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(result.converged);
        assert!(result.uncertainties.is_some());
        let unc = result.uncertainties.unwrap();
        assert!(unc[0] > 0.0 && unc[0] < 0.01, "σ_a = {}", unc[0]);
        assert!(unc[1] > 0.0 && unc[1] < 0.1, "σ_b = {}", unc[1]);
    }

    // Identity A with lambda = 1 doubles the diagonal (damping adds
    // lambda * diag), so the solution is b / 2.
    #[test]
    fn test_solve_damped_system_identity() {
        let a = FlatMatrix {
            data: vec![1.0, 0.0, 0.0, 1.0],
            nrows: 2,
            ncols: 2,
        };
        let b = vec![2.0, 4.0];
        let lambda = 1.0;
        let x = solve_damped_system(&a, &b, lambda).unwrap();
        assert!((x[0] - 1.0).abs() < 1e-10);
        assert!((x[1] - 2.0).abs() < 1e-10);
    }

    // det = 4*6 - 7*2 = 10; the inverse is [[0.6, -0.7], [-0.2, 0.4]].
    #[test]
    fn test_invert_matrix_2x2() {
        let a = FlatMatrix {
            data: vec![4.0, 7.0, 2.0, 6.0],
            nrows: 2,
            ncols: 2,
        };
        let inv = invert_matrix(&a).unwrap();
        assert!((inv.get(0, 0) - 0.6).abs() < 1e-10);
        assert!((inv.get(0, 1) - (-0.7)).abs() < 1e-10);
        assert!((inv.get(1, 0) - (-0.2)).abs() < 1e-10);
        assert!((inv.get(1, 1) - 0.4).abs() < 1e-10);
    }

    // With every parameter fixed and a NaN model, the zero-free-parameter
    // path must report a NaN chi2 and no convergence, without iterating.
    #[test]
    fn test_all_fixed_params_nan_model() {
        struct NanModel;
        impl FitModel for NanModel {
            fn evaluate(&self, _params: &[f64]) -> Result<Vec<f64>, FittingError> {
                Ok(vec![f64::NAN; 5])
            }
        }

        let y_obs = vec![1.0; 5];
        let sigma = vec![1.0; 5];
        let mut params = ParameterSet::new(vec![FitParameter::fixed("a", 1.0)]);

        let result =
            levenberg_marquardt(&NanModel, &y_obs, &sigma, &mut params, &LmConfig::default())
                .unwrap();

        assert!(!result.converged, "All-fixed NaN model should not converge");
        assert!(result.chi_squared.is_nan(), "chi2 should be NaN");
        assert_eq!(result.iterations, 0);
    }

    // More free parameters (3) than data points (2): the fitter must bail
    // out immediately rather than attempt an underdetermined solve.
    #[test]
    fn test_underdetermined_system() {
        let y_obs = vec![1.0, 2.0];
        let sigma = vec![1.0, 1.0];

        struct ThreeParamModel;
        impl FitModel for ThreeParamModel {
            fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError> {
                Ok(vec![params[0] + params[1] + params[2]; 2])
            }
        }

        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 1.0),
            FitParameter::unbounded("b", 1.0),
            FitParameter::unbounded("c", 1.0),
        ]);

        let result = levenberg_marquardt(
            &ThreeParamModel,
            &y_obs,
            &sigma,
            &mut params,
            &LmConfig::default(),
        )
        .unwrap();

        assert!(
            !result.converged,
            "Underdetermined system should not converge"
        );
        assert!(result.chi_squared.is_nan());
        assert_eq!(result.iterations, 0);
    }

    // Two points, two parameters: dof = 0, so the fit converges but the
    // reduced chi2 is NaN and covariance/uncertainties are withheld.
    #[test]
    fn test_exactly_determined_dof_zero() {
        let y_obs = vec![5.0, 11.0];
        let sigma = vec![1.0, 1.0];

        let model = LinearModel { x: vec![1.0, 4.0] };
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 1.0),
            FitParameter::unbounded("b", 1.0),
        ]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(
            result.converged,
            "Exactly-determined system should converge"
        );
        assert!(
            result.chi_squared < 1e-6,
            "chi2 should be ~0, got {}",
            result.chi_squared
        );
        assert!(
            result.reduced_chi_squared.is_nan(),
            "reduced_chi2 should be NaN for dof=0, got {}",
            result.reduced_chi_squared
        );
        assert!(result.covariance.is_none());
        assert!(result.uncertainties.is_none());
    }

    // A model independent of its parameter has zero gradient everywhere and
    // can never improve: lambda should inflate past the breakout ceiling.
    #[test]
    fn test_lambda_breakout() {
        struct ConstantModel;
        impl FitModel for ConstantModel {
            fn evaluate(&self, _params: &[f64]) -> Result<Vec<f64>, FittingError> {
                Ok(vec![42.0; 5])
            }
        }

        let y_obs = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let sigma = vec![1.0; 5];
        let mut params = ParameterSet::new(vec![FitParameter::unbounded("a", 1.0)]);

        let config = LmConfig {
            max_iter: 1000,
            ..LmConfig::default()
        };

        let result =
            levenberg_marquardt(&ConstantModel, &y_obs, &sigma, &mut params, &config).unwrap();

        assert!(
            !result.converged,
            "Flat model should not converge (lambda breakout)"
        );
        assert!(
            result.covariance.is_none(),
            "unconverged fit should not report covariance"
        );
        assert!(
            result.uncertainties.is_none(),
            "unconverged fit should not report uncertainties"
        );
    }

    // A model that returns NaN for a > 5: trial steps into that region must
    // be rejected (rolled back) and the fit should still reach a ~= 2.
    #[test]
    fn test_nan_model_during_iteration() {
        struct NanAtLargeModel {
            x: Vec<f64>,
        }
        impl FitModel for NanAtLargeModel {
            fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError> {
                let a = params[0];
                Ok(self
                    .x
                    .iter()
                    .map(|&x| if a > 5.0 { f64::NAN } else { a * x + 1.0 })
                    .collect())
            }
        }

        let x: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| 2.0 * xi + 1.0).collect();
        let sigma = vec![1.0; 10];

        let model = NanAtLargeModel { x };
        let mut params = ParameterSet::new(vec![FitParameter::unbounded("a", 3.0)]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(result.converged, "Should converge avoiding NaN region");
        assert!(
            (result.params[0] - 2.0).abs() < 0.1,
            "a = {}, expected ~2.0",
            result.params[0]
        );
    }

    // Same as above, but the model returns Err instead of NaN: the Err
    // branch of the trial evaluation must also roll back and re-damp.
    #[test]
    fn test_err_model_during_trial_step() {
        struct ErrAtLargeModel {
            x: Vec<f64>,
        }
        impl FitModel for ErrAtLargeModel {
            fn evaluate(&self, params: &[f64]) -> Result<Vec<f64>, FittingError> {
                let a = params[0];
                if a > 5.0 {
                    return Err(FittingError::EvaluationFailed(
                        "parameter out of valid range".into(),
                    ));
                }
                Ok(self.x.iter().map(|&x| a * x + 1.0).collect())
            }
        }

        let x: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| 2.0 * xi + 1.0).collect();
        let sigma = vec![1.0; 10];

        let model = ErrAtLargeModel { x };
        let mut params = ParameterSet::new(vec![FitParameter::unbounded("a", 3.0)]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(result.converged, "Should converge avoiding Err region");
        assert!(
            (result.params[0] - 2.0).abs() < 0.1,
            "a = {}, expected ~2.0",
            result.params[0]
        );
    }

    // compute_covariance = false must suppress covariance/uncertainties even
    // for a fully converged fit.
    #[test]
    fn test_fit_linear_no_covariance() {
        let x: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let y_obs: Vec<f64> = x.iter().map(|&xi| 2.0 * xi + 3.0).collect();
        let sigma = vec![1.0; 10];

        let model = LinearModel { x };
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 1.0),
            FitParameter::unbounded("b", 1.0),
        ]);

        let config = LmConfig {
            compute_covariance: false,
            ..LmConfig::default()
        };

        let result = levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &config).unwrap();

        assert!(result.converged, "Fit did not converge");
        assert!(
            (result.params[0] - 2.0).abs() < 1e-4,
            "a = {}, expected 2.0",
            result.params[0]
        );
        assert!(
            (result.params[1] - 3.0).abs() < 1e-4,
            "b = {}, expected 3.0",
            result.params[1]
        );
        assert!(result.chi_squared < 1e-6);
        assert!(
            result.covariance.is_none(),
            "covariance should be None when compute_covariance=false"
        );
        assert!(
            result.uncertainties.is_none(),
            "uncertainties should be None when compute_covariance=false"
        );
    }

    // Invalid sigmas (zero, negative, NaN, infinite) must be clamped to a
    // tiny weight; only the one valid point (4, 5) should constrain the fit.
    #[test]
    fn test_zero_negative_sigma_clamping() {
        let y_obs = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let sigma = vec![0.0, -1.0, f64::NAN, f64::INFINITY, 1.0];

        let model = LinearModel {
            x: vec![0.0, 1.0, 2.0, 3.0, 4.0],
        };
        let mut params = ParameterSet::new(vec![
            FitParameter::unbounded("a", 1.0),
            FitParameter::unbounded("b", 0.0),
        ]);

        let result =
            levenberg_marquardt(&model, &y_obs, &sigma, &mut params, &LmConfig::default()).unwrap();

        assert!(
            result.chi_squared.is_finite(),
            "chi2 should be finite despite bad sigma, got {}",
            result.chi_squared
        );
        assert!(
            result.converged,
            "Fit should converge despite bad sigma values"
        );
        let y_at_4 = result.params[0] * 4.0 + result.params[1];
        assert!(
            (y_at_4 - 5.0).abs() < 1.0,
            "Fitted line should pass near (4, 5): a={}, b={}, y(4)={}",
            result.params[0],
            result.params[1],
            y_at_4,
        );
    }
}