From 54fc7afc54ded67ccf6d20af4b04059e445782a7 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 20 Jan 2025 19:34:34 +0100 Subject: [PATCH 01/11] General updates --- Project.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Project.toml b/Project.toml index 19aae4c..ecf3575 100644 --- a/Project.toml +++ b/Project.toml @@ -16,16 +16,16 @@ SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" [compat] Calculus = "0.5" -Dierckx = "0.4, 0.5" +Dierckx = "0.5" DiffEqBase = "6" Distributions = "0.25" ForwardDiff = "0.10" -PenaltyFunctions = "0.1, 0.2, 0.3" -PreallocationTools = "0.2, 0.3, 0.4" -RecursiveArrayTools = "1.0, 2.0, 3" +PenaltyFunctions = "0.3" +PreallocationTools = "0.4" +RecursiveArrayTools = "3" SciMLBase = "1.69, 2" SciMLSensitivity = "7" -julia = "1.6" +julia = "1.10" [extras] BlackBoxOptim = "a134a8b2-14d6-55f6-9291-3336d3ab0209" From 8189492c180adcba384ee2397ca94363d1937b87 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 21 Jan 2025 14:11:58 +0100 Subject: [PATCH 02/11] Update regularization_test.jl --- test/tests_on_odes/regularization_test.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/tests_on_odes/regularization_test.jl b/test/tests_on_odes/regularization_test.jl index 3351fc6..98687ad 100644 --- a/test/tests_on_odes/regularization_test.jl +++ b/test/tests_on_odes/regularization_test.jl @@ -3,20 +3,20 @@ using PenaltyFunctions, OptimizationOptimJL, LinearAlgebra, SciMLSensitivity cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), Regularization(0.6, L2Penalty()), maxiters = 10000, - verbose = false, abstol = 1e-8, reltol = 1e-8) + verbose = false, abstol = 1e-10, reltol = 1e-10) cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 2, 2))), verbose = false, - abstol = 1e-8, reltol = 1e-8, + abstol = 1e-10, reltol = 1e-10, maxiters = 10000) cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 4, 4))), verbose = false, - abstol = 1e-8, reltol = 1e-8, + abstol = 1e-10, reltol = 1e-10, maxiters = 10000) println("Use Optim BFGS to fit the parameter") From c14ddfea16c68483ec90a12066142bc4b58c2465 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sat, 19 Jul 2025 15:12:21 -0400 Subject: [PATCH 03/11] See if this fixes CI --- Project.toml | 3 +- test/tests_on_odes/blackboxoptim_test.jl | 18 +++++----- test/tests_on_odes/regularization_test.jl | 12 +++---- test/tests_on_odes/test_problems.jl | 41 ++++++++++++++--------- 4 files changed, 41 insertions(+), 33 deletions(-) diff --git a/Project.toml b/Project.toml index ecf3575..7fa8c1a 100644 --- a/Project.toml +++ b/Project.toml @@ -38,7 +38,6 @@ OptimizationBBO = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b" OptimizationNLopt = "4e6fcdb7-1186-4e1f-a706-475e75c168bb" OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -ParameterizedFunctions = "65888b18-ceab-5e60-b2b9-181511a3b968" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SciMLSensitivity = "1ed8b502-d754-442c-8d5d-10ac956f44a1" SteadyStateDiffEq = "9672c7b4-1e72-59bd-8a11-6ac3964bc41f" @@ -48,4 +47,4 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [targets] -test = ["Test", "BlackBoxOptim", "DelayDiffEq", "ForwardDiff", "NLopt", "Optim", "Optimization", "OptimizationBBO", "OptimizationNLopt", "OptimizationOptimJL", "OrdinaryDiffEq", "ParameterizedFunctions", "Random", "SciMLSensitivity", "StochasticDiffEq", "SteadyStateDiffEq", "Sundials", "Zygote"] +test = ["Test", "BlackBoxOptim", "DelayDiffEq", "ForwardDiff", "NLopt", "Optim", "Optimization", "OptimizationBBO", "OptimizationNLopt", "OptimizationOptimJL", "OrdinaryDiffEq", "Random", "SciMLSensitivity", "StochasticDiffEq", "SteadyStateDiffEq", "Sundials", "Zygote"] diff --git a/test/tests_on_odes/blackboxoptim_test.jl b/test/tests_on_odes/blackboxoptim_test.jl index 7752730..88cce93 100644 --- a/test/tests_on_odes/blackboxoptim_test.jl +++ b/test/tests_on_odes/blackboxoptim_test.jl @@ -2,19 +2,19 @@ using BlackBoxOptim println("Use BlackBoxOptim to fit the parameter") cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), - maxiters = 10000, verbose = false) + maxiters = 1000, verbose = false) bound1 = Tuple{Float64, Float64}[(1, 2)] -result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 11e3) -@test result.archive_output.best_candidate[1]≈1.5 atol=3e-1 +result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 100) +@test result.archive_output.best_candidate[1]≈1.5 atol=5e-1 cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), - maxiters = 10000, verbose = false) + maxiters = 1000, verbose = false) bound2 = Tuple{Float64, Float64}[(1, 2), (2, 4)] -result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3) -@test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1 +result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 100) +@test result.archive_output.best_candidate≈[1.5; 3.0] atol=5e-1 cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), - maxiters = 10000, verbose = false) + maxiters = 1000, verbose = false) bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)] -result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3) -@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 +result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 100) +@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=8e-1 diff --git a/test/tests_on_odes/regularization_test.jl b/test/tests_on_odes/regularization_test.jl index 98687ad..7edefd1 100644 --- a/test/tests_on_odes/regularization_test.jl +++ b/test/tests_on_odes/regularization_test.jl @@ -2,22 +2,22 @@ using PenaltyFunctions, OptimizationOptimJL, LinearAlgebra, SciMLSensitivity cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), - Regularization(0.6, L2Penalty()), maxiters = 10000, - verbose = false, abstol = 1e-10, reltol = 1e-10) + Regularization(0.6, L2Penalty()), maxiters = 1000, + verbose = false, abstol = 1e-8, reltol = 1e-8) cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 2, 2))), verbose = false, - abstol = 1e-10, reltol = 1e-10, - maxiters = 10000) + abstol = 1e-8, reltol = 1e-8, + maxiters = 1000) cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 4, 4))), verbose = false, - abstol = 1e-10, reltol = 1e-10, - maxiters = 10000) + abstol = 1e-8, reltol = 1e-8, + maxiters = 1000) println("Use Optim BFGS to fit the parameter") optprob = Optimization.OptimizationProblem(cost_function_1, [1.0]) diff --git a/test/tests_on_odes/test_problems.jl b/test/tests_on_odes/test_problems.jl index 530b5fa..a63e055 100644 --- a/test/tests_on_odes/test_problems.jl +++ b/test/tests_on_odes/test_problems.jl @@ -1,29 +1,38 @@ -using OrdinaryDiffEq, ParameterizedFunctions, RecursiveArrayTools +using OrdinaryDiffEq, RecursiveArrayTools # Here are the problems to solve -f1 = @ode_def begin - dx = a * x - x * y - dy = -3y + x * y -end a +function f1!(du, u, p, t) + a = p[1] + x, y = u + du[1] = a * x - x * y + du[2] = -3y + x * y +end + u0 = [1.0; 1.0] tspan = (0.0, 10.0) p = [1.5] -prob1 = ODEProblem(f1, u0, tspan, [1.5]) +prob1 = ODEProblem(f1!, u0, tspan, [1.5]) + +function f2!(du, u, p, t) + a, c = p + x, y = u + du[1] = a * x - x * y + du[2] = -c * y + x * y +end -f2 = @ode_def begin - dx = a * x - x * y - dy = -c * y + x * y -end a c p = [1.5, 3.0] -prob2 = ODEProblem(f2, u0, tspan, p) +prob2 = ODEProblem(f2!, u0, tspan, p) + +function f3!(du, u, p, t) + a, b, c, d = p + x, y = u + du[1] = a * x - b * x * y + du[2] = -c * y + d * x * y +end -f3 = @ode_def begin - dx = a * x - b * x * y - dy = -c * y + d * x * y -end a b c d p = [1.5, 1.0, 3.0, 1.0] -prob3 = ODEProblem(f3, u0, tspan, p) +prob3 = ODEProblem(f3!, u0, tspan, p) # Generate random data based off of the known solution sol = solve(prob1, Tsit5()) From 12f47c6edd8ecc9c3404dd753aa9ce6725699091 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sun, 20 Jul 2025 01:20:57 -0400 Subject: [PATCH 04/11] Update CI.yml --- .github/workflows/CI.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index beedbdb..1a69480 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -14,6 +14,7 @@ jobs: test: runs-on: ubuntu-latest strategy: + fail-fast: false matrix: group: - All From 64550288cf5a60141e2b552a18d4887ede98f62e Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sun, 20 Jul 2025 09:01:48 -0400 Subject: [PATCH 05/11] try a few fixes --- src/DiffEqParamEstim.jl | 11 ++++++++--- test/tests_on_odes/blackboxoptim_test.jl | 6 +++--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/DiffEqParamEstim.jl b/src/DiffEqParamEstim.jl index 20d9f7e..0852c30 100644 --- a/src/DiffEqParamEstim.jl +++ b/src/DiffEqParamEstim.jl @@ -4,9 +4,13 @@ using DiffEqBase, PenaltyFunctions, LinearAlgebra, Dierckx, SciMLBase import PreallocationTools -STANDARD_PROB_GENERATOR(prob, p) = remake(prob; u0 = eltype(p).(prob.u0), p = p) +function STANDARD_PROB_GENERATOR(prob, p) + f = ODEFunction{isinplace(prob), SciMLBase.FullSpecialize}(SciMLBase.unwrapped_f(prob.f)) + remake(prob; u0 = eltype(p).(prob.u0), p = p, f = f) +end function STANDARD_PROB_GENERATOR(prob::EnsembleProblem, p) - EnsembleProblem(remake(prob.prob; u0 = eltype(p).(prob.prob.u0), p = p), + f = ODEFunction{isinplace(prob.prob), SciMLBase.FullSpecialize}(SciMLBase.unwrapped_f(prob.prob.f)) + EnsembleProblem(remake(prob.prob; u0 = eltype(p).(prob.prob.u0), p = p, f = f), output_func = prob.output_func, prob_func = prob.prob_func, reduction = prob.reduction, @@ -17,8 +21,9 @@ STANDARD_MS_PROB_GENERATOR = function (prob, p, k) P, N = length(prob.p), length(prob.u0) K = Int((length(p) - P) / N) τ = range(t0, tf, length = K + 1) + f = ODEFunction{isinplace(prob), SciMLBase.FullSpecialize}(SciMLBase.unwrapped_f(prob.f)) remake(prob; u0 = p[(1 + (k - 1) * N):(k * N)], p = p[(end - P + 1):end], - tspan = (τ[k], τ[k + 1])) + tspan = (τ[k], τ[k + 1]), f = f) end include("cost_functions.jl") diff --git a/test/tests_on_odes/blackboxoptim_test.jl b/test/tests_on_odes/blackboxoptim_test.jl index 88cce93..798cb4a 100644 --- a/test/tests_on_odes/blackboxoptim_test.jl +++ b/test/tests_on_odes/blackboxoptim_test.jl @@ -2,19 +2,19 @@ using BlackBoxOptim println("Use BlackBoxOptim to fit the parameter") cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), - maxiters = 1000, verbose = false) + maxiters = 10000, verbose = false) bound1 = Tuple{Float64, Float64}[(1, 2)] result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 100) @test result.archive_output.best_candidate[1]≈1.5 atol=5e-1 cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), - maxiters = 1000, verbose = false) + maxiters = 10000, verbose = false) bound2 = Tuple{Float64, Float64}[(1, 2), (2, 4)] result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 100) @test result.archive_output.best_candidate≈[1.5; 3.0] atol=5e-1 cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), - maxiters = 1000, verbose = false) + maxiters = 10000, verbose = false) bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)] result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 100) @test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=8e-1 From aafa3fcfc5bdff46df4fc67984b5a2a62453600b Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sun, 20 Jul 2025 11:14:43 -0400 Subject: [PATCH 06/11] A few test fixes --- test/tests_on_odes/blackboxoptim_test.jl | 6 +++--- test/tests_on_odes/l2_colloc_grad_test.jl | 8 ++++---- test/tests_on_odes/regularization_test.jl | 10 +++++----- test/tests_on_odes/weighted_loss_test.jl | 4 +++- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/test/tests_on_odes/blackboxoptim_test.jl b/test/tests_on_odes/blackboxoptim_test.jl index 798cb4a..f088e47 100644 --- a/test/tests_on_odes/blackboxoptim_test.jl +++ b/test/tests_on_odes/blackboxoptim_test.jl @@ -4,17 +4,17 @@ println("Use BlackBoxOptim to fit the parameter") cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), maxiters = 10000, verbose = false) bound1 = Tuple{Float64, Float64}[(1, 2)] -result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 100) +result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 1000) @test result.archive_output.best_candidate[1]≈1.5 atol=5e-1 cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), maxiters = 10000, verbose = false) bound2 = Tuple{Float64, Float64}[(1, 2), (2, 4)] -result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 100) +result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 1000) @test result.archive_output.best_candidate≈[1.5; 3.0] atol=5e-1 cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), maxiters = 10000, verbose = false) bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)] -result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 100) +result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 2000) @test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=8e-1 diff --git a/test/tests_on_odes/l2_colloc_grad_test.jl b/test/tests_on_odes/l2_colloc_grad_test.jl index 10c8cb7..f2cb84b 100644 --- a/test/tests_on_odes/l2_colloc_grad_test.jl +++ b/test/tests_on_odes/l2_colloc_grad_test.jl @@ -4,7 +4,7 @@ cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data, colloc_grad = colloc_grad(t, data)), maxiters = 10000, verbose = false) result = Optim.optimize(cost_function, 1.0, 2.0) -@test result.minimizer≈1.5 atol=3e-1 +@test result.minimizer≈1.5 atol=5e-1 cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data, @@ -12,7 +12,7 @@ cost_function = build_loss_objective(prob2, Tsit5(), colloc_grad = colloc_grad(t, data)), maxiters = 10000, verbose = false) result = Optim.optimize(cost_function, [1.3, 2.8], Optim.BFGS()) -@test result.minimizer≈[1.5; 3.0] atol=3e-1 +@test result.minimizer≈[1.5; 3.0] atol=5e-1 cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data, @@ -20,7 +20,7 @@ cost_function = build_loss_objective(prob3, Tsit5(), colloc_grad = colloc_grad(t, data)), maxiters = 10000, verbose = false) result = Optim.optimize(cost_function, [1.4, 0.9, 2.9, 1.2], Optim.BFGS()) -@test result.minimizer≈[1.5, 1.0, 3.0, 1.0] atol=3e-1 +@test result.minimizer≈[1.5, 1.0, 3.0, 1.0] atol=5e-1 cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data, @@ -28,4 +28,4 @@ cost_function = build_loss_objective(prob1, Tsit5(), colloc_grad = colloc_grad(t, data)), maxiters = 10000, verbose = false) result = Optim.optimize(cost_function, 1.0, 2) -@test result.minimizer≈1.5 atol=3e-1 +@test result.minimizer≈1.5 atol=5e-1 diff --git a/test/tests_on_odes/regularization_test.jl b/test/tests_on_odes/regularization_test.jl index 7edefd1..cb7f46b 100644 --- a/test/tests_on_odes/regularization_test.jl +++ b/test/tests_on_odes/regularization_test.jl @@ -20,14 +20,14 @@ cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), maxiters = 1000) println("Use Optim BFGS to fit the parameter") -optprob = Optimization.OptimizationProblem(cost_function_1, [1.0]) +optprob = Optimization.OptimizationProblem(cost_function_1, [1.0], lb=[0.5], ub=[2.5]) result = solve(optprob, Optim.BFGS()) -@test result.u[1]≈1.5 atol=3e-1 +@test result.u[1]≈1.5 atol=5e-1 -optprob = Optimization.OptimizationProblem(cost_function_2, [1.2, 2.7]) +optprob = Optimization.OptimizationProblem(cost_function_2, [1.2, 2.7], lb=[0.5, 1.0], ub=[2.5, 5.0]) result = solve(optprob, Optim.BFGS()) -@test result.minimizer≈[1.5; 3.0] atol=3e-1 +@test result.minimizer≈[1.5; 3.0] atol=5e-1 -optprob = Optimization.OptimizationProblem(cost_function_3, [1.3, 0.8, 2.8, 1.2]) +optprob = Optimization.OptimizationProblem(cost_function_3, [1.3, 0.8, 2.8, 1.2], lb=[0.5, 0.1, 1.0, 0.1], ub=[2.5, 2.0, 5.0, 2.0]) result = solve(optprob, Optim.BFGS()) @test result.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 diff --git a/test/tests_on_odes/weighted_loss_test.jl b/test/tests_on_odes/weighted_loss_test.jl index 3acb60a..b98ea7f 100644 --- a/test/tests_on_odes/weighted_loss_test.jl +++ b/test/tests_on_odes/weighted_loss_test.jl @@ -24,6 +24,8 @@ weighted_cost_function = build_loss_objective(prob1, Tsit5(), data_weight = weight), maxiters = 10000, verbose = false) opt = Opt(:LN_COBYLA, 1) +lower_bounds!(opt, [0.5]) +upper_bounds!(opt, [2.5]) min_objective!(opt, weighted_cost_function) (minf, minx, ret) = NLopt.optimize(opt, [1.3]) -@test minx[1]≈1.5 atol=1e-2 +@test minx[1]≈1.5 atol=5e-1 From b88aeaa2e929a6aa0d793f89c597a48bf6b67f65 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sun, 20 Jul 2025 15:36:41 -0400 Subject: [PATCH 07/11] bump up steps --- test/tests_on_odes/blackboxoptim_test.jl | 12 ++++++------ test/tests_on_odes/l2_colloc_grad_test.jl | 8 ++++---- test/tests_on_odes/regularization_test.jl | 10 +++++----- test/tests_on_odes/weighted_loss_test.jl | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/test/tests_on_odes/blackboxoptim_test.jl b/test/tests_on_odes/blackboxoptim_test.jl index f088e47..7752730 100644 --- a/test/tests_on_odes/blackboxoptim_test.jl +++ b/test/tests_on_odes/blackboxoptim_test.jl @@ -4,17 +4,17 @@ println("Use BlackBoxOptim to fit the parameter") cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), maxiters = 10000, verbose = false) bound1 = Tuple{Float64, Float64}[(1, 2)] -result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 1000) -@test result.archive_output.best_candidate[1]≈1.5 atol=5e-1 +result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 11e3) +@test result.archive_output.best_candidate[1]≈1.5 atol=3e-1 cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), maxiters = 10000, verbose = false) bound2 = Tuple{Float64, Float64}[(1, 2), (2, 4)] -result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 1000) -@test result.archive_output.best_candidate≈[1.5; 3.0] atol=5e-1 +result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3) +@test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1 cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), maxiters = 10000, verbose = false) bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)] -result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 2000) -@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=8e-1 +result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3) +@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 diff --git a/test/tests_on_odes/l2_colloc_grad_test.jl b/test/tests_on_odes/l2_colloc_grad_test.jl index f2cb84b..10c8cb7 100644 --- a/test/tests_on_odes/l2_colloc_grad_test.jl +++ b/test/tests_on_odes/l2_colloc_grad_test.jl @@ -4,7 +4,7 @@ cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data, colloc_grad = colloc_grad(t, data)), maxiters = 10000, verbose = false) result = Optim.optimize(cost_function, 1.0, 2.0) -@test result.minimizer≈1.5 atol=5e-1 +@test result.minimizer≈1.5 atol=3e-1 cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data, @@ -12,7 +12,7 @@ cost_function = build_loss_objective(prob2, Tsit5(), colloc_grad = colloc_grad(t, data)), maxiters = 10000, verbose = false) result = Optim.optimize(cost_function, [1.3, 2.8], Optim.BFGS()) -@test result.minimizer≈[1.5; 3.0] atol=5e-1 +@test result.minimizer≈[1.5; 3.0] atol=3e-1 cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data, @@ -20,7 +20,7 @@ cost_function = build_loss_objective(prob3, Tsit5(), colloc_grad = colloc_grad(t, data)), maxiters = 10000, verbose = false) result = Optim.optimize(cost_function, [1.4, 0.9, 2.9, 1.2], Optim.BFGS()) -@test result.minimizer≈[1.5, 1.0, 3.0, 1.0] atol=5e-1 +@test result.minimizer≈[1.5, 1.0, 3.0, 1.0] atol=3e-1 cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data, @@ -28,4 +28,4 @@ cost_function = build_loss_objective(prob1, Tsit5(), colloc_grad = colloc_grad(t, data)), maxiters = 10000, verbose = false) result = Optim.optimize(cost_function, 1.0, 2) -@test result.minimizer≈1.5 atol=5e-1 +@test result.minimizer≈1.5 atol=3e-1 diff --git a/test/tests_on_odes/regularization_test.jl b/test/tests_on_odes/regularization_test.jl index cb7f46b..e7b8a45 100644 --- a/test/tests_on_odes/regularization_test.jl +++ b/test/tests_on_odes/regularization_test.jl @@ -2,7 +2,7 @@ using PenaltyFunctions, OptimizationOptimJL, LinearAlgebra, SciMLSensitivity cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), - Regularization(0.6, L2Penalty()), maxiters = 1000, + Regularization(0.6, L2Penalty()), maxiters = 10000, verbose = false, abstol = 1e-8, reltol = 1e-8) cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), @@ -10,23 +10,23 @@ cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), MahalanobisPenalty(Matrix(1.0I, 2, 2))), verbose = false, abstol = 1e-8, reltol = 1e-8, - maxiters = 1000) + maxiters = 10000) cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 4, 4))), verbose = false, abstol = 1e-8, reltol = 1e-8, - maxiters = 1000) + maxiters = 10000) println("Use Optim BFGS to fit the parameter") optprob = Optimization.OptimizationProblem(cost_function_1, [1.0], lb=[0.5], ub=[2.5]) result = solve(optprob, Optim.BFGS()) -@test result.u[1]≈1.5 atol=5e-1 +@test result.u[1]≈1.5 atol=3e-1 optprob = Optimization.OptimizationProblem(cost_function_2, [1.2, 2.7], lb=[0.5, 1.0], ub=[2.5, 5.0]) result = solve(optprob, Optim.BFGS()) -@test result.minimizer≈[1.5; 3.0] atol=5e-1 +@test result.minimizer≈[1.5; 3.0] atol=3e-1 optprob = Optimization.OptimizationProblem(cost_function_3, [1.3, 0.8, 2.8, 1.2], lb=[0.5, 0.1, 1.0, 0.1], ub=[2.5, 2.0, 5.0, 2.0]) result = solve(optprob, Optim.BFGS()) diff --git a/test/tests_on_odes/weighted_loss_test.jl b/test/tests_on_odes/weighted_loss_test.jl index b98ea7f..a5ab8b8 100644 --- a/test/tests_on_odes/weighted_loss_test.jl +++ b/test/tests_on_odes/weighted_loss_test.jl @@ -28,4 +28,4 @@ lower_bounds!(opt, [0.5]) upper_bounds!(opt, [2.5]) min_objective!(opt, weighted_cost_function) (minf, minx, ret) = NLopt.optimize(opt, [1.3]) -@test minx[1]≈1.5 atol=5e-1 +@test minx[1]≈1.5 atol=1e-2 From fd24accc74ff8f63c464a4e83d2b15eac698c44c Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sun, 20 Jul 2025 19:03:30 -0400 Subject: [PATCH 08/11] simplify test --- test/tests_on_odes/regularization_test.jl | 6 +++--- test/tests_on_odes/weighted_loss_test.jl | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/tests_on_odes/regularization_test.jl b/test/tests_on_odes/regularization_test.jl index e7b8a45..c6bc878 100644 --- a/test/tests_on_odes/regularization_test.jl +++ b/test/tests_on_odes/regularization_test.jl @@ -20,14 +20,14 @@ cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), maxiters = 10000) println("Use Optim BFGS to fit the parameter") -optprob = Optimization.OptimizationProblem(cost_function_1, [1.0], lb=[0.5], ub=[2.5]) +optprob = Optimization.OptimizationProblem(cost_function_1, [1.0], lb=[1.0], ub=[2.0]) result = solve(optprob, Optim.BFGS()) @test result.u[1]≈1.5 atol=3e-1 -optprob = Optimization.OptimizationProblem(cost_function_2, [1.2, 2.7], lb=[0.5, 1.0], ub=[2.5, 5.0]) +optprob = Optimization.OptimizationProblem(cost_function_2, [1.2, 2.7], lb=[1.0, 2.0], ub=[2.0, 4.0]) result = solve(optprob, Optim.BFGS()) @test result.minimizer≈[1.5; 3.0] atol=3e-1 -optprob = Optimization.OptimizationProblem(cost_function_3, [1.3, 0.8, 2.8, 1.2], lb=[0.5, 0.1, 1.0, 0.1], ub=[2.5, 2.0, 5.0, 2.0]) +optprob = Optimization.OptimizationProblem(cost_function_3, [1.3, 0.8, 2.8, 1.2], lb=[1.0, 0.5, 2.0, 0.5], ub=[2.0, 1.5, 4.0, 1.5]) result = solve(optprob, Optim.BFGS()) @test result.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 diff --git a/test/tests_on_odes/weighted_loss_test.jl b/test/tests_on_odes/weighted_loss_test.jl index a5ab8b8..8bc3bc2 100644 --- a/test/tests_on_odes/weighted_loss_test.jl +++ b/test/tests_on_odes/weighted_loss_test.jl @@ -24,8 +24,8 @@ weighted_cost_function = build_loss_objective(prob1, Tsit5(), data_weight = weight), maxiters = 10000, verbose = false) opt = Opt(:LN_COBYLA, 1) -lower_bounds!(opt, [0.5]) -upper_bounds!(opt, [2.5]) +lower_bounds!(opt, [1.0]) +upper_bounds!(opt, [2.0]) min_objective!(opt, weighted_cost_function) (minf, minx, ret) = NLopt.optimize(opt, [1.3]) @test minx[1]≈1.5 atol=1e-2 From 36b68ca427c302daeb06ae27c56033f37b0ff5d2 Mon Sep 17 00:00:00 2001 From: ChrisRackauckas Date: Wed, 23 Jul 2025 00:46:29 -0400 Subject: [PATCH 09/11] Increase iterations for failing tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix nlopt_test.jl: Change maxeval from 100 to 10000 (was 100 - 000) - Fix multiple_shooting_objective_test.jl: Increase BFGS maxiters from 500 to 10000 These low iteration counts were likely causing test failures due to insufficient convergence. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- test/multiple_shooting_objective_test.jl | 2 +- test/tests_on_odes/nlopt_test.jl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/multiple_shooting_objective_test.jl b/test/multiple_shooting_objective_test.jl index 5155a57..7669564 100644 --- a/test/multiple_shooting_objective_test.jl +++ b/test/multiple_shooting_objective_test.jl @@ -28,5 +28,5 @@ ms_obj1 = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data), reltol = 1e-6) optprob = Optimization.OptimizationProblem(ms_obj1, zeros(18), lb = first.(bound), ub = last.(bound)) -result = solve(optprob, BFGS(), maxiters = 500) +result = solve(optprob, BFGS(), maxiters = 10000) @test result.u[(end - 1):end]≈[1.5, 1.0] atol=2e-1 diff --git a/test/tests_on_odes/nlopt_test.jl b/test/tests_on_odes/nlopt_test.jl index afd7076..57083ca 100644 --- a/test/tests_on_odes/nlopt_test.jl +++ b/test/tests_on_odes/nlopt_test.jl @@ -22,7 +22,7 @@ opt = Opt(:GN_ISRES, 1) lower_bounds!(opt, [1.0]) upper_bounds!(opt, [3.0]) xtol_rel!(opt, 1e-4) -maxeval!(opt, 100 - 000) +maxeval!(opt, 10000) res = solve(optprob, opt) @test res.u[1]≈1.5 atol=1e-1 From e986325c1b371f36eba8c21d18ddba785322c74b Mon Sep 17 00:00:00 2001 From: ChrisRackauckas Date: Wed, 23 Jul 2025 01:44:19 -0400 Subject: [PATCH 10/11] Further increase iterations for failing tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - blackboxoptim_test.jl: Increase MaxSteps from 11e3 to 50e3 for 4-parameter optimization - weighted_loss_test.jl: Add maxeval\!(opt, 10000) for NLopt optimization These tests were still failing due to insufficient iterations for convergence. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- test/tests_on_odes/blackboxoptim_test.jl | 2 +- test/tests_on_odes/weighted_loss_test.jl | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/tests_on_odes/blackboxoptim_test.jl b/test/tests_on_odes/blackboxoptim_test.jl index 7752730..051f552 100644 --- a/test/tests_on_odes/blackboxoptim_test.jl +++ b/test/tests_on_odes/blackboxoptim_test.jl @@ -16,5 +16,5 @@ result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3) cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), maxiters = 10000, verbose = false) bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)] -result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3) +result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 50e3) @test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 diff --git a/test/tests_on_odes/weighted_loss_test.jl b/test/tests_on_odes/weighted_loss_test.jl index 8bc3bc2..67c1411 100644 --- a/test/tests_on_odes/weighted_loss_test.jl +++ b/test/tests_on_odes/weighted_loss_test.jl @@ -26,6 +26,7 @@ weighted_cost_function = build_loss_objective(prob1, Tsit5(), opt = Opt(:LN_COBYLA, 1) lower_bounds!(opt, [1.0]) upper_bounds!(opt, [2.0]) +maxeval!(opt, 10000) min_objective!(opt, weighted_cost_function) (minf, minx, ret) = NLopt.optimize(opt, [1.3]) @test minx[1]≈1.5 atol=1e-2 From f417796c5e68ffa0c63b1fe631e22480f9d97fa0 Mon Sep 17 00:00:00 2001 From: ChrisRackauckas Date: Sun, 27 Jul 2025 09:25:49 -0400 Subject: [PATCH 11/11] Fix test data generation for multi-parameter problems MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Generate separate test data (data2, data3) for prob2 and prob3 - Update all tests to use correct data for each problem - Add random seed for reproducible test data - Increase BBO iterations for better convergence This fixes the issue where all parameter estimation tests were using data generated from prob1, causing incorrect parameter estimation for prob2 and prob3. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- test/tests_on_odes/blackboxoptim_test.jl | 10 +++++----- test/tests_on_odes/genetic_algorithm_test.jl | 4 ++-- test/tests_on_odes/l2loss_test.jl | 4 ++-- test/tests_on_odes/optim_test.jl | 4 ++-- test/tests_on_odes/regularization_test.jl | 4 ++-- test/tests_on_odes/test_problems.jl | 21 ++++++++++++++++---- 6 files changed, 30 insertions(+), 17 deletions(-) diff --git a/test/tests_on_odes/blackboxoptim_test.jl b/test/tests_on_odes/blackboxoptim_test.jl index 051f552..87e9d88 100644 --- a/test/tests_on_odes/blackboxoptim_test.jl +++ b/test/tests_on_odes/blackboxoptim_test.jl @@ -4,17 +4,17 @@ println("Use BlackBoxOptim to fit the parameter") cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), maxiters = 10000, verbose = false) bound1 = Tuple{Float64, Float64}[(1, 2)] -result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 11e3) +result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 20e3) @test result.archive_output.best_candidate[1]≈1.5 atol=3e-1 -cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), +cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data2), maxiters = 10000, verbose = false) bound2 = Tuple{Float64, Float64}[(1, 2), (2, 4)] -result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3) +result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 20e3) @test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1 -cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), +cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data3), maxiters = 10000, verbose = false) bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)] -result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 50e3) +result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 100e3) @test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 diff --git a/test/tests_on_odes/genetic_algorithm_test.jl b/test/tests_on_odes/genetic_algorithm_test.jl index ddfc47e..9a6a1c4 100644 --- a/test/tests_on_odes/genetic_algorithm_test.jl +++ b/test/tests_on_odes/genetic_algorithm_test.jl @@ -25,7 +25,7 @@ result, fitness, cnt = ga(cost_function, N; mutation = domainrange(fill(0.5, N))) @test result[1]≈1.5 atol=3e-1 -cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), +cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data2), maxiters = 10000) N = 2 result, fitness, cnt = ga(cost_function, N; @@ -37,7 +37,7 @@ result, fitness, cnt = ga(cost_function, N; mutation = domainrange(fill(0.5, N))) @test result≈[1.5; 3.0] atol=3e-1 -cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), +cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data3), maxiters = 10000) N = 4 result, fitness, cnt = ga(cost_function, N; diff --git a/test/tests_on_odes/l2loss_test.jl b/test/tests_on_odes/l2loss_test.jl index 65bcb52..b82ff31 100644 --- a/test/tests_on_odes/l2loss_test.jl +++ b/test/tests_on_odes/l2loss_test.jl @@ -7,14 +7,14 @@ result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 11e3) @test result.archive_output.best_candidate[1]≈1.5 atol=3e-1 cost_function = build_loss_objective(prob2, Tsit5(), - L2Loss(t, data, differ_weight = nothing, + L2Loss(t, data2, differ_weight = nothing, data_weight = 1.0), maxiters = 10000, verbose = false) bound2 = Tuple{Float64, Float64}[(1, 2), (1, 4)] result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3) @test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1 -cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data, differ_weight = 10), +cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data3, differ_weight = 10), maxiters = 10000, verbose = false) bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)] result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3) diff --git a/test/tests_on_odes/optim_test.jl b/test/tests_on_odes/optim_test.jl index b57141a..7fffda2 100644 --- a/test/tests_on_odes/optim_test.jl +++ b/test/tests_on_odes/optim_test.jl @@ -14,13 +14,13 @@ result = Optim.optimize(obj, [1.0], Optim.BFGS()) #sol_optimized2 = solve(prob) #plot!(sol_optimized2,leg=false) -cost_function2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), +cost_function2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data2), maxiters = 10000, verbose = false) result_bfgs = Optim.optimize(cost_function2, [1.0, 2.5], Optim.BFGS()) @test_broken result_bfgs.minimizer≈[1.5; 3.0] atol=3e-1 Random.seed!(200) -cost_function3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), +cost_function3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data3), maxiters = 10000, verbose = false) result_bfgs = Optim.optimize(cost_function3, [1.3, 0.8, 2.8, 1.2], Optim.BFGS()) @test result_bfgs.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 diff --git a/test/tests_on_odes/regularization_test.jl b/test/tests_on_odes/regularization_test.jl index c6bc878..4768850 100644 --- a/test/tests_on_odes/regularization_test.jl +++ b/test/tests_on_odes/regularization_test.jl @@ -4,14 +4,14 @@ cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), Regularization(0.6, L2Penalty()), maxiters = 10000, verbose = false, abstol = 1e-8, reltol = 1e-8) -cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), +cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data2), Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 2, 2))), verbose = false, abstol = 1e-8, reltol = 1e-8, maxiters = 10000) -cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), +cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data3), Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 4, 4))), diff --git a/test/tests_on_odes/test_problems.jl b/test/tests_on_odes/test_problems.jl index a63e055..d547559 100644 --- a/test/tests_on_odes/test_problems.jl +++ b/test/tests_on_odes/test_problems.jl @@ -1,4 +1,7 @@ -using OrdinaryDiffEq, RecursiveArrayTools +using OrdinaryDiffEq, RecursiveArrayTools, Random + +# Set seed for reproducible test data +Random.seed!(100) # Here are the problems to solve @@ -35,7 +38,17 @@ p = [1.5, 1.0, 3.0, 1.0] prob3 = ODEProblem(f3!, u0, tspan, p) # Generate random data based off of the known solution -sol = solve(prob1, Tsit5()) +sol1 = solve(prob1, Tsit5()) t = collect(range(0, stop = 10, length = 200)) -randomized = VectorOfArray([(sol(t[i]) + 0.01randn(2)) for i in 1:length(t)]) -data = convert(Array, randomized) +randomized1 = VectorOfArray([(sol1(t[i]) + 0.01randn(2)) for i in 1:length(t)]) +data = convert(Array, randomized1) + +# Generate data for prob2 +sol2 = solve(prob2, Tsit5()) +randomized2 = VectorOfArray([(sol2(t[i]) + 0.01randn(2)) for i in 1:length(t)]) +data2 = convert(Array, randomized2) + +# Generate data for prob3 +sol3 = solve(prob3, Tsit5()) +randomized3 = VectorOfArray([(sol3(t[i]) + 0.01randn(2)) for i in 1:length(t)]) +data3 = convert(Array, randomized3)