diff --git a/test/likelihood.jl b/test/likelihood.jl
index f6a3e8d..242aea1 100644
--- a/test/likelihood.jl
+++ b/test/likelihood.jl
@@ -19,12 +19,12 @@ end
 aggregate_data = convert(Array, VectorOfArray([generate_data(sol, t) for i in 1:100]))
 
 distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
-obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions), maxiters = 10000,
+obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions), maxiters = 1000,
     verbose = false)
 optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
     ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
 @test result.original.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1
 
 data_distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
@@ -33,11 +33,11 @@ diff_distributions = [fit_mle(Normal,
     for j in 2:200, i in 1:2]
 obj = build_loss_objective(prob1, Tsit5(),
     LogLikeLoss(t, data_distributions, diff_distributions),
-    Optimization.AutoForwardDiff(), maxiters = 10000,
+    Optimization.AutoForwardDiff(), maxiters = 1000,
     verbose = false)
 optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
     ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
 @test result.original.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1
 
 data_distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
@@ -46,14 +46,14 @@ diff_distributions = [fit_mle(Normal,
     for j in 2:200, i in 1:2]
 obj = build_loss_objective(prob1, Tsit5(),
     LogLikeLoss(t, data_distributions, diff_distributions, 0.3),
-    Optimization.AutoForwardDiff(), maxiters = 10000,
+    Optimization.AutoForwardDiff(), maxiters = 1000,
     verbose = false)
 optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
     ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
 @test result.u≈[1.5, 1.0] atol=1e-1
 
 using OptimizationBBO.BlackBoxOptim
-result = bboptimize(obj, SearchRange = [(0.5, 5.0), (0.5, 5.0)], MaxSteps = 11e3)
+result = bboptimize(obj, SearchRange = [(0.5, 5.0), (0.5, 5.0)], MaxSteps = 1e3)
 @test result.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1
 
 distributions = [fit_mle(MvNormal, aggregate_data[:, j, :]) for j in 1:200]
@@ -63,9 +63,9 @@ diff_distributions = [fit_mle(MvNormal,
 priors = [Truncated(Normal(1.5, 0.1), 0, 2), Truncated(Normal(1.0, 0.1), 0, 1.5)]
 obj = build_loss_objective(prob1, Tsit5(),
     LogLikeLoss(t, distributions, diff_distributions),
-    Optimization.AutoForwardDiff(), maxiters = 10000,
+    Optimization.AutoForwardDiff(), maxiters = 1000,
     verbose = false, priors = priors)
 optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
     ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
 @test result.u≈[1.5, 1.0] atol=1e-1
diff --git a/test/multiple_shooting_objective_test.jl b/test/multiple_shooting_objective_test.jl
index 5155a57..d206c15 100644
--- a/test/multiple_shooting_objective_test.jl
+++ b/test/multiple_shooting_objective_test.jl
@@ -18,7 +18,7 @@ ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data),
     Optimization.AutoZygote();
     discontinuity_weight = 1.0, abstol = 1e-12,
     reltol = 1e-12)
-result = bboptimize(ms_obj; SearchRange = bound, MaxSteps = 21e3)
+result = bboptimize(ms_obj; SearchRange = bound, MaxSteps = 1e3)
 @test result.archive_output.best_candidate[(end - 1):end]≈[1.5, 1.0] atol=2e-1
 
 priors = [Truncated(Normal(1.5, 0.5), 0, 2), Truncated(Normal(1.0, 0.5), 0, 1.5)]
diff --git a/test/tests_on_odes/blackboxoptim_test.jl b/test/tests_on_odes/blackboxoptim_test.jl
index f768315..01f881f 100644
--- a/test/tests_on_odes/blackboxoptim_test.jl
+++ b/test/tests_on_odes/blackboxoptim_test.jl
@@ -2,19 +2,19 @@ using BlackBoxOptim
 println("Use BlackBoxOptim to fit the parameter")
 
 cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 bound1 = Tuple{Float64, Float64}[(1, 2)]
-result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 5e3)
+result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 1e3)
 @test result.archive_output.best_candidate[1]≈1.5 atol=3e-1
 
 cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 bound2 = Tuple{Float64, Float64}[(1, 2), (2, 4)]
-result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 5e3)
+result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 1e3)
 @test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1
 
 cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)]
-result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 5e3)
+result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
 @test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
diff --git a/test/tests_on_odes/genetic_algorithm_test.jl b/test/tests_on_odes/genetic_algorithm_test.jl
index ddfc47e..c698a89 100644
--- a/test/tests_on_odes/genetic_algorithm_test.jl
+++ b/test/tests_on_odes/genetic_algorithm_test.jl
@@ -14,7 +14,7 @@ println("Use Genetic Algorithm to fit the parameter")
 # Floating number specifies fraction of population.
 cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 N = 1
 result, fitness, cnt = ga(cost_function, N;
     initPopulation = Float64[1.2],
@@ -26,7 +26,7 @@ result, fitness, cnt = ga(cost_function, N;
 @test result[1]≈1.5 atol=3e-1
 
 cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
-    maxiters = 10000)
+    maxiters = 1000)
 N = 2
 result, fitness, cnt = ga(cost_function, N;
     initPopulation = Float64[1.2, 2.8],
@@ -38,7 +38,7 @@ result, fitness, cnt = ga(cost_function, N;
 @test result≈[1.5; 3.0] atol=3e-1
 
 cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
-    maxiters = 10000)
+    maxiters = 1000)
 N = 4
 result, fitness, cnt = ga(cost_function, N;
     initPopulation = Float64[1.3, 0.8, 2.8, 1.2],
diff --git a/test/tests_on_odes/l2_colloc_grad_test.jl b/test/tests_on_odes/l2_colloc_grad_test.jl
index 10c8cb7..7c87177 100644
--- a/test/tests_on_odes/l2_colloc_grad_test.jl
+++ b/test/tests_on_odes/l2_colloc_grad_test.jl
@@ -2,7 +2,7 @@ weight = 1.0e-6
 cost_function = build_loss_objective(prob1, Tsit5(),
     L2Loss(t, data, colloc_grad = colloc_grad(t, data)),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 result = Optim.optimize(cost_function, 1.0, 2.0)
 @test result.minimizer≈1.5 atol=3e-1
@@ -10,7 +10,7 @@ cost_function = build_loss_objective(prob2, Tsit5(),
     L2Loss(t, data, differ_weight = weight,
         data_weight = weight,
         colloc_grad = colloc_grad(t, data)),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 result = Optim.optimize(cost_function, [1.3, 2.8], Optim.BFGS())
 @test result.minimizer≈[1.5; 3.0] atol=3e-1
@@ -18,7 +18,7 @@ cost_function = build_loss_objective(prob3, Tsit5(),
     L2Loss(t, data, differ_weight = weight,
         colloc_grad = colloc_grad(t, data)),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 result = Optim.optimize(cost_function, [1.4, 0.9, 2.9, 1.2], Optim.BFGS())
 @test result.minimizer≈[1.5, 1.0, 3.0, 1.0] atol=3e-1
@@ -26,6 +26,6 @@ cost_function = build_loss_objective(prob1, Tsit5(),
     L2Loss(t, data, data_weight = weight,
         colloc_grad = colloc_grad(t, data)),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 result = Optim.optimize(cost_function, 1.0, 2)
 @test result.minimizer≈1.5 atol=3e-1
diff --git a/test/tests_on_odes/l2loss_test.jl b/test/tests_on_odes/l2loss_test.jl
index 65bcb52..8801217 100644
--- a/test/tests_on_odes/l2loss_test.jl
+++ b/test/tests_on_odes/l2loss_test.jl
@@ -3,7 +3,7 @@ using BlackBoxOptim, Optim
 cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
     maxiters = 10000, verbose = false)
 bound1 = Tuple{Float64, Float64}[(1, 2)]
-result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 1e3)
 @test result.archive_output.best_candidate[1]≈1.5 atol=3e-1
 
 cost_function = build_loss_objective(prob2, Tsit5(),
@@ -11,13 +11,13 @@ cost_function = build_loss_objective(prob2, Tsit5(),
     data_weight = 1.0),
     maxiters = 10000, verbose = false)
 bound2 = Tuple{Float64, Float64}[(1, 2), (1, 4)]
-result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 1e3)
 @test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1
 
 cost_function = build_loss_objective(prob3, Tsit5(),
     L2Loss(t, data, differ_weight = 10),
     maxiters = 10000, verbose = false)
 bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4),
     (0, 2)]
-result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 1e3)
 @test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
 
 cost_function = build_loss_objective(prob3, Tsit5(),
@@ -25,5 +25,5 @@ cost_function = build_loss_objective(prob3, Tsit5(),
     data_weight = 0.7),
     maxiters = 10000, verbose = false)
 bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (1, 4), (0, 2)]
-result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 1e3)
 @test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
diff --git a/test/tests_on_odes/nlopt_test.jl b/test/tests_on_odes/nlopt_test.jl
index afd7076..9da6460 100644
--- a/test/tests_on_odes/nlopt_test.jl
+++ b/test/tests_on_odes/nlopt_test.jl
@@ -3,7 +3,7 @@ using OptimizationNLopt, Zygote
 println("Use NLOpt to fit the parameter")
 
 obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 
 opt = Opt(:LN_COBYLA, 1)
 optprob = OptimizationNLopt.OptimizationProblem(obj, [1.4])
@@ -14,7 +14,7 @@ opt = Opt(:GN_ESCH, 1)
 lower_bounds!(opt, [1.0])
 upper_bounds!(opt, [3.0])
 xtol_rel!(opt, 1e-3)
-maxeval!(opt, 10000)
+maxeval!(opt, 1000)
 res = solve(optprob, opt)
 @test res.u[1]≈1.5 atol=1e-1
@@ -22,17 +22,17 @@ opt = Opt(:GN_ISRES, 1)
 lower_bounds!(opt, [1.0])
 upper_bounds!(opt, [3.0])
 xtol_rel!(opt, 1e-4)
-maxeval!(opt, 100 - 000)
+maxeval!(opt, 1000)
 res = solve(optprob, opt)
 @test res.u[1]≈1.5 atol=1e-1
 
 # test differentiation
 obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoForwardDiff();
-    maxiters = 10000) #zygote behaves weirdly here
+    maxiters = 1000) #zygote behaves weirdly here
 opt = Opt(:LD_MMA, 1)
 xtol_rel!(opt, 1e-3)
-maxeval!(opt, 10000)
+maxeval!(opt, 1000)
 optprob = OptimizationNLopt.OptimizationProblem(obj, [1.3])
 res = solve(optprob, opt)
 @test res.u[1]≈1.5 atol=1e-1
diff --git a/test/tests_on_odes/optim_test.jl b/test/tests_on_odes/optim_test.jl
index b57141a..f6c5356 100644
--- a/test/tests_on_odes/optim_test.jl
+++ b/test/tests_on_odes/optim_test.jl
@@ -1,6 +1,6 @@
 using Optim, Random
 obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 
 ### Optim Method
diff --git a/test/tests_on_odes/regularization_test.jl b/test/tests_on_odes/regularization_test.jl
index 9877acc..7edefd1 100644
--- a/test/tests_on_odes/regularization_test.jl
+++ b/test/tests_on_odes/regularization_test.jl
@@ -2,7 +2,7 @@ using PenaltyFunctions, OptimizationOptimJL, LinearAlgebra, SciMLSensitivity
 cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
     Optimization.AutoZygote(),
-    Regularization(0.6, L2Penalty()), maxiters = 10000,
+    Regularization(0.6, L2Penalty()), maxiters = 1000,
     verbose = false, abstol = 1e-8, reltol = 1e-8)
 cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
     Optimization.AutoZygote(),
@@ -10,24 +10,24 @@ cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
     MahalanobisPenalty(Matrix(1.0I, 2, 2))),
     verbose = false, abstol = 1e-8, reltol = 1e-8,
-    maxiters = 10000)
+    maxiters = 1000)
 cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
     Optimization.AutoZygote(),
     Regularization(0.1,
         MahalanobisPenalty(Matrix(1.0I, 4, 4))),
     verbose = false, abstol = 1e-8, reltol = 1e-8,
-    maxiters = 10000)
+    maxiters = 1000)
 
 println("Use Optim BFGS to fit the parameter")
 optprob = Optimization.OptimizationProblem(cost_function_1, [1.0])
-result = solve(optprob, Optim.LBFGS())
+result = solve(optprob, Optim.BFGS())
 @test result.u[1]≈1.5 atol=3e-1
 
 optprob = Optimization.OptimizationProblem(cost_function_2, [1.2, 2.7])
-result = solve(optprob, Optim.LBFGS())
+result = solve(optprob, Optim.BFGS())
 @test result.minimizer≈[1.5; 3.0] atol=3e-1
 
 optprob = Optimization.OptimizationProblem(cost_function_3, [1.3, 0.8, 2.8, 1.2])
-result = solve(optprob, Optim.LBFGS())
+result = solve(optprob, Optim.BFGS())
 @test result.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
diff --git a/test/tests_on_odes/weighted_loss_test.jl b/test/tests_on_odes/weighted_loss_test.jl
index 3acb60a..5641f50 100644
--- a/test/tests_on_odes/weighted_loss_test.jl
+++ b/test/tests_on_odes/weighted_loss_test.jl
@@ -22,7 +22,7 @@ weighted_data = original_solution_matrix_form + error
 weighted_cost_function = build_loss_objective(prob1, Tsit5(),
     L2Loss(t, weighted_data,
         data_weight = weight),
-    maxiters = 10000, verbose = false)
+    maxiters = 1000, verbose = false)
 opt = Opt(:LN_COBYLA, 1)
 min_objective!(opt, weighted_cost_function)
 (minf, minx, ret) = NLopt.optimize(opt, [1.3])