More lower iterations
Vaibhavdixit02 committed Feb 25, 2024
1 parent 9dbeb8a commit 91089a5
Showing 10 changed files with 40 additions and 40 deletions.
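All ten files exercise the same fitting loop, and the commit mostly turns down its iteration knobs. As context, here is a minimal, hypothetical sketch of that loop — the one-parameter decay model and synthetic `data` below are stand-ins, not the suite's actual `prob1`/`data`:

```julia
using OrdinaryDiffEq, DiffEqParamEstim, BlackBoxOptim

# Hypothetical stand-in model with true parameter p = 1.5.
decay(u, p, t) = -p[1] .* u
prob = ODEProblem(decay, [1.0], (0.0, 10.0), [1.5])
t = collect(range(0.0, 10.0, length = 200))
data = Array(solve(prob, Tsit5(), saveat = t))  # synthetic "measurements"

# `maxiters` caps ODE solver steps per objective evaluation;
# the optimizer's `MaxSteps`/`maxiters` caps candidate evaluations.
cost = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
    maxiters = 1000, verbose = false)
result = bboptimize(cost; SearchRange = [(1.0, 2.0)], MaxSteps = 1e3)
best_candidate(result)  # should land near 1.5
```

Lowering `maxiters`/`MaxSteps` shortens each test run; the loose `atol` values in the assertions leave room for the resulting optimizer noise.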
18 changes: 9 additions & 9 deletions test/likelihood.jl
@@ -19,12 +19,12 @@ end
aggregate_data = convert(Array, VectorOfArray([generate_data(sol, t) for i in 1:100]))

distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
-obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions), maxiters = 10000,
+obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions), maxiters = 1000,
verbose = false)

optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
@test result.original.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1

data_distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
@@ -33,11 +33,11 @@ diff_distributions = [fit_mle(Normal,
for j in 2:200, i in 1:2]
obj = build_loss_objective(prob1, Tsit5(),
LogLikeLoss(t, data_distributions, diff_distributions),
-Optimization.AutoForwardDiff(), maxiters = 10000,
+Optimization.AutoForwardDiff(), maxiters = 1000,
verbose = false)
optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
@test result.original.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1

data_distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
@@ -46,14 +46,14 @@ diff_distributions = [fit_mle(Normal,
for j in 2:200, i in 1:2]
obj = build_loss_objective(prob1, Tsit5(),
LogLikeLoss(t, data_distributions, diff_distributions, 0.3),
-Optimization.AutoForwardDiff(), maxiters = 10000,
+Optimization.AutoForwardDiff(), maxiters = 1000,
verbose = false)
optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
@test result.u≈[1.5, 1.0] atol=1e-1
using OptimizationBBO.BlackBoxOptim
-result = bboptimize(obj, SearchRange = [(0.5, 5.0), (0.5, 5.0)], MaxSteps = 11e3)
+result = bboptimize(obj, SearchRange = [(0.5, 5.0), (0.5, 5.0)], MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1

distributions = [fit_mle(MvNormal, aggregate_data[:, j, :]) for j in 1:200]
@@ -63,9 +63,9 @@ diff_distributions = [fit_mle(MvNormal,
priors = [Truncated(Normal(1.5, 0.1), 0, 2), Truncated(Normal(1.0, 0.1), 0, 1.5)]
obj = build_loss_objective(prob1, Tsit5(),
LogLikeLoss(t, distributions, diff_distributions),
-Optimization.AutoForwardDiff(), maxiters = 10000,
+Optimization.AutoForwardDiff(), maxiters = 1000,
verbose = false, priors = priors)
optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
@test result.u≈[1.5, 1.0] atol=1e-1
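For context on the hunks above: `LogLikeLoss` builds its cost from distributions fitted to replicate data, scoring a candidate solution by its negative log-likelihood under them. A hedged, self-contained sketch with synthetic stand-in data (the suite's real replicates come from `generate_data`):

```julia
using Distributions

# Stand-in replicate data: 2 states × 200 save points × 100 noisy replicates.
aggregate_data = 1.0 .+ 0.1 .* randn(2, 200, 100)

# One Normal fitted per (state, time) cell by maximum likelihood; LogLikeLoss
# then scores a trajectory u by roughly -sum(logpdf(dists[i, j], u[i](t_j))).
dists = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
```

The second distribution array seen in the later hunks (fitted over `j in 2:200`) appears to add a likelihood term on solution increments, with the trailing scalar (`0.3`) weighting the two terms.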
2 changes: 1 addition & 1 deletion test/multiple_shooting_objective_test.jl
@@ -18,7 +18,7 @@ ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data),
Optimization.AutoZygote();
discontinuity_weight = 1.0, abstol = 1e-12,
reltol = 1e-12)
-result = bboptimize(ms_obj; SearchRange = bound, MaxSteps = 21e3)
+result = bboptimize(ms_obj; SearchRange = bound, MaxSteps = 1e3)
@test result.archive_output.best_candidate[(end - 1):end]≈[1.5, 1.0] atol=2e-1

priors = [Truncated(Normal(1.5, 0.5), 0, 2), Truncated(Normal(1.0, 0.5), 0, 1.5)]
12 changes: 6 additions & 6 deletions test/tests_on_odes/blackboxoptim_test.jl
@@ -2,19 +2,19 @@ using BlackBoxOptim

println("Use BlackBoxOptim to fit the parameter")
cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
bound1 = Tuple{Float64, Float64}[(1, 2)]
-result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 5e3)
+result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 1e3)
@test result.archive_output.best_candidate[1]≈1.5 atol=3e-1

cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
bound2 = Tuple{Float64, Float64}[(1, 2), (2, 4)]
-result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 5e3)
+result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1

cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)]
-result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 5e3)
+result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
6 changes: 3 additions & 3 deletions test/tests_on_odes/genetic_algorithm_test.jl
@@ -14,7 +14,7 @@ println("Use Genetic Algorithm to fit the parameter")
# Floating number specifies fraction of population.

cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
N = 1
result, fitness, cnt = ga(cost_function, N;
initPopulation = Float64[1.2],
@@ -26,7 +26,7 @@ result, fitness, cnt = ga(cost_function, N;
@test result[1]≈1.5 atol=3e-1

cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
-maxiters = 10000)
+maxiters = 1000)
N = 2
result, fitness, cnt = ga(cost_function, N;
initPopulation = Float64[1.2, 2.8],
@@ -38,7 +38,7 @@ result, fitness, cnt = ga(cost_function, N;
@test result≈[1.5; 3.0] atol=3e-1

cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
-maxiters = 10000)
+maxiters = 1000)
N = 4
result, fitness, cnt = ga(cost_function, N;
initPopulation = Float64[1.3, 0.8, 2.8, 1.2],
8 changes: 4 additions & 4 deletions test/tests_on_odes/l2_colloc_grad_test.jl
@@ -2,30 +2,30 @@ weight = 1.0e-6

cost_function = build_loss_objective(prob1, Tsit5(),
L2Loss(t, data, colloc_grad = colloc_grad(t, data)),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, 1.0, 2.0)
@test result.minimizer≈1.5 atol=3e-1

cost_function = build_loss_objective(prob2, Tsit5(),
L2Loss(t, data,
differ_weight = weight, data_weight = weight,
colloc_grad = colloc_grad(t, data)),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, [1.3, 2.8], Optim.BFGS())
@test result.minimizer≈[1.5; 3.0] atol=3e-1

cost_function = build_loss_objective(prob3, Tsit5(),
L2Loss(t, data,
differ_weight = weight,
colloc_grad = colloc_grad(t, data)),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, [1.4, 0.9, 2.9, 1.2], Optim.BFGS())
@test result.minimizer≈[1.5, 1.0, 3.0, 1.0] atol=3e-1

cost_function = build_loss_objective(prob1, Tsit5(),
L2Loss(t, data,
data_weight = weight,
colloc_grad = colloc_grad(t, data)),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, 1.0, 2)
@test result.minimizer≈1.5 atol=3e-1
8 changes: 4 additions & 4 deletions test/tests_on_odes/l2loss_test.jl
@@ -3,27 +3,27 @@ using BlackBoxOptim, Optim
cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
maxiters = 10000, verbose = false)
bound1 = Tuple{Float64, Float64}[(1, 2)]
-result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 1e3)
@test result.archive_output.best_candidate[1]≈1.5 atol=3e-1

cost_function = build_loss_objective(prob2, Tsit5(),
L2Loss(t, data, differ_weight = nothing,
data_weight = 1.0),
maxiters = 10000, verbose = false)
bound2 = Tuple{Float64, Float64}[(1, 2), (1, 4)]
-result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1

cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data, differ_weight = 10),
maxiters = 10000, verbose = false)
bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)]
-result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1

cost_function = build_loss_objective(prob3, Tsit5(),
L2Loss(t, data, differ_weight = 0.3,
data_weight = 0.7),
maxiters = 10000, verbose = false)
bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (1, 4), (0, 2)]
-result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
10 changes: 5 additions & 5 deletions test/tests_on_odes/nlopt_test.jl
@@ -3,7 +3,7 @@ using OptimizationNLopt, Zygote
println("Use NLOpt to fit the parameter")

obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)

opt = Opt(:LN_COBYLA, 1)
optprob = OptimizationNLopt.OptimizationProblem(obj, [1.4])
@@ -14,25 +14,25 @@ opt = Opt(:GN_ESCH, 1)
lower_bounds!(opt, [1.0])
upper_bounds!(opt, [3.0])
xtol_rel!(opt, 1e-3)
-maxeval!(opt, 10000)
+maxeval!(opt, 1000)
res = solve(optprob, opt)
@test res.u[1]≈1.5 atol=1e-1

opt = Opt(:GN_ISRES, 1)
lower_bounds!(opt, [1.0])
upper_bounds!(opt, [3.0])
xtol_rel!(opt, 1e-4)
-maxeval!(opt, 100_000)
+maxeval!(opt, 1000)
res = solve(optprob, opt)
@test res.u[1]≈1.5 atol=1e-1

# test differentiation

obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoForwardDiff();
-maxiters = 10000) #zygote behaves weirdly here
+maxiters = 1000) #zygote behaves weirdly here
opt = Opt(:LD_MMA, 1)
xtol_rel!(opt, 1e-3)
-maxeval!(opt, 10000)
+maxeval!(opt, 1000)
optprob = OptimizationNLopt.OptimizationProblem(obj, [1.3])
res = solve(optprob, opt)
@test res.u[1]≈1.5 atol=1e-1
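The NLopt hunks use the standard mutating `Opt` API; here is a minimal, self-contained sketch with a toy quadratic objective standing in for the suite's `obj`:

```julia
using NLopt

# Toy objective in NLopt's (x, grad) convention; minimum at x = 1.5.
function toy_obj(x::Vector, grad::Vector)
    if length(grad) > 0
        grad[1] = 2 * (x[1] - 1.5)  # gradient, used by :LD_* methods
    end
    return (x[1] - 1.5)^2
end

opt = Opt(:LD_MMA, 1)
lower_bounds!(opt, [1.0])
upper_bounds!(opt, [3.0])
xtol_rel!(opt, 1e-3)
maxeval!(opt, 1000)              # the evaluation budget this commit lowers
min_objective!(opt, toy_obj)
(minf, minx, ret) = NLopt.optimize(opt, [1.3])
```

`maxeval!` caps objective evaluations, so it bounds runtime even when the `xtol_rel` tolerance is never reached.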
2 changes: 1 addition & 1 deletion test/tests_on_odes/optim_test.jl
@@ -1,6 +1,6 @@
using Optim, Random
obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)

### Optim Method

12 changes: 6 additions & 6 deletions test/tests_on_odes/regularization_test.jl
@@ -2,32 +2,32 @@ using PenaltyFunctions, OptimizationOptimJL, LinearAlgebra, SciMLSensitivity

cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
Optimization.AutoZygote(),
-Regularization(0.6, L2Penalty()), maxiters = 10000,
+Regularization(0.6, L2Penalty()), maxiters = 1000,
verbose = false, abstol = 1e-8, reltol = 1e-8)
cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
Optimization.AutoZygote(),
Regularization(0.1,
MahalanobisPenalty(Matrix(1.0I, 2, 2))),
verbose = false,
abstol = 1e-8, reltol = 1e-8,
-maxiters = 10000)
+maxiters = 1000)
cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
Optimization.AutoZygote(),
Regularization(0.1,
MahalanobisPenalty(Matrix(1.0I, 4, 4))),
verbose = false,
abstol = 1e-8, reltol = 1e-8,
-maxiters = 10000)
+maxiters = 1000)

println("Use Optim BFGS to fit the parameter")
optprob = Optimization.OptimizationProblem(cost_function_1, [1.0])
-result = solve(optprob, Optim.LBFGS())
+result = solve(optprob, Optim.BFGS())
@test result.u[1]≈1.5 atol=3e-1

optprob = Optimization.OptimizationProblem(cost_function_2, [1.2, 2.7])
-result = solve(optprob, Optim.LBFGS())
+result = solve(optprob, Optim.BFGS())
@test result.minimizer≈[1.5; 3.0] atol=3e-1

optprob = Optimization.OptimizationProblem(cost_function_3, [1.3, 0.8, 2.8, 1.2])
-result = solve(optprob, Optim.LBFGS())
+result = solve(optprob, Optim.BFGS())
@test result.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
2 changes: 1 addition & 1 deletion test/tests_on_odes/weighted_loss_test.jl
@@ -22,7 +22,7 @@ weighted_data = original_solution_matrix_form + error
weighted_cost_function = build_loss_objective(prob1, Tsit5(),
L2Loss(t, weighted_data,
data_weight = weight),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
opt = Opt(:LN_COBYLA, 1)
min_objective!(opt, weighted_cost_function)
(minf, minx, ret) = NLopt.optimize(opt, [1.3])
