diff --git a/Project.toml b/Project.toml index f163df79..52dab18d 100644 --- a/Project.toml +++ b/Project.toml @@ -8,6 +8,7 @@ ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a" DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" JSOSolvers = "10dff2fc-5484-5881-a0e0-c90441020f8a" JuMP = "4076af6c-e467-56ae-b986-b466b2749572" +KNITRO = "67920dd8-b58e-52a8-8622-53c4cffbe346" LLSModels = "39f5bc3e-5160-4bf8-ac48-504fd2534d24" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearOperators = "5c8ed15e-5a4c-59e4-a42b-c7e8811fb125" @@ -15,17 +16,36 @@ Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6" NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e" NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f" +PackageExtensionCompat = "65ce6f38-6b18-4e1d-a461-8949797d7930" Percival = "01435c0c-c90d-11e9-3788-63660f8fbccc" QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19" -Requires = "ae029012-a4dd-5104-9daa-d747884805df" SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +[weakdeps] +CaNNOLeS = "5a1c9e79-9c58-5ec0-afc4-3298fdea2875" +DCISolver = "bee2e536-65f6-11e9-3844-e5bb4c9c55c9" +FletcherPenaltySolver = "e59f0261-166d-4fee-8bf3-5e50457de5db" +NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71" +NLPModelsKnitro = "bec4dd0d-7755-52d5-9a02-22f0ffc7efcb" +RipQP = "1e40b3f8-35eb-4cd8-8edd-3e515bb9de08" +SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a" + +[extensions] +CaNNOLeSExt = ["CaNNOLeS"] +DCISolverExt = ["DCISolver"] +FletcherPenaltySolverExt = ["FletcherPenaltySolver"] +NLPModelsIpoptExt = ["NLPModelsIpopt"] +NLPModelsKnitroExt = ["NLPModelsKnitro"] +RipQPExt = ["RipQP"] +SolverBenchmarkExt = ["SolverBenchmark"] + [compat] ADNLPModels = "0.4, 0.5, 0.6, 0.7" DataFrames = "1" JSOSolvers = "0.10, 0.11" JuMP = "1" +KNITRO = "0.13.0" LLSModels = "0.3.6" LinearOperators = "2" NLPModels = "0.19, 0.20" @@ -33,6 +53,5 @@ NLPModelsJuMP = 
"0.11, 0.12" NLPModelsModifiers = "0.6.2" Percival = "0.6, 0.7" QuadraticModels = "0.9.2" -Requires = "1" SolverCore = "0.3" julia = "^1.6.0" diff --git a/docs/src/benchmark.md b/docs/src/benchmark.md index 579dc0e3..77918455 100644 --- a/docs/src/benchmark.md +++ b/docs/src/benchmark.md @@ -43,7 +43,7 @@ using NLPModelsIpopt selected_optimizers = JSOSuite.optimizers # optimizers can solve general `nlp` as some are specific to variants (NLS, ...) selected_optimizers = selected_optimizers[selected_optimizers.can_solve_nlp, :] -selected_optimizers[selected_optimizers.is_available, :] # optimizers available +is_available(selected_optimizers) # optimizers available ``` For the purpose of this example, we will consider 3 optimizers. diff --git a/ext/CaNNOLeSExt.jl b/ext/CaNNOLeSExt.jl new file mode 100644 index 00000000..1ef3082c --- /dev/null +++ b/ext/CaNNOLeSExt.jl @@ -0,0 +1,9 @@ +module CaNNOLeSExt + + using CaNNOLeS, JSOSuite + + function JSOSuite.minimize(::Val{:CaNNOLeS}, nlp; kwargs...) + return CaNNOLeS.cannoles(nlp; linsolve = :ldlfactorizations, kwargs...) + end + +end diff --git a/ext/DCISolverExt.jl b/ext/DCISolverExt.jl new file mode 100644 index 00000000..37deaccc --- /dev/null +++ b/ext/DCISolverExt.jl @@ -0,0 +1,7 @@ +module DCISolverExt + +using DCISolver, JSOSuite + +JSOSuite.minimize(::Val{:DCISolver}, nlp; kwargs...) = DCISolver.dci(nlp; kwargs...) + +end diff --git a/ext/FletcherPenaltySolverExt.jl b/ext/FletcherPenaltySolverExt.jl new file mode 100644 index 00000000..07cb7bfa --- /dev/null +++ b/ext/FletcherPenaltySolverExt.jl @@ -0,0 +1,9 @@ +module FletcherPenaltySolverExt + +using FletcherPenaltySolver, JSOSuite + +function JSOSuite.minimize(::Val{:FletcherPenaltySolver}, nlp; kwargs...) + return FletcherPenaltySolver.fps_solve(nlp; kwargs...)
+end + +end diff --git a/src/solvers/ipopt_solve.jl b/ext/NLPModelsIpoptExt.jl similarity index 94% rename from src/solvers/ipopt_solve.jl rename to ext/NLPModelsIpoptExt.jl index 03c7082e..f300beee 100644 --- a/src/solvers/ipopt_solve.jl +++ b/ext/NLPModelsIpoptExt.jl @@ -1,3 +1,6 @@ +module NLPModelsIpoptExt + +using NLPModelsIpopt, JSOSuite # Selection of possible [options](https://coin-or.github.io/Ipopt/OPTIONS.html#OPTIONS_REF). -function minimize(::Val{:IPOPT}, nlp; kwargs...) +function JSOSuite.minimize(::Val{:IPOPT}, nlp; kwargs...) keywords = Dict(kwargs) @@ -34,3 +37,5 @@ function minimize(::Val{:IPOPT}, nlp; kwargs...) end return NLPModelsIpopt.ipopt(nlp; keywords...) end + +end diff --git a/src/solvers/knitro_solve.jl b/ext/NLPModelsKnitroExt.jl similarity index 93% rename from src/solvers/knitro_solve.jl rename to ext/NLPModelsKnitroExt.jl index d9d52887..60e07aed 100644 --- a/src/solvers/knitro_solve.jl +++ b/ext/NLPModelsKnitroExt.jl @@ -1,3 +1,7 @@ +module NLPModelsKnitroExt + +using NLPModelsKnitro, JSOSuite + # See https://www.artelys.com/docs/knitro/3_referenceManual/userOptions.html for the list of options accepted. -function minimize(::Val{:KNITRO}, nlp; kwargs...) +function JSOSuite.minimize(::Val{:KNITRO}, nlp; kwargs...) keywords = Dict(kwargs) @@ -25,3 +29,5 @@ function minimize(::Val{:KNITRO}, nlp; kwargs...) end return NLPModelsKnitro.knitro(nlp; keywords...) end + +end diff --git a/src/solvers/ripqp_solve.jl b/ext/RipQPExt.jl similarity index 95% rename from src/solvers/ripqp_solve.jl rename to ext/RipQPExt.jl index b9c41059..4c7d842a 100644 --- a/src/solvers/ripqp_solve.jl +++ b/ext/RipQPExt.jl @@ -1,3 +1,7 @@ +module RipQPExt + +using RipQP, JSOSuite, LLSModels, QuadraticModels + -function minimize( +function JSOSuite.minimize( ::Val{:RipQP}, nlp::Union{QuadraticModel{T0}, LLSModel{T0}}; @@ -60,3 +64,5 @@ function minimize( end return RipQP.ripqp(nlp; itol = itol, keywords...)
end + +end diff --git a/ext/SolverBenchmarkExt.jl b/ext/SolverBenchmarkExt.jl new file mode 100644 index 00000000..779953bc --- /dev/null +++ b/ext/SolverBenchmarkExt.jl @@ -0,0 +1,31 @@ +module SolverBenchmarkExt + +using SolverBenchmark, JSOSuite +function SolverBenchmark.bmark_solvers( + problems, + solver_names::Vector{String}, + solvers::Dict{Symbol, Function} = Dict{Symbol, Function}(); + atol::Real = √eps(), + rtol::Real = √eps(), + verbose::Integer = 0, + max_time::Float64 = 300.0, + max_eval::Integer = 10000, + max_iter::Integer = 10000, + kwargs..., + ) + for s in solver_names + solvers[Symbol(s)] = + nlp -> minimize( + s, + nlp; + atol = atol, + rtol = rtol, + verbose = verbose, + max_time = max_time, + max_eval = max_eval, + max_iter = max_iter, + ) + end + return SolverBenchmark.bmark_solvers(solvers, problems; kwargs...) + end +end diff --git a/src/JSOSuite.jl b/src/JSOSuite.jl index db7d352f..33f831a0 100644 --- a/src/JSOSuite.jl +++ b/src/JSOSuite.jl @@ -1,7 +1,12 @@ module JSOSuite +using PackageExtensionCompat +function __init__() + @require_extensions +end + # other dependencies -using DataFrames, JuMP, Requires +using DataFrames, JuMP, KNITRO # stdlib using LinearAlgebra, Logging, SparseArrays # JSO @@ -20,7 +25,6 @@ For each solver, the following are available: - `name_solver::Symbol`: name of the solver structure for in-place solve, `:not_implemented` if not implemented; - `name_pkg::String`: name of the package implementing this solver or its NLPModel wrapper; - `solve_function::Symbol`: name of the function; -- `is_available::Bool`: `true` if the solver is available; - `bounds::Bool`: `true` if the solver can handle bound constraints; - `equalities::Bool`: `true` if the solver can handle equality constraints; - `inequalities::Bool`: `true` if the solver can handle inequality constraints; @@ -36,7 +40,6 @@ optimizers = DataFrame( name_solver = Symbol[], name_pkg = String[], solve_function = Symbol[], - is_available = Bool[], bounds = Bool[], 
equalities = Bool[], inequalities = Bool[], @@ -54,7 +57,6 @@ push!( :KnitroSolver, "NLPModelsKnitro.jl", :knitro, - false, true, true, true, @@ -73,7 +75,6 @@ push!( :LBFGSSolver, "JSOSolvers.jl", :lbfgs, - true, false, false, false, @@ -92,7 +93,6 @@ push!( :R2Solver, "JSOSolvers.jl", :R2, - true, false, false, false, @@ -112,7 +112,6 @@ push!( "JSOSolvers.jl", :tron, true, - true, false, false, false, @@ -130,7 +129,6 @@ push!( :TrunkSolver, "JSOSolvers.jl", :trunk, - true, false, false, false, @@ -145,12 +143,11 @@ push!( push!( optimizers, ( - "TRON-NLS", + "TRONNLS", :TronSolverNLS, "JSOSolvers.jl", :tron, true, - true, false, false, true, @@ -164,11 +161,10 @@ push!( push!( optimizers, ( - "TRUNK-NLS", + "TRUNKNLS", :TrunkSolverNLS, "JSOSolvers.jl", :trunk, - true, false, false, false, @@ -188,7 +184,6 @@ push!( "CaNNOLeS.jl", :cannoles, false, - false, true, false, true, @@ -206,7 +201,6 @@ push!( :IpoptSolver, "NLPModelsIpopt.jl", :ipopt, - false, true, true, true, @@ -226,7 +220,6 @@ push!( "DCISolver.jl", :dci, false, - false, true, false, false, @@ -245,7 +238,6 @@ push!( "FletcherPenaltySolver.jl", :fps_solve, false, - false, true, false, false, @@ -266,7 +258,6 @@ push!( true, true, true, - true, false, true, true, @@ -282,7 +273,6 @@ push!( :RipQPSolver, "RipQP.jl", :ripqp, - false, true, true, true, @@ -295,9 +285,41 @@ push!( ), ) # need to check linear constraints and quadratic constraints +""" + is_available(name::String) + is_available(name::Symbol) + is_available(::Val{name}) + + is_available(df::DataFrame) + +Return `true` if the solver `name` is available. 
+""" +is_available(name::String) = is_available(Symbol(name)) +is_available(name::Symbol) = is_available(Val(name)) + +is_available(::Val{name}) where {name} = false +is_available(::Val{:R2}) = true +is_available(::Val{:LBFGS}) = true +is_available(::Val{:TRON}) = true +is_available(::Val{:TRUNK}) = true +is_available(::Val{:TRONNLS}) = true +is_available(::Val{:TRUNKNLS}) = true +is_available(::Val{:Percival}) = true +is_available(::Val{:CaNNOLeS}) = !isnothing(Base.get_extension(JSOSuite, :CaNNOLeSExt)) +is_available(::Val{:DCISolver}) = !isnothing(Base.get_extension(JSOSuite, :DCISolverExt)) +is_available(::Val{:FletcherPenaltySolver}) = !isnothing(Base.get_extension(JSOSuite, :FletcherPenaltySolverExt)) +is_available(::Val{:IPOPT}) = !isnothing(Base.get_extension(JSOSuite, :NLPModelsIpoptExt)) +is_available(::Val{:KNITRO}) = !isnothing(Base.get_extension(JSOSuite, :NLPModelsKnitroExt)) && KNITRO.has_knitro() +is_available(::Val{:RipQP}) = !isnothing(Base.get_extension(JSOSuite, :RipQPExt)) + +function is_available(df::DataFrame) + available = [is_available(name) for name in df[!,:name]] + return df[available, :] +end + include("selection.jl") -export minimize, solve!, feasible_point +export minimize, solve!, feasible_point, is_available """ stats = minimize(nlp::Union{AbstractNLPModel, JuMP.Model}; kwargs...) @@ -418,59 +440,6 @@ include("solve_model.jl") include("solve.jl") -@init begin - @require CaNNOLeS = "5a1c9e79-9c58-5ec0-afc4-3298fdea2875" begin - JSOSuite.optimizers[JSOSuite.optimizers.name .== "CaNNOLeS", :is_available] .= 1 - function minimize(::Val{:CaNNOLeS}, nlp; kwargs...) - return CaNNOLeS.cannoles(nlp; linsolve = :ldlfactorizations, kwargs...) - end - end -end - -@init begin - @require DCISolver = "bee2e536-65f6-11e9-3844-e5bb4c9c55c9" begin - JSOSuite.optimizers[JSOSuite.optimizers.name .== "DCISolver", :is_available] .= 1 - function minimize(::Val{:DCISolver}, nlp; kwargs...) - return DCISolver.dci(nlp; kwargs...) 
- end - end -end - -@init begin - @require FletcherPenaltySolver = "e59f0261-166d-4fee-8bf3-5e50457de5db" begin - JSOSuite.optimizers[JSOSuite.optimizers.name .== "FletcherPenaltySolver", :is_available] .= 1 - function minimize(::Val{:FletcherPenaltySolver}, nlp; kwargs...) - return FletcherPenaltySolver.fps_solve(nlp; kwargs...) - end - end -end - -@init begin - @require NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71" begin - JSOSuite.optimizers[JSOSuite.optimizers.name .== "IPOPT", :is_available] .= 1 - include("solvers/ipopt_solve.jl") - end -end - -@init begin - @require NLPModelsKnitro = "bec4dd0d-7755-52d5-9a02-22f0ffc7efcb" begin - @init begin - @require KNITRO = "67920dd8-b58e-52a8-8622-53c4cffbe346" begin - JSOSuite.optimizers[JSOSuite.optimizers.name .== "KNITRO", :is_available] .= - KNITRO.has_knitro() - end - end - include("solvers/knitro_solve.jl") - end -end - -@init begin - @require RipQP = "1e40b3f8-35eb-4cd8-8edd-3e515bb9de08" begin - JSOSuite.optimizers[JSOSuite.optimizers.name .== "RipQP", :is_available] .= 1 - include("solvers/ripqp_solve.jl") - end -end - """ bmark_solvers(problems, solver_names::Vector{String}; kwargs...) bmark_solvers(problems, solver_names::Vector{String}, solvers::Dict{Symbol, Function}; kwargs...) @@ -559,38 +528,6 @@ KeySet for a Dict{Symbol, DataFrames.DataFrame} with 3 entries. 
Keys: """ function bmark_solvers end -@init begin - @require SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a" begin - function SolverBenchmark.bmark_solvers( - problems, - solver_names::Vector{String}, - solvers::Dict{Symbol, Function} = Dict{Symbol, Function}(); - atol::Real = √eps(), - rtol::Real = √eps(), - verbose::Integer = 0, - max_time::Float64 = 300.0, - max_eval::Integer = 10000, - max_iter::Integer = 10000, - kwargs..., - ) - for s in solver_names - solvers[Symbol(s)] = - nlp -> minimize( - s, - nlp; - atol = atol, - rtol = rtol, - verbose = verbose, - max_time = max_time, - max_eval = max_eval, - max_iter = max_iter, - ) - end - return SolverBenchmark.bmark_solvers(solvers, problems; kwargs...) - end - end -end - """ stats = feasible_point(nlp::Union{AbstractNLPModel, JuMP.Model}; kwargs...) stats = feasible_point(nlp::Union{AbstractNLPModel, JuMP.Model}, solver_name::Symbol; kwargs...) diff --git a/src/selection.jl b/src/selection.jl index b0ec442d..ca1d6185 100644 --- a/src/selection.jl +++ b/src/selection.jl @@ -52,8 +52,8 @@ Algorithm selection: - nonlinear objective: ✓; - may use 2-th order derivative. There are 7 optimizers available: -["LBFGS", "R2", "TRON", "TRUNK", "TRON-NLS", "TRUNK-NLS", "Percival"]. -["LBFGS", "R2", "TRON", "TRUNK", "TRON-NLS", "TRUNK-NLS", "Percival"] +["LBFGS", "R2", "TRON", "TRUNK", "TRONNLS", "TRUNKNLS", "Percival"]. +["LBFGS", "R2", "TRON", "TRUNK", "TRONNLS", "TRUNKNLS", "Percival"] ``` """ function select_optimizers( @@ -107,7 +107,7 @@ function select_optimizers( all_select = copy(select) nsolvers_total_before_derivative = nrow(all_select) - select = select[select.is_available, :] + select = is_available(select) nsolvers_before_derivative = nrow(select) if nsolvers_before_derivative == 0 diff --git a/src/solve.jl b/src/solve.jl index 818a9da1..a03593d0 100644 --- a/src/solve.jl +++ b/src/solve.jl @@ -54,11 +54,3 @@ function minimize(::Val{solver_name}, nlp; kwargs...) 
where {solver_name} end return eval(solver.solve_function[1])(nlp; kwargs...) end - -function is_available(solver::Symbol) - solver_isa = optimizers[optimizers.name .== string(solver), :is_available] - if solver_isa == [] - return false - end - return solver_isa[1] -end diff --git a/test/runtests.jl b/test/runtests.jl index 3ae3b766..385e929b 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -61,21 +61,20 @@ end nlp = OptimizationProblems.ADNLPProblems.arglina() model = OptimizationProblems.PureJuMP.arglina() @testset "Test $solver_name" for solver_name in JSOSuite.optimizers[!, :name_solver] + name = JSOSuite.optimizers[JSOSuite.optimizers.name_solver .== solver_name, :name][1] solver_name == :DCIWorkspace && continue solver_name == :RipQPSolver && continue - is_available = - JSOSuite.optimizers[JSOSuite.optimizers.name_solver .== solver_name, :is_available] can_solve_nlp = JSOSuite.optimizers[JSOSuite.optimizers.name_solver .== solver_name, :can_solve_nlp] spec_nls = JSOSuite.optimizers[JSOSuite.optimizers.name_solver .== solver_name, :specialized_nls] - if is_available[1] && can_solve_nlp[1] + if is_available(name) && can_solve_nlp[1] test_in_place_solve(nlp, solver_name) test_in_place_solve(model, solver_name) - elseif is_available[1] && spec_nls[1] # NLS + elseif is_available(name) && spec_nls[1] # NLS nls = OptimizationProblems.ADNLPProblems.arglina(use_nls = true) test_in_place_solve(nls, solver_name) - elseif is_available[1] # RipQP + elseif is_available(name) # RipQP nlp_qm = QuadraticModel(nlp, nlp.meta.x0) test_in_place_solve(nlp_qm, solver_name) end @@ -114,10 +113,8 @@ end OptimizationProblems.ADNLPProblems.eval(Meta.parse(problem))() for problem ∈ meta[(5 .<= meta.nvar .<= 10) .& (meta.ncon .== 0) .& (.!meta.has_bounds), :name] ] - select = JSOSuite.optimizers[ - JSOSuite.optimizers.can_solve_nlp .& JSOSuite.optimizers.is_available, - :name, - ] + select = is_available(JSOSuite.optimizers[JSOSuite.optimizers.can_solve_nlp, :]) + select = select[!,
:name] stats = bmark_solvers(ad_problems, select, atol = 1e-3, max_time = 10.0, verbose = 0) @test true # just test that it runs end @@ -160,7 +157,7 @@ for solver in eachrow(JSOSuite.optimizers) nlp = mgh17() @testset "Test options in $(solver.name)" begin # We just test that the solver runs with the options - if solver.is_available + if is_available(solver.name) if solver.nonlinear_obj minimize( solver.name, @@ -195,7 +192,7 @@ end callback = (args...) -> nothing for solver in eachrow(JSOSuite.optimizers) @testset "Test options in $(solver.name)" begin - solver.is_available || continue + is_available(solver.name) || continue ((nlp.meta.ncon > 0) && (!solver.equalities)) && continue # We just test that the solver runs with the options if solver.can_solve_nlp