
Commit 3720713

All extension algorithms are working again
1 parent 9206a3f commit 3720713

File tree: 9 files changed (+189 additions, -205 deletions)

ext/NonlinearSolveFastLevenbergMarquardtExt.jl

Lines changed: 16 additions & 21 deletions
@@ -2,8 +2,9 @@ module NonlinearSolveFastLevenbergMarquardtExt
 
 using ArrayInterface, NonlinearSolve, SciMLBase
 import ConcreteStructs: @concrete
+import FastClosures: @closure
 import FastLevenbergMarquardt as FastLM
-import FiniteDiff, ForwardDiff
+import StaticArraysCore: StaticArray
 
 @inline function _fast_lm_solver(::FastLevenbergMarquardtJL{linsolve}, x) where {linsolve}
     if linsolve === :cholesky
@@ -28,31 +29,25 @@ end
 
 function SciMLBase.__init(prob::NonlinearLeastSquaresProblem,
     alg::FastLevenbergMarquardtJL, args...; alias_u0 = false, abstol = nothing,
-    reltol = nothing, maxiters = 1000, kwargs...)
-    # FIXME: Support scalar u0
-    prob.u0 isa Number &&
-        throw(ArgumentError("FastLevenbergMarquardtJL does not support scalar `u0`"))
-    iip = SciMLBase.isinplace(prob)
-    u = NonlinearSolve.__maybe_unaliased(prob.u0, alias_u0)
-    fu = NonlinearSolve.evaluate_f(prob, u)
-
-    f! = NonlinearSolve.__make_inplace{iip}(prob.f, nothing)
+    reltol = nothing, maxiters = 1000, termination_condition = nothing, kwargs...)
+    NonlinearSolve.__test_termination_condition(termination_condition,
+        :FastLevenbergMarquardt)
+    if prob.u0 isa StaticArray # FIXME
+        error("FastLevenbergMarquardtJL does not support StaticArrays yet.")
+    end
 
+    _f!, u, resid = NonlinearSolve.__construct_extension_f(prob; alias_u0)
+    f! = @closure (du, u, p) -> _f!(du, u)
     abstol = NonlinearSolve.DEFAULT_TOLERANCE(abstol, eltype(u))
     reltol = NonlinearSolve.DEFAULT_TOLERANCE(reltol, eltype(u))
 
-    if prob.f.jac === nothing
-        alg = NonlinearSolve.get_concrete_algorithm(alg, prob)
-        J! = NonlinearSolve.__construct_jac(prob, alg, u;
-            can_handle_arbitrary_dims = Val(true))
-    else
-        J! = NonlinearSolve.__make_inplace{iip}(prob.f.jac, nothing)
-    end
-
-    J = similar(u, length(fu), length(u))
+    _J! = NonlinearSolve.__construct_extension_jac(prob, alg, u, resid; alg.autodiff)
+    J! = @closure (J, u, p) -> _J!(J, u)
+    J = prob.f.jac_prototype === nothing ? similar(u, length(resid), length(u)) :
+        zero(prob.f.jac_prototype)
 
     solver = _fast_lm_solver(alg, u)
-    LM = FastLM.LMWorkspace(u, fu, J)
+    LM = FastLM.LMWorkspace(u, resid, J)
 
     return FastLevenbergMarquardtJLCache(f!, J!, prob, alg, LM, solver,
         (; xtol = reltol, ftol = reltol, gtol = abstol, maxit = maxiters, alg.factor,
@@ -62,7 +57,7 @@ end
 
 function SciMLBase.solve!(cache::FastLevenbergMarquardtJLCache)
     res, fx, info, iter, nfev, njev, LM, solver = FastLM.lmsolve!(cache.f!, cache.J!,
-        cache.lmworkspace, cache.prob.p; cache.solver, cache.kwargs...)
+        cache.lmworkspace; cache.solver, cache.kwargs...)
     stats = SciMLBase.NLStats(nfev, njev, -1, -1, iter)
     retcode = info == -1 ? ReturnCode.MaxIters : ReturnCode.Success
     return SciMLBase.build_solution(cache.prob, cache.alg, res, fx;
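
For context, a minimal usage sketch of the wrapper after this change (not part of the commit): it assumes FastLevenbergMarquardt.jl is installed, and the residual function and data below are purely illustrative.

```julia
using NonlinearSolve, FastLevenbergMarquardt

xdata = [0.0, 1.0, 2.0, 3.0]    # illustrative sample points
ydata = [1.0, 2.7, 7.4, 20.1]   # roughly exp.(xdata)

# In-place residual f!(du, u, p): fit y ≈ u[1] * exp(u[2] * x).
fit_residual!(du, u, p) = (du .= u[1] .* exp.(u[2] .* xdata) .- ydata)

prob = NonlinearLeastSquaresProblem(
    NonlinearFunction(fit_residual!; resid_prototype = zeros(4)), [1.0, 0.5])
sol = solve(prob, FastLevenbergMarquardtJL())  # linsolve defaults to :cholesky
```

Per the guard added above, passing `u0` as a `StaticArray` still errors for this wrapper.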

ext/NonlinearSolveLeastSquaresOptimExt.jl

Lines changed: 20 additions & 19 deletions
@@ -4,14 +4,13 @@ using NonlinearSolve, SciMLBase
 import ConcreteStructs: @concrete
 import LeastSquaresOptim as LSO
 
-@inline function _lso_solver(::LeastSquaresOptimJL{alg, linsolve}) where {alg, linsolve}
-    ls = linsolve === :qr ? LSO.QR() :
-         (linsolve === :cholesky ? LSO.Cholesky() :
-          (linsolve === :lsmr ? LSO.LSMR() : nothing))
+@inline function _lso_solver(::LeastSquaresOptimJL{alg, ls}) where {alg, ls}
+    linsolve = ls === :qr ? LSO.QR() :
+               (ls === :cholesky ? LSO.Cholesky() : (ls === :lsmr ? LSO.LSMR() : nothing))
     if alg === :lm
-        return LSO.LevenbergMarquardt(ls)
+        return LSO.LevenbergMarquardt(linsolve)
     elseif alg === :dogleg
-        return LSO.Dogleg(ls)
+        return LSO.Dogleg(linsolve)
     else
         throw(ArgumentError("Unknown LeastSquaresOptim Algorithm: $alg"))
     end
@@ -25,24 +24,26 @@ end
     kwargs
 end
 
-function SciMLBase.__init(prob::NonlinearLeastSquaresProblem, alg::LeastSquaresOptimJL,
-    args...; alias_u0 = false, abstol = nothing, show_trace::Val{ShT} = Val(false),
-    trace_level = TraceMinimal(), store_trace::Val{StT} = Val(false), maxiters = 1000,
-    reltol = nothing, kwargs...) where {ShT, StT}
-    iip = SciMLBase.isinplace(prob)
-    u = NonlinearSolve.__maybe_unaliased(prob.u0, alias_u0)
+function SciMLBase.__init(prob::Union{NonlinearLeastSquaresProblem, NonlinearProblem},
+    alg::LeastSquaresOptimJL, args...; alias_u0 = false, abstol = nothing,
+    show_trace::Val{ShT} = Val(false), trace_level = TraceMinimal(), reltol = nothing,
+    store_trace::Val{StT} = Val(false), maxiters = 1000,
+    termination_condition = nothing, kwargs...) where {ShT, StT}
+    NonlinearSolve.__test_termination_condition(termination_condition, :LeastSquaresOptim)
 
+    f!, u, resid = NonlinearSolve.__construct_extension_f(prob; alias_u0)
     abstol = NonlinearSolve.DEFAULT_TOLERANCE(abstol, eltype(u))
     reltol = NonlinearSolve.DEFAULT_TOLERANCE(reltol, eltype(u))
 
-    f! = NonlinearSolve.__make_inplace{iip}(prob.f, prob.p)
-    g! = NonlinearSolve.__make_inplace{iip}(prob.f.jac, prob.p)
-
-    resid_prototype = prob.f.resid_prototype === nothing ?
-        (!iip ? prob.f(u, prob.p) : zeros(u)) : prob.f.resid_prototype
+    if prob.f.jac === nothing && alg.autodiff isa Symbol
+        lsoprob = LSO.LeastSquaresProblem(; x = u, f!, y = resid, alg.autodiff,
+            J = prob.f.jac_prototype, output_length = length(resid))
+    else
+        g! = NonlinearSolve.__construct_extension_jac(prob, alg, u, resid; alg.autodiff)
+        lsoprob = LSO.LeastSquaresProblem(; x = u, f!, y = resid, g!,
+            J = prob.f.jac_prototype, output_length = length(resid))
+    end
 
-    lsoprob = LSO.LeastSquaresProblem(; x = u, f!, y = resid_prototype, g!,
-        J = prob.f.jac_prototype, alg.autodiff, output_length = length(resid_prototype))
     allocated_prob = LSO.LeastSquaresProblemAllocated(lsoprob, _lso_solver(alg))
 
     return LeastSquaresOptimJLCache(prob, alg, allocated_prob,
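
A usage sketch for the updated wrapper (not part of the commit), assuming LeastSquaresOptim.jl is installed; the residual function is illustrative. Note that the widened `__init` signature now also admits a plain `NonlinearProblem`.

```julia
using NonlinearSolve, LeastSquaresOptim

# Overdetermined system: 3 residuals, 2 unknowns (illustrative).
f(u, p) = [u[1]^2 - 2.0, u[1] * u[2] - 1.0, u[2] - 0.7]

prob = NonlinearLeastSquaresProblem(f, [1.0, 1.0])
sol = solve(prob, LeastSquaresOptimJL(:dogleg; linsolve = :qr, autodiff = :forward))

# The Union signature above also accepts a NonlinearProblem:
sol2 = solve(NonlinearProblem((u, p) -> u .^ 2 .- 2.0, [1.0]), LeastSquaresOptimJL(:lm))
```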

ext/NonlinearSolveNLsolveExt.jl

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ function SciMLBase.__solve(prob::NonlinearProblem, alg::NLsolveJL, args...;
 
     f!, u0, resid = NonlinearSolve.__construct_extension_f(prob; alias_u0)
 
-    if prob.f.jac === nothing
+    if prob.f.jac === nothing && alg.autodiff isa Symbol
         df = OnceDifferentiable(f!, u0, resid; alg.autodiff)
     else
         jac! = NonlinearSolve.__construct_extension_jac(prob, alg, u0, resid; alg.autodiff)
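
The extra `alg.autodiff isa Symbol` check routes a Symbol backend through NLsolve's own `OnceDifferentiable`, while anything else (an ADTypes backend or a user-supplied Jacobian) goes through `__construct_extension_jac`. A sketch of both call styles, assuming NLsolve.jl is installed; the problem is illustrative.

```julia
using NonlinearSolve, NLsolve
using ADTypes: AutoForwardDiff

f(u, p) = u .* u .- p
prob = NonlinearProblem(f, [1.0, 1.0], 2.0)

sol_symbol = solve(prob, NLsolveJL(; autodiff = :central))           # OnceDifferentiable path
sol_adtype = solve(prob, NLsolveJL(; autodiff = AutoForwardDiff()))  # extension-Jacobian path
```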

src/algorithms/extension_algs.jl

Lines changed: 113 additions & 92 deletions
@@ -1,97 +1,98 @@
 # This file only include the algorithm struct to be exported by NonlinearSolve.jl. The main
 # functionality is implemented as package extensions
-# """
-#     LeastSquaresOptimJL(alg = :lm; linsolve = nothing, autodiff::Symbol = :central)
-
-# Wrapper over [LeastSquaresOptim.jl](https://github.com/matthieugomez/LeastSquaresOptim.jl)
-# for solving `NonlinearLeastSquaresProblem`.
-
-# ## Arguments:
-
-#   - `alg`: Algorithm to use. Can be `:lm` or `:dogleg`.
-#   - `linsolve`: Linear solver to use. Can be `:qr`, `:cholesky` or `:lsmr`. If `nothing`,
-#     then `LeastSquaresOptim.jl` will choose the best linear solver based on the Jacobian
-#     structure.
-#   - `autodiff`: Automatic differentiation / Finite Differences. Can be `:central` or
-#     `:forward`.
-
-# !!! note
-
-#     This algorithm is only available if `LeastSquaresOptim.jl` is installed.
-# """
-# struct LeastSquaresOptimJL{alg, linsolve} <: AbstractNonlinearSolveExtensionAlgorithm
-#     autodiff::Symbol
-# end
-
-# function LeastSquaresOptimJL(alg = :lm; linsolve = nothing, autodiff::Symbol = :central)
-#     @assert alg in (:lm, :dogleg)
-#     @assert linsolve === nothing || linsolve in (:qr, :cholesky, :lsmr)
-#     @assert autodiff in (:central, :forward)
-
-#     if Base.get_extension(@__MODULE__, :NonlinearSolveLeastSquaresOptimExt) === nothing
-#         error("LeastSquaresOptimJL requires LeastSquaresOptim.jl to be loaded")
-#     end
-
-#     return LeastSquaresOptimJL{alg, linsolve}(autodiff)
-# end
-
-# """
-#     FastLevenbergMarquardtJL(linsolve = :cholesky; autodiff = nothing)
-
-# Wrapper over [FastLevenbergMarquardt.jl](https://github.com/kamesy/FastLevenbergMarquardt.jl)
-# for solving `NonlinearLeastSquaresProblem`.
-
-# !!! warning
-
-#     This is not really the fastest solver. It is called that since the original package
-#     is called "Fast". `LevenbergMarquardt()` is almost always a better choice.
-
-# ## Arguments:
-
-#   - `linsolve`: Linear solver to use. Can be `:qr` or `:cholesky`.
-#   - `autodiff`: determines the backend used for the Jacobian. Note that this argument is
-#     ignored if an analytical Jacobian is passed, as that will be used instead. Defaults to
-#     `nothing` which means that a default is selected according to the problem specification!
-#     Valid choices are `nothing`, `AutoForwardDiff` or `AutoFiniteDiff`.
-
-# !!! note
-
-#     This algorithm is only available if `FastLevenbergMarquardt.jl` is installed.
-# """
-# @concrete struct FastLevenbergMarquardtJL{linsolve} <: AbstractNonlinearSolveExtensionAlgorithm
-#     ad
-#     factor
-#     factoraccept
-#     factorreject
-#     factorupdate::Symbol
-#     minscale
-#     maxscale
-#     minfactor
-#     maxfactor
-# end
-
-# function set_ad(alg::FastLevenbergMarquardtJL{linsolve}, ad) where {linsolve}
-#     return FastLevenbergMarquardtJL{linsolve}(ad, alg.factor, alg.factoraccept,
-#         alg.factorreject, alg.factorupdate, alg.minscale, alg.maxscale, alg.minfactor,
-#         alg.maxfactor)
-# end
-
-# function FastLevenbergMarquardtJL(linsolve::Symbol = :cholesky; factor = 1e-6,
-#     factoraccept = 13.0, factorreject = 3.0, factorupdate = :marquardt,
-#     minscale = 1e-12, maxscale = 1e16, minfactor = 1e-28, maxfactor = 1e32,
-#     autodiff = nothing)
-#     @assert linsolve in (:qr, :cholesky)
-#     @assert factorupdate in (:marquardt, :nielson)
-#     @assert autodiff === nothing || autodiff isa AutoFiniteDiff ||
-#             autodiff isa AutoForwardDiff
-
-#     if Base.get_extension(@__MODULE__, :NonlinearSolveFastLevenbergMarquardtExt) === nothing
-#         error("FastLevenbergMarquardtJL requires FastLevenbergMarquardt.jl to be loaded")
-#     end
-
-#     return FastLevenbergMarquardtJL{linsolve}(autodiff, factor, factoraccept, factorreject,
-#         factorupdate, minscale, maxscale, minfactor, maxfactor)
-# end
+"""
+    LeastSquaresOptimJL(alg = :lm; linsolve = nothing, autodiff::Symbol = :central)
+
+Wrapper over [LeastSquaresOptim.jl](https://github.com/matthieugomez/LeastSquaresOptim.jl)
+for solving `NonlinearLeastSquaresProblem`.
+
+### Arguments
+
+  - `alg`: Algorithm to use. Can be `:lm` or `:dogleg`.
+
+### Keyword Arguments
+
+  - `linsolve`: Linear solver to use. Can be `:qr`, `:cholesky` or `:lsmr`. If `nothing`,
+    then `LeastSquaresOptim.jl` will choose the best linear solver based on the Jacobian
+    structure.
+  - `autodiff`: Automatic differentiation / Finite Differences. Can be `:central` or
+    `:forward`.
+
+!!! note
+
+    This algorithm is only available if `LeastSquaresOptim.jl` is installed.
+"""
+struct LeastSquaresOptimJL{alg, linsolve} <: AbstractNonlinearSolveExtensionAlgorithm
+    autodiff
+end
+
+function LeastSquaresOptimJL(alg = :lm; linsolve = nothing, autodiff = :central)
+    @assert alg in (:lm, :dogleg)
+    @assert linsolve === nothing || linsolve in (:qr, :cholesky, :lsmr)
+    autodiff isa Symbol && @assert autodiff in (:central, :forward)
+
+    if Base.get_extension(@__MODULE__, :NonlinearSolveLeastSquaresOptimExt) === nothing
+        error("LeastSquaresOptimJL requires LeastSquaresOptim.jl to be loaded")
+    end
+
+    return LeastSquaresOptimJL{alg, linsolve}(autodiff)
+end
+
+"""
+    FastLevenbergMarquardtJL(linsolve::Symbol = :cholesky; factor = 1e-6,
+        factoraccept = 13.0, factorreject = 3.0, factorupdate = :marquardt,
+        minscale = 1e-12, maxscale = 1e16, minfactor = 1e-28, maxfactor = 1e32,
+        autodiff = nothing)
+
+Wrapper over [FastLevenbergMarquardt.jl](https://github.com/kamesy/FastLevenbergMarquardt.jl)
+for solving `NonlinearLeastSquaresProblem`. For details about the other keyword arguments
+see the documentation for `FastLevenbergMarquardt.jl`.
+
+!!! warning
+
+    This is not really the fastest solver. It is called that since the original package
+    is called "Fast". `LevenbergMarquardt()` is almost always a better choice.
+
+### Arguments
+
+  - `linsolve`: Linear solver to use. Can be `:qr` or `:cholesky`.
+
+### Keyword Arguments
+
+  - `autodiff`: determines the backend used for the Jacobian. Note that this argument is
+    ignored if an analytical Jacobian is passed, as that will be used instead. Defaults to
+    `nothing` which means that a default is selected according to the problem specification!
+
+!!! note
+
+    This algorithm is only available if `FastLevenbergMarquardt.jl` is installed.
+"""
+@concrete struct FastLevenbergMarquardtJL{linsolve} <: AbstractNonlinearSolveExtensionAlgorithm
+    autodiff
+    factor
+    factoraccept
+    factorreject
+    factorupdate::Symbol
+    minscale
+    maxscale
+    minfactor
+    maxfactor
+end
+
+function FastLevenbergMarquardtJL(linsolve::Symbol = :cholesky; factor = 1e-6,
+    factoraccept = 13.0, factorreject = 3.0, factorupdate = :marquardt,
+    minscale = 1e-12, maxscale = 1e16, minfactor = 1e-28, maxfactor = 1e32,
+    autodiff = nothing)
+    @assert linsolve in (:qr, :cholesky)
+    @assert factorupdate in (:marquardt, :nielson)
+
+    if Base.get_extension(@__MODULE__, :NonlinearSolveFastLevenbergMarquardtExt) === nothing
+        error("FastLevenbergMarquardtJL requires FastLevenbergMarquardt.jl to be loaded")
+    end
+
+    return FastLevenbergMarquardtJL{linsolve}(autodiff, factor, factoraccept, factorreject,
+        factorupdate, minscale, maxscale, minfactor, maxfactor)
+end
 
 """
     CMINPACK(; method::Symbol = :auto, autodiff = missing)
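
The two constructors restored above can be exercised as follows (a sketch, not from the commit); both error at construction time unless the wrapped package is loaded, per the `Base.get_extension` checks.

```julia
using NonlinearSolve
using LeastSquaresOptim, FastLevenbergMarquardt  # load the extensions first

lso = LeastSquaresOptimJL(:lm; linsolve = :qr, autodiff = :central)
flm = FastLevenbergMarquardtJL(:cholesky; factor = 1e-6, factorupdate = :marquardt)
```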
@@ -134,6 +135,10 @@ then the following methods are allowed:
 
 The default choice of `:auto` selects `:hybr` for NonlinearProblem and `:lm` for
 NonlinearLeastSquaresProblem.
+
+!!! note
+
+    This algorithm is only available if `MINPACK.jl` is installed.
 """
 @concrete struct CMINPACK <: AbstractNonlinearSolveExtensionAlgorithm
     show_trace::Bool
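
A usage sketch for `CMINPACK` (not part of the commit), reflecting the note just added: MINPACK.jl must be loaded. The problem below is illustrative.

```julia
using NonlinearSolve, MINPACK

prob = NonlinearProblem((u, p) -> u .^ 2 .- p, [1.0, 1.0], 2.0)
sol = solve(prob, CMINPACK(; method = :hybr))  # :auto would also pick :hybr here
```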
@@ -206,6 +211,10 @@ Choices for methods in `NLsolveJL`:
 
 For more information on these arguments, consult the
 [NLsolve.jl documentation](https://github.com/JuliaNLSolvers/NLsolve.jl).
+
+!!! note
+
+    This algorithm is only available if `NLsolve.jl` is installed.
 """
 @concrete struct NLsolveJL <: AbstractNonlinearSolveExtensionAlgorithm
     method::Symbol
@@ -289,6 +298,10 @@ Fixed Point Problems. We allow using this algorithm to solve root finding proble
 
 [1] N. Lepage-Saucier, Alternating cyclic extrapolation methods for optimization algorithms,
 arXiv:2104.04974 (2021). https://arxiv.org/abs/2104.04974.
+
+!!! note
+
+    This algorithm is only available if `SpeedMapping.jl` is installed.
 """
 @concrete struct SpeedMappingJL <: AbstractNonlinearSolveExtensionAlgorithm
     σ_min
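
A sketch of `SpeedMappingJL` on a root-finding problem (as the docstring above allows), assuming SpeedMapping.jl is installed; the map is illustrative.

```julia
using NonlinearSolve, SpeedMapping

# Fixed point of cos, written in root-finding form f(u, p) = 0.
prob = NonlinearProblem((u, p) -> cos.(u) .- u, [0.5])
sol = solve(prob, SpeedMappingJL())
```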
@@ -337,6 +350,10 @@ problems as well.
    `:SEA` and `:VEA`. For `:SEA` and `:VEA`, this must be a multiple of `2`.
   - `replace_invalids`: The method to use for replacing invalid iterates. Can be
     `:ReplaceInvalids`, `:ReplaceVector` or `:NoAction`.
+
+!!! note
+
+    This algorithm is only available if `FixedPointAcceleration.jl` is installed.
 """
 @concrete struct FixedPointAccelerationJL <: AbstractNonlinearSolveExtensionAlgorithm
     algorithm::Symbol
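
A sketch for `FixedPointAccelerationJL`, assuming FixedPointAcceleration.jl is installed; the `:Anderson` choice and the map are illustrative, not taken from this diff.

```julia
using NonlinearSolve, FixedPointAcceleration

prob = NonlinearProblem((u, p) -> cos.(u) .- u, [0.5])
sol = solve(prob, FixedPointAccelerationJL(; algorithm = :Anderson))
```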
@@ -411,6 +428,10 @@ end
   - `:pseudotransient`: Pseudo transient method.
   - `:secant`: Secant method for scalar equations.
   - `:anderson`: Anderson acceleration for fixed point iterations.
+
+!!! note
+
+    This algorithm is only available if `SIAMFANLEquations.jl` is installed.
 """
 @concrete struct SIAMFANLEquationsJL{L <: Union{Symbol, Nothing}} <:
                  AbstractNonlinearSolveExtensionAlgorithm
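
A sketch for `SIAMFANLEquationsJL`, assuming SIAMFANLEquations.jl is installed; `method = :newton` is illustrative alongside the methods listed above.

```julia
using NonlinearSolve, SIAMFANLEquations

prob = NonlinearProblem((u, p) -> u .^ 2 .- p, [1.0], 2.0)
sol = solve(prob, SIAMFANLEquationsJL(; method = :newton))
```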
