Skip to content

Commit 1c30db0

Browse files
Merge pull request #526 from j-fu/abstractsparsefactorization
Introduce AbstractSparseFactorization and AbstractDenseFactorization
2 parents 43fc8d3 + 8646b3d commit 1c30db0

File tree

5 files changed

+55
-20
lines changed

5 files changed

+55
-20
lines changed

src/LinearSolve.jl

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,12 +62,15 @@ using SciMLBase: _unwrap_val
6262

6363
abstract type SciMLLinearSolveAlgorithm <: SciMLBase.AbstractLinearAlgorithm end
6464
abstract type AbstractFactorization <: SciMLLinearSolveAlgorithm end
65+
abstract type AbstractSparseFactorization <: AbstractFactorization end
66+
abstract type AbstractDenseFactorization <: AbstractFactorization end
6567
abstract type AbstractKrylovSubspaceMethod <: SciMLLinearSolveAlgorithm end
6668
abstract type AbstractSolveFunction <: SciMLLinearSolveAlgorithm end
6769

6870
# Traits
6971

7072
needs_concrete_A(alg::AbstractFactorization) = true
73+
needs_concrete_A(alg::AbstractSparseFactorization) = true
7174
needs_concrete_A(alg::AbstractKrylovSubspaceMethod) = false
7275
needs_concrete_A(alg::AbstractSolveFunction) = false
7376

src/common.jl

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,9 @@ default_alias_b(::Any, ::Any, ::Any) = false
132132
default_alias_A(::AbstractKrylovSubspaceMethod, ::Any, ::Any) = true
133133
default_alias_b(::AbstractKrylovSubspaceMethod, ::Any, ::Any) = true
134134

135+
default_alias_A(::AbstractSparseFactorization, ::Any, ::Any) = true
136+
default_alias_b(::AbstractSparseFactorization, ::Any, ::Any) = true
137+
135138
DEFAULT_PRECS(A, p) = IdentityOperator(size(A)[1]), IdentityOperator(size(A)[2])
136139

137140
function __init_u0_from_Ab(A, b)

src/factorization.jl

Lines changed: 20 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ Julia's built in `lu`. Equivalent to calling `lu!(A)`
4949
- pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other choice is
5050
`LinearAlgebra.NoPivot()`.
5151
"""
52-
Base.@kwdef struct LUFactorization{P} <: AbstractFactorization
52+
Base.@kwdef struct LUFactorization{P} <: AbstractDenseFactorization
5353
pivot::P = LinearAlgebra.RowMaximum()
5454
reuse_symbolic::Bool = true
5555
check_pattern::Bool = true # Check factorization re-use
@@ -70,7 +70,7 @@ Has low overhead and is good for small matrices.
7070
- pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other choice is
7171
`LinearAlgebra.NoPivot()`.
7272
"""
73-
struct GenericLUFactorization{P} <: AbstractFactorization
73+
struct GenericLUFactorization{P} <: AbstractDenseFactorization
7474
pivot::P
7575
end
7676

@@ -177,7 +177,7 @@ Julia's built in `qr`. Equivalent to calling `qr!(A)`.
177177
- On CuMatrix, it will use a CUDA-accelerated QR from CuSolver.
178178
- On BandedMatrix and BlockBandedMatrix, it will use a banded QR.
179179
"""
180-
struct QRFactorization{P} <: AbstractFactorization
180+
struct QRFactorization{P} <: AbstractDenseFactorization
181181
pivot::P
182182
blocksize::Int
183183
inplace::Bool
@@ -260,7 +260,7 @@ Julia's built in `cholesky`. Equivalent to calling `cholesky!(A)`.
260260
- shift: the shift argument in CHOLMOD. Only used for sparse matrices.
261261
- perm: the perm argument in CHOLMOD. Only used for sparse matrices.
262262
"""
263-
struct CholeskyFactorization{P, P2} <: AbstractFactorization
263+
struct CholeskyFactorization{P, P2} <: AbstractDenseFactorization
264264
pivot::P
265265
tol::Int
266266
shift::Float64
@@ -319,7 +319,7 @@ end
319319

320320
## LDLtFactorization
321321

322-
struct LDLtFactorization{T} <: AbstractFactorization
322+
struct LDLtFactorization{T} <: AbstractDenseFactorization
323323
shift::Float64
324324
perm::T
325325
end
@@ -361,7 +361,7 @@ Julia's built in `svd`. Equivalent to `svd!(A)`.
361361
which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
362362
system.
363363
"""
364-
struct SVDFactorization{A} <: AbstractFactorization
364+
struct SVDFactorization{A} <: AbstractDenseFactorization
365365
full::Bool
366366
alg::A
367367
end
@@ -410,7 +410,7 @@ Only for Symmetric matrices.
410410
411411
- rook: whether to perform rook pivoting. Defaults to false.
412412
"""
413-
Base.@kwdef struct BunchKaufmanFactorization <: AbstractFactorization
413+
Base.@kwdef struct BunchKaufmanFactorization <: AbstractDenseFactorization
414414
rook::Bool = false
415415
end
416416

@@ -464,7 +464,7 @@ factorization API. Quoting from Base:
464464
- fact_alg: the factorization algorithm to use. Defaults to `LinearAlgebra.factorize`, but can be
465465
swapped to choices like `lu`, `qr`
466466
"""
467-
struct GenericFactorization{F} <: AbstractFactorization
467+
struct GenericFactorization{F} <: AbstractDenseFactorization
468468
fact_alg::F
469469
end
470470

@@ -781,7 +781,7 @@ patterns with “more structure”.
781781
`A` has the same sparsity pattern as the previous `A`. If this algorithm is to
782782
be used in a context where that assumption does not hold, set `reuse_symbolic=false`.
783783
"""
784-
Base.@kwdef struct UMFPACKFactorization <: AbstractFactorization
784+
Base.@kwdef struct UMFPACKFactorization <: AbstractSparseFactorization
785785
reuse_symbolic::Bool = true
786786
check_pattern::Bool = true # Check factorization re-use
787787
end
@@ -860,7 +860,7 @@ A fast sparse LU-factorization which specializes on sparsity patterns with “less structure”.
860860
`A` has the same sparsity pattern as the previous `A`. If this algorithm is to
861861
be used in a context where that assumption does not hold, set `reuse_symbolic=false`.
862862
"""
863-
Base.@kwdef struct KLUFactorization <: AbstractFactorization
863+
Base.@kwdef struct KLUFactorization <: AbstractSparseFactorization
864864
reuse_symbolic::Bool = true
865865
check_pattern::Bool = true
866866
end
@@ -941,7 +941,7 @@ Only supports sparse matrices.
941941
- shift: the shift argument in CHOLMOD.
942942
- perm: the perm argument in CHOLMOD
943943
"""
944-
Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization
944+
Base.@kwdef struct CHOLMODFactorization{T} <: AbstractSparseFactorization
945945
shift::Float64 = 0.0
946946
perm::T = nothing
947947
end
@@ -993,7 +993,7 @@ implementation, usually outperforming OpenBLAS and MKL for smaller matrices
993993
(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`.
994994
Additional optimization for complex matrices is in the works.
995995
"""
996-
struct RFLUFactorization{P, T} <: AbstractFactorization
996+
struct RFLUFactorization{P, T} <: AbstractDenseFactorization
997997
RFLUFactorization(::Val{P}, ::Val{T}) where {P, T} = new{P, T}()
998998
end
999999

@@ -1064,7 +1064,7 @@ be applied to well-conditioned matrices.
10641064
10651065
- pivot: Defaults to RowMaximum(), but can be NoPivot()
10661066
"""
1067-
struct NormalCholeskyFactorization{P} <: AbstractFactorization
1067+
struct NormalCholeskyFactorization{P} <: AbstractDenseFactorization
10681068
pivot::P
10691069
end
10701070

@@ -1152,7 +1152,7 @@ be applied to well-conditioned matrices.
11521152
11531153
- rook: whether to perform rook pivoting. Defaults to false.
11541154
"""
1155-
struct NormalBunchKaufmanFactorization <: AbstractFactorization
1155+
struct NormalBunchKaufmanFactorization <: AbstractDenseFactorization
11561156
rook::Bool
11571157
end
11581158

@@ -1189,7 +1189,7 @@ end
11891189
11901190
A special implementation only for solving `Diagonal` matrices fast.
11911191
"""
1192-
struct DiagonalFactorization <: AbstractFactorization end
1192+
struct DiagonalFactorization <: AbstractDenseFactorization end
11931193

11941194
function init_cacheval(alg::DiagonalFactorization, A, b, u, Pl, Pr, maxiters::Int,
11951195
abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
@@ -1225,7 +1225,7 @@ end
12251225
The FastLapackInterface.jl version of the LU factorization. Notably,
12261226
this version does not allow for choice of pivoting method.
12271227
"""
1228-
struct FastLUFactorization <: AbstractFactorization end
1228+
struct FastLUFactorization <: AbstractDenseFactorization end
12291229

12301230
function init_cacheval(::FastLUFactorization, A, b, u, Pl, Pr,
12311231
maxiters::Int, abstol, reltol, verbose::Bool,
@@ -1255,7 +1255,7 @@ end
12551255
12561256
The FastLapackInterface.jl version of the QR factorization.
12571257
"""
1258-
struct FastQRFactorization{P} <: AbstractFactorization
1258+
struct FastQRFactorization{P} <: AbstractDenseFactorization
12591259
pivot::P
12601260
blocksize::Int
12611261
end
@@ -1329,7 +1329,7 @@ dispatch to route around standard BLAS routines in the case e.g. of arbitrary-pr
13291329
floating point numbers or ForwardDiff.Dual.
13301330
This e.g. allows for Automatic Differentiation (AD) of a sparse-matrix solve.
13311331
"""
1332-
Base.@kwdef struct SparspakFactorization <: AbstractFactorization
1332+
Base.@kwdef struct SparspakFactorization <: AbstractSparseFactorization
13331333
reuse_symbolic::Bool = true
13341334
end
13351335

@@ -1388,7 +1388,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::SparspakFactorization; kwargs
13881388
SciMLBase.build_linear_solution(alg, y, nothing, cache)
13891389
end
13901390

1391-
for alg in InteractiveUtils.subtypes(AbstractFactorization)
1391+
for alg in vcat(InteractiveUtils.subtypes(AbstractDenseFactorization),
1392+
InteractiveUtils.subtypes(AbstractSparseFactorization))
13921393
@eval function init_cacheval(alg::$alg, A::MatrixOperator, b, u, Pl, Pr,
13931394
maxiters::Int, abstol, reltol, verbose::Bool,
13941395
assumptions::OperatorAssumptions)

test/basictests.jl

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -547,9 +547,36 @@ end
547547
N = 10_000
548548
A = spdiagm(1 => -ones(N - 1), 0 => fill(10.0, N), -1 => -ones(N - 1))
549549
u0 = ones(size(A, 2))
550+
550551
b = A * u0
551552
B = MySparseMatrixCSC(A)
552553
pr = LinearProblem(B, b)
554+
555+
# test default algorithm
553556
@time "solve MySparseMatrixCSC" u=solve(pr)
554557
@test norm(u - u0, Inf) < 1.0e-13
558+
559+
# test Krylov algorithm with reinit!
560+
pr = LinearProblem(B, b)
561+
solver=KrylovJL_CG()
562+
cache=init(pr,solver,maxiters=1000,reltol=1.0e-10)
563+
u=solve!(cache)
564+
A1 = spdiagm(1 => -ones(N - 1), 0 => fill(100.0, N), -1 => -ones(N - 1))
565+
b1=A1*u0
566+
B1= MySparseMatrixCSC(A1)
567+
@test norm(u - u0, Inf) < 1.0e-8
568+
reinit!(cache; A=B1, b=b1)
569+
u=solve!(cache)
570+
@test norm(u - u0, Inf) < 1.0e-8
571+
572+
# test factorization with reinit!
573+
pr = LinearProblem(B, b)
574+
solver=SparspakFactorization()
575+
cache=init(pr,solver)
576+
u=solve!(cache)
577+
@test norm(u - u0, Inf) < 1.0e-8
578+
reinit!(cache; A=B1, b=b1)
579+
u=solve!(cache)
580+
@test norm(u - u0, Inf) < 1.0e-8
581+
555582
end

test/resolve.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
using LinearSolve, LinearAlgebra, SparseArrays, InteractiveUtils, Test
2+
using LinearSolve: AbstractDenseFactorization, AbstractSparseFactorization
23

3-
for alg in subtypes(LinearSolve.AbstractFactorization)
4+
for alg in vcat(InteractiveUtils.subtypes(AbstractDenseFactorization),InteractiveUtils.subtypes(AbstractSparseFactorization))
45
@show alg
56
if !(alg in [
67
DiagonalFactorization,

0 commit comments

Comments
 (0)