@@ -49,7 +49,7 @@ Julia's built in `lu`. Equivalent to calling `lu!(A)`
 - pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other choice is
   `LinearAlgebra.NoPivot()`.
 """
-Base.@kwdef struct LUFactorization{P} <: AbstractFactorization
+Base.@kwdef struct LUFactorization{P} <: AbstractDenseFactorization
     pivot::P = LinearAlgebra.RowMaximum()
     reuse_symbolic::Bool = true
     check_pattern::Bool = true # Check factorization re-use
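Since `LUFactorization` is a `Base.@kwdef` struct, the pivot strategy is selectable by keyword. A minimal usage sketch, assuming the standard LinearSolve.jl `LinearProblem`/`solve` entry points (which are not part of this diff):

```julia
using LinearSolve, LinearAlgebra

A = rand(4, 4) + 4I        # diagonally dominated, so skipping pivoting is safe here
b = rand(4)
prob = LinearProblem(A, b)

sol_default = solve(prob, LUFactorization())                                  # partial (row-maximum) pivoting
sol_nopivot = solve(prob, LUFactorization(pivot = LinearAlgebra.NoPivot()))   # no pivoting
```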
@@ -70,7 +70,7 @@ Has low overhead and is good for small matrices.
 - pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other choice is
   `LinearAlgebra.NoPivot()`.
 """
-struct GenericLUFactorization{P} <: AbstractFactorization
+struct GenericLUFactorization{P} <: AbstractDenseFactorization
     pivot::P
 end
@@ -177,7 +177,7 @@ Julia's built in `qr`. Equivalent to calling `qr!(A)`.
 - On CuMatrix, it will use a CUDA-accelerated QR from CuSolver.
 - On BandedMatrix and BlockBandedMatrix, it will use a banded QR.
 """
-struct QRFactorization{P} <: AbstractFactorization
+struct QRFactorization{P} <: AbstractDenseFactorization
     pivot::P
     blocksize::Int
     inplace::Bool
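For reference, a sketch of selecting this algorithm explicitly; the zero-argument convenience constructor is assumed to be defined elsewhere in the package:

```julia
using LinearSolve

A = rand(100, 100)
b = rand(100)
# QR costs more than LU but is more robust for nearly rank-deficient A
sol = solve(LinearProblem(A, b), QRFactorization())
```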
@@ -260,7 +260,7 @@ Julia's built in `cholesky`. Equivalent to calling `cholesky!(A)`.
 - shift: the shift argument in CHOLMOD. Only used for sparse matrices.
 - perm: the perm argument in CHOLMOD. Only used for sparse matrices.
 """
-struct CholeskyFactorization{P, P2} <: AbstractFactorization
+struct CholeskyFactorization{P, P2} <: AbstractDenseFactorization
     pivot::P
     tol::Int
     shift::Float64
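A usage sketch for the dense path; the matrix must be symmetric positive definite, and the no-argument convenience constructor is assumed from the surrounding package code:

```julia
using LinearSolve, LinearAlgebra

B = rand(10, 10)
A = Symmetric(B * B' + 10I)   # symmetric positive definite by construction
b = rand(10)
sol = solve(LinearProblem(A, b), CholeskyFactorization())
```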
@@ -319,7 +319,7 @@

 ## LDLtFactorization

-struct LDLtFactorization{T} <: AbstractFactorization
+struct LDLtFactorization{T} <: AbstractDenseFactorization
     shift::Float64
     perm::T
 end
@@ -361,7 +361,7 @@ Julia's built in `svd`. Equivalent to `svd!(A)`.
   which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
   system.
 """
-struct SVDFactorization{A} <: AbstractFactorization
+struct SVDFactorization{A} <: AbstractDenseFactorization
     full::Bool
     alg::A
 end
@@ -410,7 +410,7 @@ Only for Symmetric matrices.

 - rook: whether to perform rook pivoting. Defaults to false.
 """
-Base.@kwdef struct BunchKaufmanFactorization <: AbstractFactorization
+Base.@kwdef struct BunchKaufmanFactorization <: AbstractDenseFactorization
     rook::Bool = false
 end

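Since this is a `Base.@kwdef` struct, rook pivoting is toggled by keyword. A sketch, assuming the usual `LinearProblem`/`solve` entry points:

```julia
using LinearSolve, LinearAlgebra

B = rand(10, 10)
A = Symmetric(B + B')   # symmetric but generally indefinite: the Bunch-Kaufman use case
b = rand(10)
sol = solve(LinearProblem(A, b), BunchKaufmanFactorization(rook = true))
```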
@@ -464,7 +464,7 @@ factorization API. Quoting from Base:
 - fact_alg: the factorization algorithm to use. Defaults to `LinearAlgebra.factorize`, but can be
   swapped to choices like `lu`, `qr`
 """
-struct GenericFactorization{F} <: AbstractFactorization
+struct GenericFactorization{F} <: AbstractDenseFactorization
     fact_alg::F
 end

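The positional constructor takes any Base-style factorization function. A sketch:

```julia
using LinearSolve, LinearAlgebra

A = rand(8, 8)
b = rand(8)
# Any function returning a factorization object usable with `\` should work;
# `qr` is used here, but `lu`, `svd`, etc. are interchangeable.
sol = solve(LinearProblem(A, b), GenericFactorization(qr))
```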
@@ -781,7 +781,7 @@ patterns with “more structure”.
   `A` has the same sparsity pattern as the previous `A`. If this algorithm is to
   be used in a context where that assumption does not hold, set `reuse_symbolic=false`.
 """
-Base.@kwdef struct UMFPACKFactorization <: AbstractFactorization
+Base.@kwdef struct UMFPACKFactorization <: AbstractSparseFactorization
     reuse_symbolic::Bool = true
     check_pattern::Bool = true # Check factorization re-use
 end
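A sketch of the escape hatch described in the docstring, for workflows where the sparsity pattern of `A` changes between solves:

```julia
using LinearSolve, SparseArrays, LinearAlgebra

A = sprand(100, 100, 0.05) + sparse(10.0I, 100, 100)
b = rand(100)
# reuse_symbolic = false forces a fresh symbolic analysis on every factorization
sol = solve(LinearProblem(A, b), UMFPACKFactorization(reuse_symbolic = false))
```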
@@ -860,7 +860,7 @@ A fast sparse LU-factorization which specializes on sparsity patterns with “less structure”.
   `A` has the same sparsity pattern as the previous `A`. If this algorithm is to
   be used in a context where that assumption does not hold, set `reuse_symbolic=false`.
 """
-Base.@kwdef struct KLUFactorization <: AbstractFactorization
+Base.@kwdef struct KLUFactorization <: AbstractSparseFactorization
     reuse_symbolic::Bool = true
     check_pattern::Bool = true
 end
@@ -941,7 +941,7 @@ Only supports sparse matrices.
 - shift: the shift argument in CHOLMOD.
 - perm: the perm argument in CHOLMOD
 """
-Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization
+Base.@kwdef struct CHOLMODFactorization{T} <: AbstractSparseFactorization
     shift::Float64 = 0.0
     perm::T = nothing
 end
@@ -993,7 +993,7 @@ implementation, usually outperforming OpenBLAS and MKL for smaller matrices
 (<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`.
 Additional optimization for complex matrices is in the works.
 """
-struct RFLUFactorization{P, T} <: AbstractFactorization
+struct RFLUFactorization{P, T} <: AbstractDenseFactorization
     RFLUFactorization(::Val{P}, ::Val{T}) where {P, T} = new{P, T}()
 end

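The inner constructor above takes two `Val` type parameters; reading them as pivoting and threading switches is an assumption from the package docs, not something shown in this diff. A sketch:

```julia
using LinearSolve

A = rand(50, 50)
b = rand(50)
# Val(true), Val(true): both type parameters enabled; their meaning (pivot, thread)
# is assumed here rather than established by this diff
sol = solve(LinearProblem(A, b), RFLUFactorization(Val(true), Val(true)))
```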
@@ -1064,7 +1064,7 @@ be applied to well-conditioned matrices.

 - pivot: Defaults to RowMaximum(), but can be NoPivot()
 """
-struct NormalCholeskyFactorization{P} <: AbstractFactorization
+struct NormalCholeskyFactorization{P} <: AbstractDenseFactorization
     pivot::P
 end

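A sketch of the normal-equations use case: solving `A'A x = A'b` squares the condition number, hence the well-conditioned caveat above. The no-argument constructor is assumed from the surrounding package code:

```julia
using LinearSolve

A = rand(200, 50)   # overdetermined least-squares system
b = rand(200)
sol = solve(LinearProblem(A, b), NormalCholeskyFactorization())
```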
@@ -1152,7 +1152,7 @@ be applied to well-conditioned matrices.

 - rook: whether to perform rook pivoting. Defaults to false.
 """
-struct NormalBunchKaufmanFactorization <: AbstractFactorization
+struct NormalBunchKaufmanFactorization <: AbstractDenseFactorization
     rook::Bool
 end

@@ -1189,7 +1189,7 @@ end

 A special implementation only for solving `Diagonal` matrices fast.
 """
-struct DiagonalFactorization <: AbstractFactorization end
+struct DiagonalFactorization <: AbstractDenseFactorization end

 function init_cacheval(alg::DiagonalFactorization, A, b, u, Pl, Pr, maxiters::Int,
     abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
@@ -1225,7 +1225,7 @@ end
 The FastLapackInterface.jl version of the LU factorization. Notably,
 this version does not allow for choice of pivoting method.
 """
-struct FastLUFactorization <: AbstractFactorization end
+struct FastLUFactorization <: AbstractDenseFactorization end

 function init_cacheval(::FastLUFactorization, A, b, u, Pl, Pr,
     maxiters::Int, abstol, reltol, verbose::Bool,
@@ -1255,7 +1255,7 @@ end

 The FastLapackInterface.jl version of the QR factorization.
 """
-struct FastQRFactorization{P} <: AbstractFactorization
+struct FastQRFactorization{P} <: AbstractDenseFactorization
     pivot::P
     blocksize::Int
 end
@@ -1329,7 +1329,7 @@ dispatch to route around standard BLAS routines in the case e.g. of arbitrary-precision
 floating point numbers or ForwardDiff.Dual.
 This e.g. allows for Automatic Differentiation (AD) of a sparse-matrix solve.
 """
-Base.@kwdef struct SparspakFactorization <: AbstractFactorization
+Base.@kwdef struct SparspakFactorization <: AbstractSparseFactorization
     reuse_symbolic::Bool = true
 end

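The generic-number-type capability called out in the docstring, sketched with `BigFloat` (which BLAS-backed factorizations cannot handle):

```julia
using LinearSolve, SparseArrays, LinearAlgebra

# A sparse tridiagonal system in BigFloat precision
A = sparse(Tridiagonal(rand(BigFloat, 9), rand(BigFloat, 10) .+ 10, rand(BigFloat, 9)))
b = rand(BigFloat, 10)
sol = solve(LinearProblem(A, b), SparspakFactorization())
```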
@@ -1388,7 +1388,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::SparspakFactorization; kwargs...)
     SciMLBase.build_linear_solution(alg, y, nothing, cache)
 end

-for alg in InteractiveUtils.subtypes(AbstractFactorization)
+for alg in vcat(InteractiveUtils.subtypes(AbstractDenseFactorization),
+        InteractiveUtils.subtypes(AbstractSparseFactorization))
     @eval function init_cacheval(alg::$alg, A::MatrixOperator, b, u, Pl, Pr,
         maxiters::Int, abstol, reltol, verbose::Bool,
         assumptions::OperatorAssumptions)
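Illustration of why the loop now needs `vcat`: `subtypes` only returns direct subtypes, so after splitting the hierarchy into dense and sparse branches, neither call alone covers all concrete algorithms. A self-contained sketch with stand-in types:

```julia
using InteractiveUtils

abstract type AbstractDenseFactorization end
abstract type AbstractSparseFactorization end
struct MyDense <: AbstractDenseFactorization end
struct MySparse <: AbstractSparseFactorization end

vcat(subtypes(AbstractDenseFactorization), subtypes(AbstractSparseFactorization))
# => Any[MyDense, MySparse]; iterating either list alone would miss the other branch
```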