diff --git a/contents/gaussian_elimination/code/python/gaussian_elimination.py b/contents/gaussian_elimination/code/python/gaussian_elimination.py
new file mode 100644
index 000000000..3f11cd761
--- /dev/null
+++ b/contents/gaussian_elimination/code/python/gaussian_elimination.py
@@ -0,0 +1,89 @@
+import numpy as np
+
+def gaussian_elimination(A):
+
+    pivot_row = 0
+
+    # Step 1: Go by column
+    for pivot_col in range(min(A.shape[0], A.shape[1])):
+
+        # Step 2: Swap row with highest element in col
+        max_i = np.argmax(abs(A[pivot_row:, pivot_col])) + pivot_row
+
+        temp = A[pivot_row, :].copy()
+        A[pivot_row, :] = A[max_i, :]
+        A[max_i, :] = temp
+
+        # Skip on singular matrix, not actually a pivot
+        if A[pivot_row, pivot_col] == 0:
+            continue
+
+        # Steps 3 & 4: Zero out elements below pivot
+        for r in range(pivot_row + 1, A.shape[0]):
+            # Step 3: Get fraction
+            frac = -A[r, pivot_col] / A[pivot_row, pivot_col]
+            # Step 4: Add rows
+            A[r, :] += frac * A[pivot_row, :]
+
+        pivot_row += 1
+
+
+# Assumes A is already row echelon form
+def gauss_jordan_elimination(A):
+
+    col = 0
+
+    # Scan for pivots
+    for row in range(A.shape[0]):
+        while col < A.shape[1] and A[row, col] == 0:
+            col += 1
+
+        if col >= A.shape[1]:
+            continue
+
+        # Set each pivot to one via row scaling
+        A[row, :] /= A[row, col]
+
+        # Zero out elements above pivot
+        for r in range(row):
+            A[r, :] -= A[r, col] * A[row, :]
+
+
+# Assumes A has a unique solution and A in row echelon form
+def back_substitution(A):
+
+    sol = np.zeros(A.shape[0]).T
+
+    # Go by pivots along diagonal
+    for pivot_i in range(A.shape[0] - 1, -1, -1):
+        s = 0
+        for col in range(pivot_i + 1, A.shape[1] - 1):
+            s += A[pivot_i, col] * sol[col]
+        sol[pivot_i] = (A[pivot_i, A.shape[1] - 1] - s) / A[pivot_i, pivot_i]
+
+    return sol
+
+
+def main():
+    A = np.array([[2, 3, 4, 6],
+                  [1, 2, 3, 4,],
+                  [3, -4, 0, 10]], dtype=float)
+
+    print("Original")
+    print(A, "\n")
+
+    gaussian_elimination(A)
+    print("Gaussian elimination")
+    print(A, "\n")
+
+    print("Back substitution")
+    print(back_substitution(A), "\n")
+
+    gauss_jordan_elimination(A)
+    print("Gauss-Jordan")
+    print(A, "\n")
+
+
+if __name__ == "__main__":
+    main()
+
diff --git a/contents/gaussian_elimination/gaussian_elimination.md b/contents/gaussian_elimination/gaussian_elimination.md
index d48b46df4..7976a0c7e 100644
--- a/contents/gaussian_elimination/gaussian_elimination.md
+++ b/contents/gaussian_elimination/gaussian_elimination.md
@@ -360,6 +360,8 @@ In code, this looks like:
 [import:41-78, lang:"rust"](code/rust/gaussian_elimination.rs)
 {% sample lang="hs" %}
 [import:10-36, lang:"haskell"](code/haskell/gaussianElimination.hs)
+{% sample lang="py" %}
+[import:3-28, lang:"python"](code/python/gaussian_elimination.py)
 {% endmethod %}
 
 Now, to be clear: this algorithm creates an upper-triangular matrix.
@@ -393,6 +395,8 @@ This code does not exist yet in rust, so here's Julia code (sorry for the inconv
 [import:67-93, lang:"julia"](code/julia/gaussian_elimination.jl)
 {% sample lang="hs" %}
 [import:38-46, lang:"haskell"](code/haskell/gaussianElimination.hs)
+{% sample lang="py" %}
+[import:31-49, lang:"python"](code/python/gaussian_elimination.py)
 {% endmethod %}
 
 ## Back-substitution
@@ -423,6 +427,8 @@ In code, this involves keeping a rolling sum of all the values we substitute in
 [import:79-94, lang:"rust"](code/rust/gaussian_elimination.rs)
 {% sample lang="hs" %}
 [import:48-53, lang:"haskell"](code/haskell/gaussianElimination.hs)
+{% sample lang="py" %}
+[import:52-64, lang:"python"](code/python/gaussian_elimination.py)
 {% endmethod %}
 
 ## Conclusions
@@ -445,6 +451,8 @@ As for what's next... Well, we are in for a treat! The above algorithm clearly h
 [import, lang:"rust"](code/rust/gaussian_elimination.rs)
 {% sample lang="hs" %}
 [import, lang:"haskell"](code/haskell/gaussianElimination.hs)
+{% sample lang="py" %}
+[import, lang:"python"](code/python/gaussian_elimination.py)
 {% endmethod %}