diff --git a/chapters/matrix_methods/gaussian_elimination/code/python/gaussian_elimination.py b/chapters/matrix_methods/gaussian_elimination/code/python/gaussian_elimination.py
new file mode 100644
index 000000000..1527b8133
--- /dev/null
+++ b/chapters/matrix_methods/gaussian_elimination/code/python/gaussian_elimination.py
@@ -0,0 +1,71 @@
+import numpy as np
+
+def gaussian_elimination(A):
+
+    rows = len(A)
+    cols = len(A[0])
+    n = min(rows, cols)
+
+    # Main loop going through all columns
+    for k in range(n):
+
+        # Step 1: find the element with the largest absolute value in column k
+        max_index = np.argmax(np.abs([A[i][k] for i in range(k, rows)])) + k
+
+        # If the pivot is zero, the matrix is singular and cannot be reduced
+        if A[max_index][k] == 0:
+            print("Matrix is singular!")
+            return
+
+        # Step 2: swap the row holding that pivot up to row k
+        A[max_index], A[k] = A[k], A[max_index]
+
+        # Loop over all remaining rows
+        for i in range(k+1, rows):
+
+            # Step 3: find the fraction that eliminates A[i][k]
+            fraction = A[i][k] / A[k][k]
+
+            # Loop through the remaining columns of that row
+            for j in range(k+1, cols):
+
+                # Step 4: re-evaluate each element
+                A[i][j] -= A[k][j]*fraction
+
+            # Step 5: set the eliminated lower element to exactly 0
+            A[i][k] = 0
+
+
+def back_substitution(A):
+
+    rows = len(A)
+    cols = len(A[0])
+
+    # Create the solution vector
+    soln = [0]*rows
+
+    # Initialize the final element
+    soln[-1] = A[-1][-1] / A[-1][-2]
+
+    for i in range(rows - 2, -1, -1):
+        total = 0
+        for j in range(rows - 1, i, -1):
+            total += soln[j]*A[i][j]
+        soln[i] = (A[i][-1] - total) / A[i][i]
+
+    return soln
+
+def main():
+    A = [[ 2, 3, 4, 6],
+         [ 1, 2, 3, 4],
+         [ 3, -4, 0, 10]]
+
+    gaussian_elimination(A)
+    print(A)
+    soln = back_substitution(A)
+
+    for element in soln:
+        print(element)
+
+if __name__ == '__main__':
+    main()
diff --git a/chapters/matrix_methods/gaussian_elimination/gaussian_elimination.md b/chapters/matrix_methods/gaussian_elimination/gaussian_elimination.md
index d0d4c010e..a369ed43d 100644
--- a/chapters/matrix_methods/gaussian_elimination/gaussian_elimination.md
+++ b/chapters/matrix_methods/gaussian_elimination/gaussian_elimination.md
@@ -257,6 +257,8 @@ In code, this looks like:
 {% method %}
 {% sample lang="jl" %}
 [import:1-42, lang:"julia"](code/julia/gaussian_elimination.jl)
+{% sample lang="py" %}
+[import:3-36, lang:"python"](code/python/gaussian_elimination.py)
 {% endmethod %}
 
 As with all code, it takes time to fully absorb what is going on and why everything is happening; however, I have tried to comment the above pseudocode with the necessary steps. Let me know if anything is unclear!
@@ -270,6 +272,8 @@ Even though this seems straightforward, the pseudocode might not be as simple as
 {% method %}
 {% sample lang="jl" %}
 [import:44-64, lang:"julia"](code/julia/gaussian_elimination.jl)
+{% sample lang="py" %}
+[import:39-56, lang:"python"](code/python/gaussian_elimination.py)
 {% endmethod %}
 
 Now, as for what's next... Well, we are in for a treat! The above algorithm clearly has 3 `for` loops, and will thus have a complexity of $$\sim O(n^3)$$, which is abysmal! If we can reduce the matrix to a specifically **tridiagonal** matrix, we can actually solve the system in $$\sim O(n)$$! How? Well, we can use an algorithm known as the _Tri-Diagonal Matrix Algorithm_ \(TDMA\), also known as the _Thomas Algorithm_.
@@ -281,6 +285,8 @@ The full code can be seen here:
 {% method %}
 {% sample lang="jl" %}
 [import, lang:"julia"](code/julia/gaussian_elimination.jl)
+{% sample lang="py" %}
+[import, lang:"python"](code/python/gaussian_elimination.py)
 {% sample lang="rs" %}
 [import, lang:"rust"](code/rust/gaussian_elimination.rs)
 {% endmethod %}
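As a quick sanity check on the new Python implementation, the result of `gaussian_elimination` followed by `back_substitution` can be compared against `numpy.linalg.solve` on the same system. The snippet below is a minimal sketch, assuming the file added in this diff is importable as `gaussian_elimination` from the working directory; the expected solution (18/11, -14/11, 18/11) follows from the example system in `main()`.

```python
import numpy as np

# Assumption: the new file from this diff is on the path as gaussian_elimination.py
from gaussian_elimination import gaussian_elimination, back_substitution

# The same augmented matrix [A|b] used in main()
A = [[2,  3, 4,  6],
     [1,  2, 3,  4],
     [3, -4, 0, 10]]

gaussian_elimination(A)        # reduces A to row echelon form in place
soln = back_substitution(A)    # solves the triangular system

# Cross-check against numpy on the unaugmented system Ax = b
coeffs = np.array([[2, 3, 4], [1, 2, 3], [3, -4, 0]], dtype=float)
rhs = np.array([6, 4, 10], dtype=float)

# Both should be approximately [1.6364, -1.2727, 1.6364], i.e. (18/11, -14/11, 18/11)
assert np.allclose(soln, np.linalg.solve(coeffs, rhs))
```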