diff --git a/chapters/matrix_methods/gaussian_elimination/code/c/gaussian_elimination.c b/chapters/matrix_methods/gaussian_elimination/code/c/gaussian_elimination.c
new file mode 100644
index 000000000..049e0f5d1
--- /dev/null
+++ b/chapters/matrix_methods/gaussian_elimination/code/c/gaussian_elimination.c
@@ -0,0 +1,83 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
+void swap_rows(double * const a, size_t i, size_t pivot, size_t cols) {
+    for (size_t j = 0; j < cols; ++j) {
+        double tmp = a[i * cols + j];
+        a[i * cols + j] = a[pivot * cols + j];
+        a[pivot * cols + j] = tmp;
+    }
+}
+
+void gaussian_elimination(double *a, const size_t rows, const size_t cols) {
+    size_t min_dim = (rows < cols)? rows: cols;
+
+    for (size_t k = 0; k < min_dim; ++k) {
+        size_t pivot = k;
+
+        for (size_t i = k + 1; i < rows; ++i) {
+            if (fabs(a[i * cols + k]) > fabs(a[pivot * cols + k])) {
+                pivot = i;
+            }
+        }
+
+        if (a[pivot * cols + k] == 0) {
+            printf("The matrix is singular.\n");
+            exit(0);
+        }
+
+        if (k != pivot) {
+            swap_rows(a, k, pivot, cols);
+        }
+
+        for (size_t i = k + 1; i < rows; ++i) {
+            double scale = a[i * cols + k] / a[k * cols + k];
+
+            for (size_t j = k + 1; j < cols; ++j) {
+                a[i * cols + j] -= a[k * cols + j] * scale;
+            }
+
+            a[i * cols + k] = 0;
+        }
+    }
+}
+
+void back_substitution(const double * const a, double * const x,
+                       const size_t rows, const size_t cols) {
+
+    for (int i = rows - 1; i >= 0; --i) {
+        double sum = 0.0;
+
+        for (size_t j = cols - 2; j > i; --j) {
+            sum += x[j] * a[i * cols + j];
+        }
+
+        x[i] = (a[i * cols + cols - 1] - sum) / a[i * cols + i];
+    }
+}
+
+int main() {
+    double a[3][4] = {{3.0, 2.0, -4.0, 3.0},
+                      {2.0, 3.0, 3.0, 15.0},
+                      {5.0, -3.0, 1.0, 14.0}};
+
+    gaussian_elimination((double *)a, 3, 4);
+
+    for (size_t i = 0; i < 3; ++i) {
+        printf("[");
+        for (size_t j = 0; j < 4; ++j) {
+            printf("%f ", a[i][j]);
+        }
+        printf("]\n");
+    }
+
+    printf("\n");
+
+    double x[3] = {0, 0, 0};
+    back_substitution((double *)a, x, 3, 4);
+
+    printf("(%f,%f,%f)\n", x[0], x[1], x[2]);
+
+    return 0;
+}
diff --git a/chapters/matrix_methods/gaussian_elimination/gaussian_elimination.md b/chapters/matrix_methods/gaussian_elimination/gaussian_elimination.md
index d0d4c010e..6c7c838c9 100644
--- a/chapters/matrix_methods/gaussian_elimination/gaussian_elimination.md
+++ b/chapters/matrix_methods/gaussian_elimination/gaussian_elimination.md
@@ -257,6 +257,8 @@ In code, this looks like:
 {% method %}
 {% sample lang="jl" %}
 [import:1-42, lang:"julia"](code/julia/gaussian_elimination.jl)
+{% sample lang="c" %}
+[import:13-44, lang:"c_cpp"](code/c/gaussian_elimination.c)
 {% endmethod %}
 
 As with all code, it takes time to fully absorb what is going on and why everything is happening; however, I have tried to comment the above pseudocode with the necessary steps. Let me know if anything is unclear!
@@ -270,6 +272,8 @@ Even though this seems straightforward, the pseudocode might not be as simple as
 {% method %}
 {% sample lang="jl" %}
 [import:44-64, lang:"julia"](code/julia/gaussian_elimination.jl)
+{% sample lang="c" %}
+[import:46-58, lang:"c_cpp"](code/c/gaussian_elimination.c)
 {% endmethod %}
 
 Now, as for what's next... Well, we are in for a treat! The above algorithm clearly has 3 `for` loops, and will thus have a complexity of $$\sim O(n^3)$$, which is abysmal! If we can reduce the matrix to a specifically **tridiagonal** matrix, we can actually solve the system in $$\sim O(n)$$! How? Well, we can use an algorithm known as the _Tri-Diagonal Matrix Algorithm_ \(TDMA\) also known as the _Thomas Algorithm_.
@@ -281,6 +285,8 @@ The full code can be seen here:
 {% method %}
 {% sample lang="jl" %}
 [import, lang:"julia"](code/julia/gaussian_elimination.jl)
+{% sample lang="c" %}
+[import, lang:"c_cpp"](code/c/gaussian_elimination.c)
 {% sample lang="rs" %}
 [import, lang:"rust"](code/rust/gaussian_elimination.rs)
 {% endmethod %}
diff --git a/chapters/matrix_methods/thomas/code/c/thomas.c b/chapters/matrix_methods/thomas/code/c/thomas.c
new file mode 100644
index 000000000..3aed94e8e
--- /dev/null
+++ b/chapters/matrix_methods/thomas/code/c/thomas.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+#include <string.h>
+
+void thomas(double * const a, double * const b, double * const c,
+            double * const x, const size_t size) {
+
+    double y[size];
+    memset(y, 0, size * sizeof(double));
+
+    y[0] = c[0] / b[0];
+    x[0] = x[0] / b[0];
+
+    for (size_t i = 1; i < size; ++i) {
+        double scale = 1.0 / (b[i] - a[i] * y[i - 1]);
+        y[i] = c[i] * scale;
+        x[i] = (x[i] - a[i] * x[i - 1]) * scale;
+    }
+
+    for (int i = size - 2; i >= 0; --i) {
+        x[i] -= y[i] * x[i + 1];
+    }
+}
+
+int main() {
+    double a[] = {0.0, 2.0, 3.0};
+    double b[] = {1.0, 3.0, 6.0};
+    double c[] = {4.0, 5.0, 0.0};
+    double x[] = {7.0, 5.0, 3.0};
+
+    printf("The system,\n");
+    printf("[1.0 4.0 0.0][x] = [7.0]\n");
+    printf("[2.0 3.0 5.0][y] = [5.0]\n");
+    printf("[0.0 3.0 6.0][z] = [3.0]\n");
+    printf("has the solution:\n");
+
+    thomas(a, b, c, x, 3);
+
+    for (size_t i = 0; i < 3; ++i) {
+        printf("[%f]\n", x[i]);
+    }
+
+    return 0;
+}
diff --git a/chapters/matrix_methods/thomas/thomas.md b/chapters/matrix_methods/thomas/thomas.md
index dfd3bd4b4..cad56538f 100644
--- a/chapters/matrix_methods/thomas/thomas.md
+++ b/chapters/matrix_methods/thomas/thomas.md
@@ -40,6 +40,8 @@ In code, this will look like this:
 {% method %}
 {% sample lang="jl" %}
 [import, lang:"julia"](code/julia/thomas.jl)
+{% sample lang="c" %}
+[import, lang:"c_cpp"](code/c/thomas.c)
 {% sample lang="py" %}
 [import, lang:"python"](code/python/thomas.py)
 {% endmethod %}
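As a quick sanity check on the two example systems used in the new main() functions, here is a minimal, hypothetical verification sketch; it is not part of the diff above, and the "expected" solutions (3, 1, 2) for the elimination example and (13/15, 23/15, -4/15) for the tridiagonal example were worked out by hand here, not copied from program output. It simply plugs those values back into each system and prints the residuals A*x - b, which should all be zero up to rounding.

/* check_solutions.c -- hypothetical standalone sanity check, not part of
 * the files added in this pull request. The solutions below are hand-worked
 * assumptions, not output captured from the new programs. */
#include <stdio.h>

int main() {
    /* Augmented system from gaussian_elimination.c's main();
     * hand-worked solution x = (3, 1, 2). */
    double ge[3][4] = {{3.0, 2.0, -4.0, 3.0},
                       {2.0, 3.0, 3.0, 15.0},
                       {5.0, -3.0, 1.0, 14.0}};
    double ge_x[3] = {3.0, 1.0, 2.0};

    /* Tridiagonal system from thomas.c's main();
     * hand-worked solution x = (13/15, 23/15, -4/15). */
    double th[3][4] = {{1.0, 4.0, 0.0, 7.0},
                       {2.0, 3.0, 5.0, 5.0},
                       {0.0, 3.0, 6.0, 3.0}};
    double th_x[3] = {13.0 / 15.0, 23.0 / 15.0, -4.0 / 15.0};

    /* Residual of row i is (A*x)_i - b_i; every printed value should be
     * zero up to floating-point rounding. */
    for (int i = 0; i < 3; ++i) {
        double r_ge = -ge[i][3];
        double r_th = -th[i][3];
        for (int j = 0; j < 3; ++j) {
            r_ge += ge[i][j] * ge_x[j];
            r_th += th[i][j] * th_x[j];
        }
        printf("row %d residuals: %g (elimination), %g (Thomas)\n",
               i, r_ge, r_th);
    }

    return 0;
}

Compiled and run with, for example, gcc check_solutions.c -o check_solutions && ./check_solutions, it should print three rows of (near-)zero residuals for each system.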