% vim: set ts=2 sw=2 et tw=80:
\documentclass[12pt,a4paper]{article}
\usepackage[utf8]{inputenc} \usepackage[margin=2cm]{geometry}
\usepackage{amstext}
\usepackage{amsmath}
\usepackage{array}
\newcommand{\lra}{\Leftrightarrow}
\newcolumntype{L}{>{$}l<{$}}
\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
\title{Lecture notes 6 -- Introduction to Computational Science}
\author{Michael Multerer \\ Copied by: Claudio Maggioni}
\begin{document}
\maketitle
\section*{2.5: Partial pivoting}
Obviously, Algorithm 2.8 fails if one of the pivots becomes zero. In this case, we need to choose a different pivot element.
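For instance, the non-singular matrix
\[
\bar{A} = \begin{bmatrix} 0 & 1 \\ 1 & 1 \end{bmatrix}
\]
has $a_{1,1} = 0$, so the very first step of Algorithm 2.8 would divide by zero, although interchanging the two rows immediately fixes the problem.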
\paragraph{Simple approach:} Column pivoting: choose the row index $k$ such that $|a^{(i)}_{k,i}|=\max_{i \leq l \leq n}|a^{(i)}_{l,i}|$.
In order to move the pivot element and the corresponding row into place, we interchange rows $i$ and $k$ by means of a \textit{permutation matrix}:
\[
\bar{P}_i := \begin{bmatrix}
1 \\
& \ddots \\
&& 1 \\
&&& 0 & \cdots & 1 \\
&&& \vdots & \ddots & \vdots \\
&&& 1 & \cdots & 0 \\
&&&&&& \ddots \\
&&&&&&& 1 \\
\end{bmatrix}
\]
i.e.\ the identity matrix with rows $i$ and $k$ interchanged. The following rules apply:
\begin{enumerate}
\item Multiplication by $\bar{P}_i$ from the left $\Rightarrow$ interchanges rows $i$ and $k$
\item Multiplication by $\bar{P}_i$ from the right $\Rightarrow$ interchanges columns $i$ and $k$
\item $(\bar{P}_i)^2 = \bar{P}_i \cdot \bar{P}_i = \bar{I}$
\end{enumerate}
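As a small illustration, take $n = 3$, $i = 2$ and $k = 3$:
\[
\bar{P}_2 = \begin{bmatrix} 1 & 0 & 0 \\ 0 & 0 & 1 \\ 0 & 1 & 0 \end{bmatrix},
\qquad
\bar{P}_2 \begin{bmatrix} a_{1,1} & a_{1,2} & a_{1,3} \\ a_{2,1} & a_{2,2} & a_{2,3} \\ a_{3,1} & a_{3,2} & a_{3,3} \end{bmatrix}
= \begin{bmatrix} a_{1,1} & a_{1,2} & a_{1,3} \\ a_{3,1} & a_{3,2} & a_{3,3} \\ a_{2,1} & a_{2,2} & a_{2,3} \end{bmatrix},
\]
whereas multiplication by $\bar{P}_2$ from the right interchanges the second and third column, and $\bar{P}_2 \cdot \bar{P}_2 = \bar{I}$.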
One step of the LU decomposition with pivoting can then be
written in matrix notation as:
$$\bar{A}_{i+1} = \bar{L}_i \cdot \bar{P}_i \cdot \bar{A}_i \qquad (*)$$
\paragraph{Note:} For $j < i$, it holds that $\bar{P}_i \bar{L}_j = \widetilde{L}_j \bar{P}_i$, where $\widetilde{L}_j$ is the same matrix as $\bar{L}_j$ except that the entries $\hat{l}^{(j)}_i$ and $\hat{l}^{(j)}_k$ are interchanged:
\[
\bar{L}_j := \begin{bmatrix}
1\\
&\ddots\\
&&1\\
&&\hat{l}^{(j)}_{j+1}&\ddots\\
&&\hat{l}^{(j)}_i&&1\\
&&\hat{l}^{(j)}_k&&&\ddots\\
&&\hat{l}^{(j)}_n&&&&1\\
\end{bmatrix}
\text{\hspace{1cm}}
\widetilde{L}_j := \begin{bmatrix}
1\\
&\ddots\\
&&1\\
&&\hat{l}^{(j)}_{j+1}&\ddots\\
&&\hat{l}^{(j)}_k&&1\\
&&\hat{l}^{(j)}_i&&&\ddots\\
&&\hat{l}^{(j)}_n&&&&1\\
\end{bmatrix}
\]
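As a concrete check, take $n = 3$, $j = 1$, $i = 2$ and $k = 3$:
\[
\bar{P}_2 \bar{L}_1 =
\begin{bmatrix} 1 & 0 & 0 \\ 0 & 0 & 1 \\ 0 & 1 & 0 \end{bmatrix}
\begin{bmatrix} 1 & 0 & 0 \\ \hat{l}^{(1)}_2 & 1 & 0 \\ \hat{l}^{(1)}_3 & 0 & 1 \end{bmatrix}
=
\begin{bmatrix} 1 & 0 & 0 \\ \hat{l}^{(1)}_3 & 0 & 1 \\ \hat{l}^{(1)}_2 & 1 & 0 \end{bmatrix}
=
\begin{bmatrix} 1 & 0 & 0 \\ \hat{l}^{(1)}_3 & 1 & 0 \\ \hat{l}^{(1)}_2 & 0 & 1 \end{bmatrix}
\begin{bmatrix} 1 & 0 & 0 \\ 0 & 0 & 1 \\ 0 & 1 & 0 \end{bmatrix}
= \widetilde{L}_1 \bar{P}_2 .
\]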
Resolving the recursion $(*)$ then yields:
$$\bar{A}_{i+1} = \bar{L}_i \bar{P}_i \bar{L}_{i-1} \bar{P}_{i-1} \ldots \bar{L}_1 \bar{P}_1 \bar{A}$$
or, for $i = n-1$ (note that $\bar{A}_n = \bar{U}$),
$$\bar{L}_{n-1} \bar{P}_{n-1} \bar{L}_{n-2} \bar{P}_{n-2} \ldots \bar{L}_1 \bar{P}_1 \bar{A} = \bar{U}$$
Now we can exploit $\bar{P}_2 \bar{L}_1 \bar{P}_1 = \widetilde{L}_1 \bar{P}_2 \bar{P}_1$ and so on. This yields:
$$\bar{P}\bar{A} = \bar{L}\bar{U}$$
with
$$\bar{P} = \bar{P}_{n-1} \bar{P}_{n-2} \ldots \bar{P}_1$$
and
$$\bar{L} = \widetilde{L}^{-1}_1 \ldots \widetilde{L}^{-1}_{n-1}$$
where
\begin{align*}
\widetilde{L}_{n-1} &= \bar{L}_{n-1}, \\
\widetilde{L}_{n-2} &= \bar{P}_{n-1} \bar{L}_{n-2} \bar{P}_{n-1}, \\
&\;\;\vdots \\
\widetilde{L}_1 &= \bar{P}_{n-1} \bar{P}_{n-2} \ldots \bar{P}_{2} \bar{L}_1 \bar{P}_2 \ldots \bar{P}_{n-2} \bar{P}_{n-1}.
\end{align*}
\paragraph{Note:} If $\bar{A} \in R^{n \times n}$ is non-singular, the pivoted LU decomposition $\bar{P}\bar{A} = \bar{L}\bar{U}$ always exists.
\\\\
We can easily add column pivoting to Algorithm 2.8:
\paragraph{Algorithm 2.9}(Outer product LU decomposition with column pivoting) \\
input: matrix $\bar{A} = [a_{i,j}]^n_{i,j = 1} \in R^{n\times n}$ \\
output: pivoted LU decomposition $ \bar{L}\bar{U} = \bar{P}\bar{A}$
\begin{enumerate}
\item Set $\bar{A}_1 = \bar{A}, \bar{p} = [1,2,\ldots,n]$
\item For $i = 1,2,\ldots,n$ \begin{itemize}
\item compute: $k = \argmax_{1 \leq j \leq n} | a^{(i)}_{p_j,i} |$ \% find pivot
\item swap: $p_i \leftrightarrow p_k$
\item $\bar{l}_i := \bar{a}^{(i)}_{:,i} / a^{(i)}_{p_i,i}$
\item $\bar{u}_i := a^{(i)}_{p_i,:}$
\item compute: $\bar{A}_{i+1} = \bar{A}_i - \bar{l}_i \cdot \bar{u}_i$
\end{itemize}
\item set $\bar{P} := [\bar{e}_{p_1},\bar{e}_{p_2},\ldots, \bar{e}_{p_n}]^T$ \% $\bar{e}_i$ is i-th unit vector
\item set $\bar{L} = \bar{P}[\bar{l}_1,\bar{l}_2,\cdots,\bar{l}_n]$
\end{enumerate}
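The following is a minimal NumPy sketch of Algorithm 2.9, for illustration only. It assumes a square, non-singular input and restricts the pivot search to the rows $p_i,\ldots,p_n$, which is equivalent because the rows $p_1,\ldots,p_{i-1}$ of $\bar{A}_i$ are already zero; the function name is an arbitrary choice.
\begin{verbatim}
import numpy as np

def lu_column_pivot(A):
    # Outer-product LU with column pivoting (sketch of Algorithm 2.9).
    # Returns P, L, U with P @ A = L @ U.
    A = A.astype(float).copy()
    n = A.shape[0]
    p = np.arange(n)                   # permutation vector p
    L = np.zeros((n, n))
    U = np.zeros((n, n))
    for i in range(n):
        # find pivot among the rows p_i, ..., p_n
        k = i + np.argmax(np.abs(A[p[i:], i]))
        p[[i, k]] = p[[k, i]]          # swap p_i and p_k
        L[:, i] = A[:, i] / A[p[i], i] # l_i
        U[i, :] = A[p[i], :]           # u_i
        A = A - np.outer(L[:, i], U[i, :])  # A_{i+1} = A_i - l_i u_i
    P = np.eye(n)[p]                   # P = [e_{p_1}, ..., e_{p_n}]^T
    return P, P @ L, U                 # L = P [l_1, ..., l_n]
\end{verbatim}
For the matrix $\bar{A} = \left[\begin{smallmatrix} 0 & 1 \\ 1 & 1 \end{smallmatrix}\right]$ from the beginning of the section, the sketch returns $\bar{P} = \left[\begin{smallmatrix} 0 & 1 \\ 1 & 0 \end{smallmatrix}\right]$, $\bar{L} = \bar{I}$ and $\bar{U} = \left[\begin{smallmatrix} 1 & 1 \\ 0 & 1 \end{smallmatrix}\right]$.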
\paragraph{Example 2.10} \textit{(omitted)}
\section*{2.6: Cholesky decomposition}
If $\bar{A}$ is symmetric and positive definite, i.e., all eigenvalues of $\bar{A}$ are strictly positive or, equivalently, $\bar{x}^T\bar{A}\bar{x} > 0$ for all $\bar{x} \neq \bar{0}$, we can compute a symmetric decomposition of $\bar{A}$.
\paragraph{Note:} If $\bar{A}$ is symmetric and positive definite, then the \textit{Schur complement} $\bar{S} := \bar{A}_{2:n,2:n} - \frac{1}{a_{1,1}} \bar{a}_{2:n,1} \bar{a}^T_{2:n,1}$ is symmetric and positive definite as well. In particular, it holds that $s_{i,i} > 0$ and $a_{i,i} > 0$.
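As a small illustration, the matrix
\[
\bar{A} = \begin{bmatrix} 4 & 2 \\ 2 & 3 \end{bmatrix}
\]
is symmetric and positive definite (its leading principal minors are $4 > 0$ and $\det \bar{A} = 8 > 0$), and its Schur complement is $\bar{S} = 3 - \frac{2 \cdot 2}{4} = 2 > 0$.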
\paragraph{Definition 2.11} A decomposition $\bar{A} = \bar{L}\bar{L}^T$ with a lower triangular matrix $\bar{L}$ with positive diagonal elements is called \textit{Cholesky decomposition of $\bar{A}$}.
\paragraph{Note:} A Cholesky decomposition exists if $\bar{A}$ is symmetric and positive definite.
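Continuing the $2 \times 2$ illustration from above,
\[
\begin{bmatrix} 4 & 2 \\ 2 & 3 \end{bmatrix}
= \begin{bmatrix} 2 & 0 \\ 1 & \sqrt{2} \end{bmatrix}
\begin{bmatrix} 2 & 1 \\ 0 & \sqrt{2} \end{bmatrix},
\]
where the diagonal entries $2 = \sqrt{a_{1,1}}$ and $\sqrt{2} = \sqrt{s_{1,1}}$ are positive, as required by Definition 2.11.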
\paragraph{Algorithm 2.12} (outer product Cholesky decomposition) \\
input: matrix $\bar{A}$ symmetric and positive definite \\
output: Cholesky decomposition $\bar{A} = \bar{L}\bar{L}^T =
[\bar{l}_1,\bar{l}_2,\ldots,\bar{l}_n] [\bar{l}_1,\bar{l}_2,\ldots,\bar{l}_n]^T$
\begin{enumerate}
\item set: $\bar{A}_1 := \bar{A}$
\item for $i = 1,2,\ldots,n$ \begin{itemize}
\item set: $\bar{l}_i := a^{(i)}_{:,i} / \sqrt{a^{(i)}_{i,i}}$
\item set: $\bar{A}_{i+1} := \bar{A}_{i} - \bar{l}_i\bar{l}^T_i$
\end{itemize}
\item set: $\bar{L} = [\bar{l}_1,\bar{l}_2,\ldots,\bar{l}_n]$
\end{enumerate}
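The following is a minimal NumPy sketch of Algorithm 2.12, for illustration only; it assumes the input is symmetric and positive definite, which guarantees $a^{(i)}_{i,i} > 0$ in every step, and the function name is an arbitrary choice.
\begin{verbatim}
import numpy as np

def cholesky_outer(A):
    # Outer-product Cholesky (sketch of Algorithm 2.12).
    # Returns a lower triangular L with A = L @ L.T.
    A = A.astype(float).copy()
    n = A.shape[0]
    L = np.zeros((n, n))
    for i in range(n):
        # l_i = a_{:,i} / sqrt(a_{i,i}),  A_{i+1} = A_i - l_i l_i^T
        L[:, i] = A[:, i] / np.sqrt(A[i, i])
        A = A - np.outer(L[:, i], L[:, i])
    return L
\end{verbatim}
Applied to the $2 \times 2$ matrix from the illustration above, it returns the factor $\bar{L} = \left[\begin{smallmatrix} 2 & 0 \\ 1 & \sqrt{2} \end{smallmatrix}\right]$.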
The computational cost is $\frac{1}{6}n^{3}+O(n^{2})$ and thus only half the cost of LU decomposition.
\paragraph{Example 2.13} \textit{(omitted)}
\end{document}