\documentclass[12pt]{report}
%Structure
\setcounter{tocdepth}{5}
\setcounter{secnumdepth}{5}
%Structure
%Layout
\usepackage{geometry}
\geometry{
a4paper,
total={170mm,257mm},
left=20mm,
top=20mm,
}
\setlength{\footskip}{45pt}
%Layout
%Text
\renewcommand{\familydefault}{\sfdefault}
\usepackage{sfmath}
\usepackage{parskip}
\usepackage{physics}
\usepackage{graphicx}
%Text
%Extra
%Extra
%Technical Stuff
\usepackage{float}
\usepackage{amsmath}
\newcommand{\genstirlingI}[3]{%
\genfrac{[}{]}{0pt}{#1}{#2}{#3}%
}
\newcommand{\genstirlingII}[3]{%
\genfrac{\{}{\}}{0pt}{#1}{#2}{#3}%
}
\newcommand{\geneulerI}[3]{%
\genfrac{\langle}{\rangle}{0pt}{#1}{#2}{#3}%
}
\newcommand{\stirlingI}[2]{\genstirlingI{}{#1}{#2}}
\newcommand{\stirlingII}[2]{\genstirlingII{}{#1}{#2}}
\newcommand{\eulerI}[2]{\geneulerI{}{#1}{#2}}
\everymath{\displaystyle}
\DeclareMathOperator{\cosec}{cosec}
\DeclareMathOperator{\cosech}{cosech}
\DeclareMathOperator{\arccosec}{arccosec}
\DeclareMathOperator{\arcsinh}{arcsinh}
\DeclareMathOperator{\arccosh}{arccosh}
\DeclareMathOperator{\arctanh}{arctanh}
\DeclareMathOperator{\arcsech}{arcsech}
\DeclareMathOperator{\arccoth}{arccoth}
\DeclareMathOperator{\arccosech}{arccosech}
\DeclareMathOperator{\Ln}{Ln}
\DeclareMathOperator{\sinc}{sinc}
%Technical Stuff
\begin{document}
\begin{titlepage}
\begin{center}{\Large \bfseries A CALCULUS REVISION} \par \includegraphics[width=0.4\textwidth]{hqdefault.jpg}\par~\\
“The calculus was the first achievement of modern mathematics and it is difficult to overestimate its importance. I think it defines more unequivocally than anything else the inception of modern mathematics; and the system of mathematical analysis, which is its logical development, still constitutes the greatest technical advance in exact thinking.”
\par {\bfseries -John von Neumann} \par~\\
“I never failed in mathematics. Before I was fifteen, I had mastered differential and integral calculus.”\par
{\bfseries -Albert Einstein}\par~\\
“But just as much as it is easy to find the differential of a given quantity, so it is difficult to find the integral of a given differential. Moreover, sometimes we cannot say with certainty whether the integral of a given quantity can be found or not.”\par
{\bfseries -Johann Bernoulli}\par~\\
“Who has not been amazed to learn that the function $e^x$, like a phoenix rising from its own ashes, is its own derivative?”
\par {\bfseries -François Le Lionnais}\par~\\
“The standard high school curriculum traditionally has been focused towards physics and engineering. So calculus, differential equations, and linear algebra have always been the most emphasized, and for good reason - these are very important.”\par {\bfseries -Terence Tao}
\vfill
by Manoj Khatri
\end{center}
\end{titlepage}
\tableofcontents
\chapter{PreCalc Stuffs}
\section{Discrete Number Theory}
\begin{enumerate}
\item {\bfseries Natural Functions}
\item {\bfseries Complex Numbers}
\item {\bfseries Counting Numbers}
\item {\bfseries Positive Integer Pairs}
\item {\bfseries Recurrence Break}
\item {\bfseries Finite Calculus}
\item {\bfseries Binomial Numbers}
\item {\bfseries Stirling Numbers}
\item {\bfseries Harmonic Numbers}
\item {\bfseries Fibonacci Numbers}
\item {\bfseries Generating Functions}
$$\left[B_t(x)=\sum_{k\ge 0}\binom{tk+1}{k}\frac{1}{tk+1}x^k:~B_t(x)^r=\sum_{k\ge 0}\binom{tk+r}{k}\frac{r}{tk+r}x^k\right]$$$$\left[\frac{B_t(x)^r}{1-t+tB_t(x)^{-1}}=\sum_{k\ge 0}\binom{tk+r}{k}x^k\right]$$
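[a worked check, added for illustration (not in the original notes): the $t=1$ case of $B_t(x)$ gives the geometric series and the $t=2$ case gives the Catalan numbers]
$$\left[B_1(x)=\sum_{k\ge 0}\binom{k+1}{k}\frac{x^k}{k+1}=\sum_{k\ge 0}x^k=\frac{1}{1-x},~~B_2(x)=\sum_{k\ge 0}\binom{2k+1}{k}\frac{x^k}{2k+1}=1+x+2x^2+5x^3+14x^4+\ldots\right]$$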
\item {\bfseries Probability Theory}
\item {\bfseries Asymptotics}
$$\left[\ln(1+O(f(n)))=O(f(n))~[f(n)\prec 1],~e^{O(f(n))}=1+O(f(n))~[f(n)=O(1)]\right]$$$$\left[(1+O(f(n)))^{O(g(n))}=1+O(f(n)g(n))~[f(n)\prec
1,~f(n)g(n)=O(1)]\right]$$
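[a worked instance, added for illustration (not in the original notes): taking $f(n)=\frac{1}{n^2}$ and $g(n)=n$ in the last rule, so that $f(n)\prec 1$ and $f(n)g(n)=O(1)$]
$$\left[\left(1+O\left(\frac{1}{n^2}\right)\right)^{n}=1+O\left(\frac{1}{n}\right)\right]$$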
\end{enumerate}
\section{Analytic Geometry}
\subsection{First Degree}
\begin{enumerate}
\item {\bfseries 2D Straight Lines:}
\item {\bfseries 3D Straight Planes:}
[The St. Line in 3D]:
\item {\bfseries Attached Strings:}
\item {\bfseries 2D Circles:}
[Tangent]:
\item {\bfseries 3D Spheres:}
[Tangent]:
\end{enumerate}
\subsection{Rotation of Axes:}
\begin{enumerate}
\item ~[Translation]:
\item ~[Rotations]:
\end{enumerate}
\subsection{Second Degree:}
\begin{enumerate}
\item {\bfseries General Equation:}
\item {\bfseries Pair of St. Lines:}
\item {\bfseries Parabola:}
\item {\bfseries Ellipse:}
\item {\bfseries Hyperbola:}
\end{enumerate}
{\bfseries Standards:}
\begin{center}
\begin{tabular}{|c|c|c|c|}
\hline
Standards & Parabola & Ellipse & Hyperbola \\[0.5cm]\hline
&&&\\[0.2cm]
Center & - & & \\[0.5cm]
Vertices & & & \\[0.5cm]
Eccentricity & & & \\[0.5cm]
Foci & & & \\[0.8cm]
Directrices & & & \\[0.5cm]
Major Axis & - & & \\[0.5cm]
Minor Axis & - & & \\[0.5cm]
Latus Rectum & & & \\[0.5cm]
Tangents & & & \\[0.5cm]\hline
\end{tabular}
\end{center}
\section{Vector Algebra}
[a geometric object with magnitude and direction, not tied to any particular physical space]\par
\begin{enumerate}
\item {\bfseries Linearity:}\par [a set of vectors can be linearly combined to generate a span of vectors, each linearly independent basis vector adding a new dimension; the vectors generally live in a coordinate system that positions them through the centre of the system, so that the span is the locus, or the grid, of their heads]\par
{[note to make sense of somewhere: any $k$ vectors in $n$ dimensions are linearly dependent if $k>n$]}\par
\par
this is a story: pairs of numbers in a coordinate system showed some exciting properties, so they were thought of as other entities called vectors; in other words they represent the vectors which are the platonic forms of themselves, i.e. $(1,2)$ is 1 times some vector plus 2 times another vector, and which vectors those are depends on what you choose as the basis. Once you choose the basis, the combinations form a family of vectors called the span. Usually, for an initial understanding, the vectors are arrows in space (so the basis vectors are some arrows); we take the arrows of length one along the $x$ and $y$ axes, called $i$ and $j$, and when we have some other basis we still represent it in our $ij$ system, writing those basis vectors in terms of $i$ and $j$ for understanding purposes\par
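[a toy example, added for illustration: the same arrow has different coordinates in different bases; here $b_1=(1,1)$ and $b_2=(0,1)$ are an assumed alternative basis]
$$\left[\left(\begin{array}{c}3\\2\end{array}\right)=3\,i+2\,j=3\,b_1-b_2\right]$$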
\item {\bfseries Matrix Multiplication:}\par
comment: a linear transformation keeps the grid lines evenly spaced and parallel; the way the vectors are moved around by some linear transformation can be encapsulated inside a matrix\par
things to observe from the representation of a vector: the number of components gives its dimension, i.e. the space it lives in
[so what the matrices do is transform the vectors, which we have two ways to think about:]\par
1) our basis (which is usually $i$ and $j$) remains the same and the coordinates change; to get those new coordinates just multiply the matrix with the vector\par
2) the coordinates remain the same and our basis changes; the column vectors of the matrix give those new basis vectors in terms of our coordinates
\par
note: the first view is more popular, while the second plays a role in change-of-basis arguments
after thinking about matrix multiplication for a single vector, don't forget to think about it for the whole span
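[a toy example, added for illustration with an arbitrary shear matrix: the columns of the matrix are exactly where the basis vectors $i$ and $j$ land]
$$\left[A=\left[\begin{array}{cc}1&1\\0&1\end{array}\right]:~A\,i=\left(\begin{array}{c}1\\0\end{array}\right),~A\,j=\left(\begin{array}{c}1\\1\end{array}\right),~A\left(\begin{array}{c}x\\y\end{array}\right)=\left(\begin{array}{c}x+y\\y\end{array}\right)\right]$$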
A BREAK HERE:\par there is a special class of linear transformations in the dual space, by virtue of which
our basis likes to be orthonormal (mention inner and outer products here). If the basis is orthonormal then the matrices can be split apart nicely. And for those matrices whose columns are themselves orthogonal (not necessarily normalized), the inverse is the transpose only once the columns are normalized; for an orthonormal-column (orthogonal) matrix $Q$, $Q^{-1}=Q^{T}$.
to find the eigenvalues solve the characteristic equation $|A-\lambda I|=0$, i.e. for a $3\times 3$ matrix $$\left[\lambda^3-S_1\lambda^2+S_2\lambda-S_3=0\right]$$
where $S_1$ is the sum of the main diagonal elements, $S_2$ is the sum of the minors of the main diagonal elements, and $S_3$ is the determinant, while for a $2\times 2$ matrix $$\left[\lambda^2-s_1\lambda+s_2=0\right]$$
where $s_1$ is the sum of the main diagonal elements and $s_2$ is the determinant
[eigenvalues of upper and lower triangular matrices are their diagonal elements]\par
properties:\par
1) every square matrix satisfies its own characteristic equation (i.e. replace $\lambda$ by the matrix); this is used to find powers of a matrix or its inverse\par
2) the sum of the diagonal elements equals the sum of the eigenvalues, while their product equals the determinant\par
3) the matrix is invertible if and only if all eigenvalues are non-zero\par
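[a worked example, added for illustration with an arbitrary $2\times 2$ matrix: here $s_1=7$ and $s_2=10$, and property 1) (the Cayley-Hamilton fact) can be checked directly]
$$\left[A=\left[\begin{array}{cc}4&1\\2&3\end{array}\right]:~\lambda^2-7\lambda+10=0\Rightarrow\lambda=5,2;~~(A-5I)\left(\begin{array}{c}1\\1\end{array}\right)=0,~(A-2I)\left(\begin{array}{c}1\\-2\end{array}\right)=0;~~A^2=7A-10I\right]$$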
>>a linear transformation can be made neat, i.e. diagonal, by representing it in an eigenbasis, so that the resulting matrix acts on the vectors of the eigenbasis by pure scaling (there are many choices of eigenvectors, but whichever complete set you use, the resulting diagonalization has the eigenvalues as its diagonal elements); the basis-change idea above applies
>>an example: if you want to calculate the 100th power of the $2\times 2$ matrix $\left[\begin{array}{cc}1&3\\4&5\end{array}\right]$, then notice the flow:
>>the matrix represents a linear transformation, which we convert into the eigenbasis, do the power operation there, and then convert that linear transformation back into our basis\par
-another way of thinking about eigendecomposition is that every diagonalizable square matrix can be broken up as $A=$ transformation $\times$ diagonal $\times$ transformation inverse, $A=UEU^{-1}$\par
-i.e. this can be read as: the matrix $A$ transforms the column vectors of $U$ into the scaled (by the factors given in $E$) versions of those same column vectors, which is just the definition of eigenvectors\par
-the transformation matrix will be orthogonal for symmetric (or Hermitian in the complex case) matrices, i.e. those with $A^{T}=A$, or of the form $A^{T}A$ or $AA^{T}$
\par and taking it further closes the loop: a unitary matrix can also be decomposed like a Hermitian one, but with not necessarily real eigenvalues, $A=$ unitary transformation $\times$ diagonal $\times$ unitary transformation inverse\par
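[a worked example, added for illustration (arbitrary matrix, the same one used above): diagonalize and then take powers in the eigenbasis]
$$\left[A=\left[\begin{array}{cc}4&1\\2&3\end{array}\right]=UEU^{-1}=\left[\begin{array}{cc}1&1\\1&-2\end{array}\right]\left[\begin{array}{cc}5&0\\0&2\end{array}\right]\frac{1}{3}\left[\begin{array}{cc}2&1\\1&-1\end{array}\right]\right]$$
$$\left[A^{n}=UE^{n}U^{-1}=\frac{1}{3}\left[\begin{array}{cc}2\cdot 5^{n}+2^{n}&5^{n}-2^{n}\\2\cdot 5^{n}-2\cdot 2^{n}&5^{n}+2\cdot 2^{n}\end{array}\right]\right]$$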
\par now the more formal and rigorous treatment follows
\begin{enumerate}
\item ~[Cartesian Coordinates]:
\item ~[Cylindrical Coordinates]:
\item ~[Spherical Coordinates]:
\end{enumerate}
\item {\bfseries Linear Equations}
\item {\bfseries Scalar Multiplication:}
\item {\bfseries Vector Addition:}
\item {\bfseries Scalar Product:}
\item {\bfseries Vector Product:}
\item {\bfseries Scalar Triple Product:}
\item {\bfseries Vector Triple Product:}
\item {\bfseries Scalar Quadruple Product:}
\item {\bfseries Vector Quadruple Product:}
\item {\bfseries Reciprocal System:}
[such that],
[any vector can be expressed as]
\end{enumerate}
\chapter{Linear Algebra}
\begin{enumerate}
\item Vector Space\par
{[A vector space is a set on which the addition of two vectors and multiplication by an element of a field $K$ are defined; a vector, denoted $|v\rangle$, is thereby expressed as a column of complex numbers]}
A set of $n$ linearly independent vectors in an $n$-dimensional space is called a basis of $V$ and the
vectors are called basis vectors. The vector space spanned by a basis $\{|e_i\rangle\}$
is often denoted as $\mathrm{Span}(\{|e_i\rangle\})$
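[a toy example, added for illustration in $C^2$ with the standard basis (an assumed choice):]
$$\left[|e_1\rangle=\left(\begin{array}{c}1\\0\end{array}\right),~|e_2\rangle=\left(\begin{array}{c}0\\1\end{array}\right);~~\mathrm{Span}(\{|e_1\rangle\})=\left\{\left(\begin{array}{c}a\\0\end{array}\right)\right\},~~\mathrm{Span}(\{|e_1\rangle,|e_2\rangle\})=C^2\right]$$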
\item Linear Operators and Matrices\par
A map $A$ is a linear operator if it acts linearly, $A(a|v\rangle+b|w\rangle)=aA|v\rangle+bA|w\rangle$, for arbitrary vectors $|v\rangle,|w\rangle$ in $V$. Since an arbitrary vector in $V$
can be expanded in the basis, linearity implies that $A$ is fixed by where it sends the basis vectors, $A|e_k\rangle=\sum_j A_{jk}|e_j\rangle$;
ignore the above jargon and just remember that the column vectors of the matrix are where the basis vectors land
\item Dual Vector Space\par
{[A function satisfying the linearity condition is called a linear function, which in component form is a row vector]}
acting such a row vector (a bra) on a column vector (a ket), written $\langle u|v\rangle$, defines the inner product of a bra and a ket
this is a comment: it represents the amount of the component of one vector along the other
The vector space of linear functions on a vector space $V$ ($C^n$ in the present
case) is called the dual vector space, or simply the dual space, of $V$,
and is denoted by $V^*$.
An important linear function is the bra vector obtained from a ket vector,
so the norm of a vector is defined by $\|v\|=\sqrt{\langle v|v\rangle}$
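[a worked example, added for illustration with arbitrary vectors in $C^2$: the bra is the conjugate transpose of the ket]
$$\left[|v\rangle=\left(\begin{array}{c}1\\i\end{array}\right),~\langle v|=(1,-i):~\langle v|v\rangle=1\cdot 1+(-i)(i)=2,~\|v\|=\sqrt{2};~~|w\rangle=\left(\begin{array}{c}2\\0\end{array}\right),~\langle v|w\rangle=2\right]$$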
\item Orthonormal Basis\par
{[A basis $\{|e_i\rangle\}$ that satisfies $\langle e_i|e_j\rangle=\delta_{ij}$]}
is an orthonormal basis, which gives the completeness relation $\sum_i|e_i\rangle\langle e_i|=I$ (and also shows how operators can be written in terms of the basis); that is, the orthonormal vectors satisfy the completeness relation.
The projection operator onto $|e_i\rangle$ is $|e_i\rangle\langle e_i|$.
the nice thing about an orthonormal basis is that an operator acting on those basis vectors can be represented easily in terms of the bra and ket vectors (note that the operator itself is not tied to any particular basis; it is a general operator, and whatever basis we expand our bras and kets in, its components come out in that basis)\par
Given an orthonormal basis, then by multiplying by $I$ on both sides: $A=\sum_{j,k}A_{jk}|e_j\rangle\langle e_k|$ with $A_{jk}=\langle e_j|A|e_k\rangle$;
what this essentially means is that the matrix can be written as the coefficients $A_{jk}$ times the matrices that have a 1 in just one position and 0 elsewhere, which is pretty obvious anyway.
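[a worked example, added for illustration in $C^2$ with the standard orthonormal basis:]
$$\left[|e_1\rangle\langle e_1|+|e_2\rangle\langle e_2|=\left[\begin{array}{cc}1&0\\0&0\end{array}\right]+\left[\begin{array}{cc}0&0\\0&1\end{array}\right]=I,~~\left[\begin{array}{cc}a&b\\c&d\end{array}\right]=a|e_1\rangle\langle e_1|+b|e_1\rangle\langle e_2|+c|e_2\rangle\langle e_1|+d|e_2\rangle\langle e_2|\right]$$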
\item Gram-Schmidt Orthonormalization\par
to construct $k$ orthonormal vectors given $k$ linearly independent vectors:
first step: normalize the first vector, so the first one is $e_1$; then for the second one ($u_2$), subtract the component of $u_2$ along $e_1$ (as a multiple of $e_1$) and finally normalize the resulting vector, and that is your $e_2$;
for the third one, subtract the components of $u_3$ along $e_1$ and $e_2$ (as multiples of those vectors) and normalize\par the matrix can then be QR factorized as $A=QU$ where $Q$ is the orthogonal (hence can be made orthonormal) matrix and $U$ is an upper triangular matrix \item Normal Matrices\par Given a linear operator $A$, its Hermitian conjugate $A^{\dagger}$ is defined by $\langle u|A^{\dagger}|v\rangle=\overline{\langle v|A|u\rangle}$; a matrix is said to be a Hermitian matrix if it satisfies $A^{\dagger}=A$ \par in commenting words, a Hermitian matrix equals its own Hermitian conjugate, which in matrix form is the conjugate transpose\par Let $\{|e_i\rangle\}$ be an orthonormal basis on which a matrix $U$ satisfies $U^{\dagger}U=UU^{\dagger}=I$; then by operating on the basis vectors we obtain another set of orthonormal basis vectors, and such a matrix is called unitary \par these are also the matrices whose inverse is their Hermitian conjugate \item Eigenvalue Problem\par $A|v\rangle=\lambda|v\rangle$, with $|v\rangle$ usually normalized [solved by using the characteristic equation] \item Special Matrices\par All the eigenvalues of a Hermitian matrix are real numbers (not vice versa though). Moreover, two eigenvectors corresponding to different eigenvalues are orthogonal (so they can be made orthonormal)\par further info: a single eigenvalue can have multiple eigenvectors, so if it has $k$ of them it is called $k$-fold degenerate and $k$ independent vectors exist, which can be made orthonormal by the Gram-Schmidt formula; the point is that the eigenvectors of a Hermitian matrix can be made into a completeness relation\par (NOW SURE) the unitary matrices diagonalize the Hermitian matrices, precisely because the eigenvectors are orthogonal (and can be normalized)\par [unitary matrices preserve the norm of vectors; their eigenvalues may not be real but their eigenvectors are orthogonal]\par In case an eigenvalue is $k$-fold degenerate there are $k$ independent eigenvectors corresponding to it, so invoke Gram-Schmidt orthonormalization to obtain an orthonormal basis of this $k$-dimensional subspace. Therefore the set of eigenvectors can be made into a complete set \item Spectral Decomposition\par since for normal matrices the eigenvectors are orthonormal, there is a direct consequence of the completeness theorem for normal matrices:\par it is neat because when you find the matrix elements in those coordinates the matrix is diagonal, so the way the matrix can be represented becomes tidy: let $A$ be a normal matrix with normalized eigenvectors $|v_i\rangle$ and eigenvalues $\lambda_i$; then, as a consequence of the completeness theorem, $A$ is decomposed as $$\left[A=\sum_i\lambda_i|v_i\rangle\langle v_i|\right]$$ The spectral decomposition claims that the operation of $A$ in the one-dimensional subspace spanned by $|v_i\rangle$ is equivalent to multiplication by the scalar $\lambda_i$. \par again, some words for when the eigenvalues are degenerate: if an eigenvalue $\lambda$ is shared by $g$ eigenvectors, then $\sum|v_i\rangle\langle v_i|$ over those eigenvectors is a projection operator onto the $g$-dimensional subspace corresponding to the eigenvalue $\lambda$ (this is just a consequence of having orthogonal eigenvectors). VERY VERY IMPORTANT: the eigendecomposition of a Hermitian matrix gives us the spectral decomposition in the sense that you can write $A=$ (column from $U$)$\times$(corresponding row from $U^{\dagger}$, the product giving a matrix)$\times$(corresponding eigenvalue) $+\ldots$ and so on.
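[a worked example, added for illustration with an arbitrary Hermitian matrix: eigenvalues 3 and 1, with orthonormal eigenvectors $\frac{1}{\sqrt{2}}(1,1)^{T}$ and $\frac{1}{\sqrt{2}}(1,-1)^{T}$]
$$\left[\left[\begin{array}{cc}2&1\\1&2\end{array}\right]=3\cdot\frac{1}{2}\left[\begin{array}{cc}1&1\\1&1\end{array}\right]+1\cdot\frac{1}{2}\left[\begin{array}{cc}1&-1\\-1&1\end{array}\right]\right]$$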
\item Spectral decomposition nature:\par
this is just like the diagonalization; what its implication is remains unclear for now \item Singular Value Decomposition\par a matrix with complex entries, of size $m\times n$, can be decomposed as $A=U\Sigma V^{\dagger}$ (this notion generalizes the eigendecomposition to non-square matrices), where $U$ and $V$ will be unitary matrices and $\Sigma$'s diagonal will contain only non-negative elements (a much more general thing than the eigen-diagonalization)\par INTERPRETATION: the matrix $A$ transforms the column vectors of $V$ into the column vectors of $U$, scaled by the factors given on the diagonal of $\Sigma$.\par an SVD won't necessarily be unique, because it involves finding eigenvalues at the beginning, which can make $V$ not unique\par WHAT DO THESE MATRICES MEAN? just like the eigendecomposition explanation above, the vectors of $V$ are taken to the column vectors of $U$ with the coefficients given by the diagonal of $\Sigma$\par ANOTHER ONE, LIKE THE EIGEN CASE: the resulting SVD can be converted into another decomposition, just as the eigendecomposition of a Hermitian matrix could be converted into the spectral decomposition\par a revision here:\par -in the original eigenvalue decomposition (not Hermitian, so not symmetric) the side transformation matrices may not be unitary (but if the original matrix is symmetric or Hermitian, i.e. of the form $A^{T}A$ or $AA^{T}$ or satisfying $A^{T}=A$, then they will be orthogonal)\par -IMP: hence a non-symmetric matrix also gets an orthogonal SVD; how to do it?\par -first solve the eigenvalue problem for $A^{\dagger}A$, which gives us $V$ (the orthonormal eigenvector matrix) and $\Sigma$ (put the square roots of those eigenvalues along the diagonal, the rest zero) -to get $U$, use $u_i=Av_i/\sigma_i$ for the non-zero singular values (the orthogonality relations, or an inverse, also work)\par
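[a worked example, added for illustration with an arbitrary non-symmetric matrix: $A^{\dagger}A=\mathrm{diag}(0,4)$ gives $V$ and the singular values $2,0$, and $u_1=Av_1/\sigma_1=(1,0)^{T}$]
$$\left[\left[\begin{array}{cc}0&2\\0&0\end{array}\right]=\left[\begin{array}{cc}1&0\\0&1\end{array}\right]\left[\begin{array}{cc}2&0\\0&0\end{array}\right]\left[\begin{array}{cc}0&1\\1&0\end{array}\right]\right]$$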
\item Tensor Product\par Let $A$ be an $m\times n$ matrix and $B$ be a $p\times q$ matrix; then $$A\otimes B=\left[\begin{array}{ccc} a_{11}B & a_{12}B&\cdots \\ a_{21}B & a_{22}B&\cdots\\ \vdots&\vdots&\ddots \end{array}\right]$$ is an $(mp)\times(nq)$ matrix called the tensor product\par -can be specially applied to vectors (vector-vector tensor products) -some properties regarding it, not intuitive right now \end{enumerate} \chapter{Single Variable} \section{The Limit} A limit is the value that a function (or sequence) approaches as the input approaches some value. The limit implies that $f(x)$ can be made arbitrarily close to $L$ by taking $x$ to be sufficiently close to $a$ but not equal to it $$\lim_{x\to a}f(x)=L$$ \subsection{Calculation Methods} \begin{enumerate} \item {[Numerical Method]}\par [an indication of what the limit might be] \item {[One Sided Limits]}\par [the one-sided limits approach strictly from the right or the left of it] $$\left[\lim_{x\to a^+}f(x)=\lim_{x\to a^-}f(x)=L\right]\rightarrow \left[\lim_{x\to a}f(x)=L\right]$$ \item {[Basic Operations]} $$\left[\lim_{x \to a} (f(x)*g(x)) = \lim_{x \to a} f(x)* \lim_{x \to a} g(x)\right]$$ \item {[Sandwich Theorem]} $$[\text{on } a\epsilon I_o,~ f(x)\le g(x)\le h(x), \text{ but n,n at }a]~\&~ \left[\lim_{x \to a} f(x) = \lim_{x \to a} h(x) = L\right]: \left[\lim_{x \to a} g(x) = L\right]$$ \item {[Squeeze Theorem]} $$[\text{on } a\epsilon I_o,~ |f(x)-L|\le g(x), \text{ but n,n at }a]~\&~ \left[\lim_{x \to a} g(x) = 0\right] :\left[\lim_{x \to a} f(x) = L\right]$$ \end{enumerate} \section{The Continuity} [The continuity implies that the limiting value of $f(x)$ equals the functional value of $f(x)$ at $a$] $$\left[\lim_{x\to a}f(x)=f(a)\right]$$ \par {\bfseries NOTE:} \par [continuity on open, closed and domain are defined accordingly] \par \subsection{Properties} \begin{enumerate} \item {[Algebra]} $$[\text{continuity of } f(x) \text{ at }a] ~\&~ [\text{continuity of } g(x) \text{ at } a] : [\text{ continuity of } f(x)*g(x) \text{ at } a]$$ \item {[Composition]} $$[\text{continuity of } g(x)\text{ at } a] ~\&~ [\text{continuity of } f(x)\text{ at } g(a)] : [\text{ continuity of } f(g(x)) \text{ at } a]$$ \item {[Pay-back]} $$\left[\lim_{x \to a} g(x) = L\right]~\&~ [\text{continuity of }f(x)\text{ at } L] :\left[\lim_{x \to a} f(g(x)) = f \left (\lim_{x \to a} g(x)\right) = f(L)\right]$$ \end{enumerate} \section{The Derivative} [The derivative implies the sensitivity of $f(x)$ with respect to $x$] $$\left[\left.\frac{d}{dx}f(x)\right|_{x=a} = \lim_{h \to 0} \frac{f(a+h)-f(a)}{h}=\lim_{x\to a}\frac{f(x)-f(a)}{x-a}\right]$$ \par {\bfseries NOTE:}\par [differentiability on open, closed and domain are defined accordingly]\par {\bfseries THEOREM:}\par [if a function is diffble at a point then it is continuous at the same point but vice-versa is not true] \subsection{Properties} \begin{enumerate} \item {[Algebra]} \par $$\left[~\left.\frac{d}{dx} (f(x)+g(x))\right|_{x=a}=\left.
\frac{d}{dx} f(x)\right|_{x=a}+\left.\frac {d}{dx}g(x) \right|_{x=a}~\right]$$ $$\left[~\left.\frac{d}{dx}(f(x)\times g(x))\right|_{x=a}=g(a) \left.\frac{d}{dx}f(x)\right|_{x=a} +f(a) \left.\frac{d}{dx}g(x)\right|_{x=a}~\right]$$ \item {[Chain Rule]} \par $$\left[\left.\frac{d}{dx}(f(g(x)) \right|_{x=a}=\left.\frac{d}{dx}f(x)\right|_{x=g(a)} \left.\frac{d}{dx}g(x)\right|_{x=a}\right]$$ \item {[L'Hopital]} \begin{center} [on $a \epsilon I_o$,~$f(x)$ and $g(x)$ are dffble but n,n at $a$]~\&~ $\left[\lim_{x\to a}f(x)=\lim_{x\to a}g(x)=0\right]$ \end{center} $$:\left[\lim_{x \to a}\frac{f(x)}{g(x)} = \lim_{x \to a}\frac{f'(x)}{g'(x)}\right]$$ \par {[Other Forms]}\par [the other indeterminate forms are as following] \begin{enumerate} \item ~$\left[\text{Form: $\frac{\infty }{\infty}$}\right]$: $$\left[\lim _{x \to a}\frac {f(x)}{g(x)} = \lim _{x \to a}\frac {\frac {1}{f(x)}}{\frac {1}{g(x)}}\right]$$ \item ~$\left[\text{Form: $0\times \infty$}\right]$: $$\left[\lim_{x \to a}[f(x)\cdot g(x)]= \lim_{x \to a}\frac{f(x)}{\frac{1}{g(x)}}\right]$$ \item ~$\left[\text{Form: $\infty-\infty$}\right]$: $$\left[\lim_{x \to a}[f(x)-g(x)]= \lim_{x \to a}\frac {\frac {1}{g(x)}-\frac {1}{f(x)}}{\frac {1}{f(x)}\cdot \frac{1}{g(x)}}\right]$$ \item ~$\left[\text{Form: $0^0,{\infty}^0, 1^{\infty}$}\right]$: $$\left[\lim_{x \to a}{{f(x)}^{g(x)}}=\lim_{x \to a} e^{\left(g(x)\ln (f(x))\right)}= e^{\lim_{x\to a}g(x)\ln (f(x))}\right]$$ \end{enumerate} \end{enumerate} \subsection{Nice Functions} \begin{enumerate} \item {[Power functions]} \par $$\left[\frac{d}{dx}x^n=nx^{n-1}\right]$$ \item {[Exponential and Logarithms]} $$\left[\frac{d}{dx}a^x=a^x\ln a,~ \frac{d}{dx}\log_{a}x=\frac{1}{x} \log_{a}e, \text{ actually, } \frac{d}{dx}\log_{a} |x|=\frac{\log_{a}e}{|x|}\right]$$ \item {[Trigonometric and their inverses]}\par \begin{tabular}{ccc} $\left[\frac{d}{dx}\sin x=\cos x\right]$ & $\left[\frac{d}{dx}\cos x=-\sin x\right]$ & $\left[\frac{d}{dx}\tan x=\sec ^2 x\right]$ \\[0.5cm] $\left[\frac{d}{dx}\cot x=-\cosec^2 x\right]$ & $\left[\frac{d}{dx}\cosec x=-\cosec x\cot x\right]$ & $\left[\frac{d}{dx}\sec x=\sec x\tan x\right]$ \\[0.5cm] $\left[\frac{d}{dx}\arcsin x=\frac{1}{\sqrt{1-x^2}}\right]$ & $\left[\frac{d}{dx}\arccos x=-\frac{1}{\sqrt{1-x^2}}\right]$ & $\left[\frac{d}{dx}\arctan x=\frac{1}{1+x^2}\right]$ \\[0.5cm] $\left[\frac{d}{dx}\arccot x=-\frac{1}{1+x^2}\right]$ & $\left[\frac{d}{dx}\arccosec x=-\frac{1}{|x|\sqrt{x^2-1}}\right]$ & $\left[\frac{d}{dx}\arcsec x=\frac{1}{|x|\sqrt{x^2-1}}\right]$ \end{tabular} \item {[Hyperbolic and their inverses]} \par \begin{tabular}{ccc} $\left[\frac{d}{dx}\sinh x=\cosh x\right]$ & $\left[\frac{d}{dx}\cosh x=\sinh x\right]$ & $\left[\frac{d}{dx}\tanh x=\sech ^2 x\right]$ \\[0.5cm] $\left[\frac{d}{dx}\coth x=-\cosech ^2 x\right]$ & $\left[\frac{d}{dx}\cosech x=-\cosech x\coth x\right]$ & $\left[\frac{d}{dx}\sech x=-\sech x\tanh x\right]$ \\[0.5cm] $\left[\frac{d}{dx}\arcsinh x=\frac{1}{\sqrt{x^2+1}}\right]$ & $\left[\frac{d}{dx}\arccosh x=\frac{1}{\sqrt{x^2-1}}\right]$ & $\left[\frac{d}{dx}\arctanh x=\frac{1}{1-x^2}\right]$ \\[0.5cm] $\left[\frac{d}{dx}\arccoth x=\frac{1}{1-x^2}\right]$ & $\left[\frac{d}{dx}\arccosech x=\frac{1}{|x|\sqrt{1+x^2}}\right]$ & $\left[\frac{d}{dx}\arcsech x=\frac{1}{|x|\sqrt{1-x^2}}\right]$ \end{tabular} \item {[Absolute Valued]} $$\left[\frac{d}{dx}|x|=\begin{cases} 1 & \text{if } x > 0 \\ -1 & \text{if } x < 0 \end{cases};~~~~~~~\frac{d}{dx}|f(x)|=\begin{cases} f'(x) & \text{if } f(x) > 0 \\ -f'(x) & \text{if } f(x) < 0 \end{cases}\right]$$ \item 
{[Inverses]} \par [if a diffble function is invertible with non-zero derivatives then, it's inverse is a diffble function] $$\left[\frac{d}{dx}f^{-1}(x)=\frac{1}{f'(f^{-1}(x))}\right]$$ \end{enumerate} \subsection{Leibniz Theorem} [if $u(x)$ and $v(x)$ are $n$ times diffble at $a$ then $u(x)v(x)$ is also $n$ times diffble at $a$ given by] $$\left[(uv)^n|_{x=a}=(u^nv+{}^nC_1u^{n-1}v^1+{}^nC_2u^{n-2}v^2+\text{...}+{}^nC_{n-1}u^1v^{n-1}+uv^n)|_{x=a}\right]$$ \section{The Antiderivative} [The antiderivative is a diffble $F(x)$ whose derivative is the original $f(x)$] $$\left[\int f(x)dx=F(x)+C\right]$$ \subsection{Basic Rules of Integration} \begin{enumerate} \item {[Linearity]} $$\left[\int \left(af(x)+bg(x)\right)dx=a\int f(x)dx+b\int g(x)dx\right]$$ \item {[Substitution]} $$[\text{a continuous } f(x)] ~\&~ [\text{a continuously differentiable } g(x)]$$ $$:\left[\int f(g(x))g'(x)dx=\left.\int f(u)du\right|_{u=g(x)}\right]$$ $$[\text{a continuous } f(x)] ~\&~ [\text{a invertible continuously diffble }x=g(t) \text{ with non-zero derivatives}]$$ $$:\left[\int f(x)dx=\left.\int f(g(t))g'(t)dt\right|_{t=g^{-1}(x)}\right]$$ [the integrals are substituted to eradicate the radicals, large powers and complicated parts] \item {[Byparts]} $$[\text{continuously differentiable }u(x)] ~\&~ [\text{continuously differentiable }v(x)]$$ $$:\left[\int u(x)d(v(x))=u(x)v(x)-\int v(x)d(u(x))\right]$$ [the $u(x)$ and $v(x)$ are opted such that $u'(x)$ could be easily integrated, in order for $u(x)$:] \begin{center} [Inverses, Logarithms, Algebraic, Trigonometric/Exponential]\end{center} \item {[Tables]} \begin{enumerate} \item {[Power Function]}$$\left[\int x^ndx=\frac{x^{n+1}}{n+1};~\text{For } p =-1, \int \frac{1}{x}dx=\ln|x|\right]$$ \item {[Exponential and Logarithms]} $$\left[\int a^xdx=a^x\log_{a}e;~\int \log_{a}x=x\log_{a}\left(\frac{x}{e}\right)\right]$$ \item {[Trigonometric Functions]} $$\left[\int \sin xdx=-\cos x,~\int \cos xdx=\sin x,~\int \tan xdx=\ln|\sec x|\right]$$ $$\left[\int \cot xdx=\ln|\sin x| \right]$$ $$\left[\int \cosec xdx=\ln|\cosec x-\cot x|=\ln\left|\tan\left(\frac{x}{2}\right)\right|\right]$$ $$\left[\int \sec xdx=\ln|\sec x+\tan x|=\ln\left|\tan \left(\frac{x}{2}+\frac{\pi}{4}\right)\right|\right]$$ \item {[Hyperbolic Functions]} $$\left[\int \sinh xdx=\cosh x,~\int \cosh xdx=\sinh x,~\int \tanh xdx=\ln|\cosh x|\right]$$ $\left[\int \coth xdx=\ln|\sinh x|,~\int \cosech xdx=\ln\left|\tanh \left(\frac{x}{2}\right)\right|,~\int \sech xdx=\arctan(\sinh x)\right]$ \item {[Some Standard Functions]} \par $$\left[\int \frac{1}{x^2+a^2}dx=\frac{1}{a}\arctan\frac{x}{a},~\int \frac{\pm 1}{x^2-a^2}dx=\frac{1}{a}\arccoth\frac{x}{a},~\frac{1}{a}\arctanh\frac{x}{a}\right]$$ $$\left[\int \frac{1}{\sqrt{a^2-x^2}}dx=\arcsin\frac{x}{a},~\int \frac{1}{\sqrt{x^2\pm a^2}}dx=\arcsinh\frac{x}{a},~\arccosh\frac{x}{a}\right]$$ $$\left[\int \sqrt{a^2-x^2}dx=\frac{x}{2}\sqrt{a^2-x^2}+\frac{a^2}{2}\arcsin\frac{x}{a}\right]$$ $$\left[\int \sqrt{x^2\pm a^2}dx=\frac{x}{2}\sqrt{x^2\pm a^2}\pm\frac{a^2}{2}\left[\arcsinh\frac{x}{a},~\arccosh\frac{x}{a}\right]\right]$$ $$\left[\int e^{ax}\sin bxdx=\frac{1}{a^2+b^2}\left[\sin bx\frac{d}{dx}e^{ax}-e^{ax}\frac{d}{dx}\sin bx\right]\right]$$ $$\left[\int e^{ax}\cos bxdx=\frac{1}{a^2+b^2}\left[\cos bx\frac{d}{dx}e^{ax}-e^{ax}\frac{d}{dx}\cos bx\right]\right]$$ \item {[Inverse of Continuous Functions]}$$\left[\int f^{-1}(x)dx =xf^{-1}(x)-\left.\int f(u)du\right|_{u=f^{-1}(x)}\right]$$ \end{enumerate} \subsection{Other Techniques} \begin{enumerate} \item ~[Reduction Formulas] 
$$\left[I_n=\int f(x,n)dx:~I_m=\int f(x,m)dx,~[m<n]\right]$$ \item ~[Rational Functions] $$\left[Q(x)=(x-a)^m;~\frac{P(x)}{Q(x)}=\frac{A_1}{x-a}+\ldots +\frac{A_m}{(x-a)^m}\right]$$ \item ~[Trigonometric Functions] $$\left[\int \text{R}\left(\sin x,\cos x\right)dx:~u=\tan \frac{x}{2};~\text{R}(-\sin x, -\cos x)=\text{R} (\sin x,\cos x):~u=\tan x\right]$$ \item ~[Non Continuous] $$\text{[a continuous certainly n,n elementary,~ pathological except jumps may nevertheless]}$$ \end{enumerate} \end{enumerate} \section{The Integral} \begin{enumerate} \item ~[Informal Definition]\par [The integral can be interpreted as the signed area of the graph of $f(x)$ bounded on $I_c[a,b]$] $$\left[(A)\int_{a}^{b}f(x)dx\right]$$\par [condition for (A) integrability]: [$f(x)$ must be continuous on $I_c[a,b]$] \item ~[Formal Definition]\par [The projected sum of $f(x)$ on $I_c[a,b]$ as is divided into an arbitrary partition given by] $$\left[a=x_0<x_1<\ldots <x_n=b\right]$$ $$\left[S_n=\sum_{i=0}^{n-1}f(x^*_i)\Delta x_i\right]$$ [where], $$\left[x_i \le x^*_i\le x_{i+1},~ \Delta x_i=x_{i+1}-x_i ~[i=0,1,2,\ldots]\right]$$ \par [is riemann sum of $f(x)$ on $I_c[a,b]$ in which each term is consequently the signed area of the rectangles with height $f(x_i^*)$ and width $\Delta x_i$ under the graph of $f(x)$ on $I_c[a,b]$]\par [and the integral] $$\left[(R)\int_{a}^{b}f(x)dx=\lim_{max \Delta x_i \to 0} \sum_{i=0}^{n-1}f(x^*_i) \Delta x_i\right]$$ [of $f(x)$ on $I_c[a,b]$ is the limit of the sum $S_n$, provided that the number of sub-divisions tends to infinity and the largest of them tends to zero which implies the signed area of the limiting rectangles under the graph of $f(x)$ on $I_c[a,b]$]\par [condition for (R) integrability]: [$f(x)$ must be bounded with finite discontinuities on $I_c[a,b]$]\par [{\bfseries Fundamental Theorem Of Calculus}]\par [if an anti derivative'd function $f(x)$ is integrable on $I_c[a,b]$ then] $$\left[(x)\int_{a}^{b}f(x)dx=\left.\int f(x)dx\right|_{x=b}-\left.\int f(x)dx\right|_{x=a}\right]$$ \end{enumerate} \subsection{Extended Riemann Integrals} \begin{enumerate} \item {[Unbounded Functions]} $$\left[\int_{a}^{b}f(x)dx=\lim_{\epsilon \to 0^+}\int_{a}^{c-\epsilon}f(x)dx+\lim_{\epsilon ' \to 0^+}\int_{c+\epsilon '}^{b}f(x)dx\right]$$ $$\left[PV\int_{a}^{b} f(x)dx=\lim_{\epsilon \to 0^+}\left[\int_{a}^{c-\epsilon}f(x)dx+\int_{c+\epsilon}^{b}f(x)dx\right ]\right]$$ \item {[Infinite Intervals]} $$\left[\int_{a}^{\infty}f(x)dx=\lim_{b \to \infty}\int_{a}^{b}f(x)dx,~\int_{-\infty}^{b}f(x)dx=\lim_{a \to -\infty }\int_{a}^{b}f(x)dx\right]$$ $$\left[PV\int_{-\infty}^{\infty}f(x)dx=\lim_{m \to \infty}\int_{-m}^{m}f(x)dx\right]$$ \item {[Riemann Integrals]}\par [the improper integral of integrable $f(x)$ on $I_c[a,b]$ coincide with riemann integrals] \end{enumerate} \subsection{Properties of Riemann Integrals} \begin{enumerate} \item {[Linearity]} $$\left[\int_{a}^{b}(mf(x)+ng(x))dx=m\int_{a}^{b} f(x)dx+n\int_{a}^{b} g(x)dx\right]$$ \item {[Convention]} $$\left[\int_{a}^{b}f(x)dx=-\int_{b}^{a}f(x)dx\right]$$ \item {[Interval Addition]} $$\left[\int_{a}^{b}f(x)dx=\int_{a}^{c}f(x)dx+\int_{c}^{b}f(x)dx; ~~(a<c<b)\right]$$ \item {[Mean Value Theorem]}\par \begin{center}[a continuous $f(x)$ on $I_c[a,b]\epsilon I_o$ attains min and max of $m$ and $M$ respectively on $I_c[a,b]$]\end{center} $$:\left[m(b-a) \le \int_{a}^{b}f(x)dx \le M(b-a)\right]$$ \item {[Change of Variables]} \par \begin{center}[a continuously differentiable $g(x)$ on $I_c[a,b]\epsilon I_o$]] ~$\&$~ [a continuous $f(x)$ on 
$p[I_c[a,b]]\epsilon I_o$]\end{center} $$:\left[\int_{a}^{b}f(g(x))g'(x)dx=\int_{g(b)}^{g(a)}f(u)du\right]$$ \begin{center}[a continuous $f(x)$ on $I_c[a,b]\epsilon I_0$] $\&$ \par [an invertible continuously diffble $g(t)$ on $p[I_c[a,b]]\epsilon I_o$ with non-zero derivatives]\end{center} $$:\left[\int_{a}^{b}f(x)dx=\int_{g^{-1}(b)}^{g^{-1}(a)}f(g(t))g'(t)dt\right]$$ \item {[Mirror Properties]}\par \begin{enumerate} \item {[King Rule]} \begin{center}[a continuous $f(x)$ on $I_c[a,b]\epsilon I_o$]\end{center} $$: \left[\int_{a}^{b}f(x)dx=\int_{a}^{b}f(a+b-x)dx\right]$$ \item {[Queen Rule]} \begin{center}[a continuous $f(x)$ on $I_c[0,a]\epsilon I_o$]\end{center} $$: \left[\int_{0}^{a}f(x)dx=\begin{cases}2\int_{0}^{\frac{a}{2}}f(x)dx & \text{if } f(a-x)=f(x)\\0& \text{if } f(a-x)=-f(x)\end{cases}\right]$$ \item {[Jack Rule]} \begin{center}[a continuous $f(x)$ on $I_c[-a,a]\epsilon I_o$]\end{center} $$: \left[\int_{-a}^{a}f(x)dx=\int_{0}^{a}(f(x)+f(-x))dx=\begin{cases}2\int_{0}^{a}f(x)dx & \text{if } f(-x)=f(x)\\0& \text{if } f(-x)=-f(x)\end{cases}\right]$$ \end{enumerate} \item {[Additional Techniques]} \begin{enumerate} \item {[Beta and Gamma Functions]}\par [the functions, respectively are called beta and gamma functions] $$\left[\beta (m,n)=\int_{0}^{1}x^{m-1}(1-x)^{n-1}dx;~~m,n>0\right]$$ $$\left[\Gamma (n)=\int_{0}^{\infty}e^{-x}x^{n-1}dx;~~n>0\right]$$ \begin{enumerate} \item ~$\left[\beta (m,n)=\beta (n,m)\right],~~~~\left[\beta (m,n)=\frac{\Gamma (m)\Gamma (n)}{\Gamma (m+n)}\right]$ \item $\left[\Gamma (n+1)=n\Gamma (n) \text{ for n is positive integer}\right]$,~~~~$\left[\Gamma (1)=1,~~\Gamma (\frac{1}{2})=\sqrt\pi\right]$ \item ~[Euler's Reflection]: $\left[\Gamma (m)\Gamma (1-m)=\frac{\pi}{\sin m\pi},~~~0<m<1\right]$ \item ~[Gamma's Sinusoidal]: $\left[\int_0^{\frac{\pi}{2}}\sin ^px\cos ^qxdx=\frac{\Gamma \left(\frac{p+1}{2}\right)\Gamma \left(\frac{q+1}{2}\right)}{2\Gamma \left(\frac{p+q+2}{2}\right)}~~p,q>-1\right]$ \end{enumerate} \item {[Important Results from Reduction Formulas]} \begin{enumerate} \item ~$\left[I_n=\int_0^{\frac{\pi}{2}}\sin ^nxdx=\frac{n-1}{n}I_{n-2}\right]$ \item ~$\left[I_n=\int_0^{\frac{\pi}{2}}\cos ^nxdx=\frac{n-1}{n}I_{n-2}\right]$ \end{enumerate} \end{enumerate} \item{[Leibniz Rule]}\par \begin{center}[a continuously diffble $a(t)$ on $I_c[t_1,t_2]\epsilon I_o$ \& a continuously diffble $b(t)$ on $I_c[t_1,t_2]\epsilon I_o$] ~$\&$\par [a $f(x,t)$ and its pd $f_t(x,t)$ continuous on $D_c([a(t),b(t)]\times[t_1,t_2])\epsilon D_o$]\end{center} $\left[\text{for }t\epsilon I_c[t_1,t_2] \right]$ $$\left[\frac{d}{dt}\int_{a(t)}^{b(t)}f(x,t)dx=f(b(t),t)\frac{d}{dt}b(t)-f(a(t),t)\frac{d}{dt}a(t)+\int_{a(t)}^{b(t)}\frac{\partial}{\partial t}f(x,t)dx\right]$$ \end{enumerate} \chapter{Multi Variable} \section{The Derivative} Total derivative bhaneko partial derivatives lai row vector ko form ma arrange garni ho, aba teslai gradient ma convert garnu paryo bhane take transpose \begin{enumerate} \item {[Partial Derivatives]}\par [The partial derivative implies the sensitivity of $f(x,y)$ with respect to $x$ at $(a,b)$] $$\left[\left.\frac{\partial}{\partial x}f(x,y)\right|_{(x,y)=(a,b)} = \lim_{h \to 0} \frac{f(a+h,b)-f(a,b)}{h}\right]$$ \item {[Directional Derivatives]} \par [The directional derivative implies the sensitivity of $f(x,y)$ in the direction $\vec {u}$ at $(a,b)$] $$\left[\left.\frac{\partial}{\partial u}f(x,y)\right|_{(x,y)=(a,b)} = \lim_{h \to 0} \frac{f(a+hm,b+hn)-f(a,b)}{h}\right]$$ \item {[Differentiability]}\par [The differentiability implies that 
$f(x,y)$ is well approximated by a linear map at $(a,b)$] $$\left[\lim_{(h,k)\to (0,0)}\frac{f(a+h,b+h)-f(a,b)+\left.\frac{\partial}{\partial x}f(x,y)\right|_{(x,y)=(a,b)}h+\left.\frac{\partial}{\partial y}f(x,y)\right|_{(x,y)=(a,b)}k}{\sqrt{h^2+k^2}}=0\right]$$ or, $$\left[\lim_{(x,y)\to (a,b)}\frac{f(x,y)-f(a,b)+\left.\frac{\partial}{\partial x}f(x,y)\right|_{(x,y)=(a,b)}(x-a)+\left.\frac{\partial}{\partial y}f(x,y)\right|_{(x,y)=(a,b)}(y-b)}{\sqrt{(x-a)^2+(x-b)^2}}=0\right]$$ \end{enumerate} {\bfseries NOTE:}\par [differentiability on open, closed and domain are defined accordingly]\par {\bfseries THEOREM: }\par [if a function is diffble at a point then it's continuous at the same point but vice-versa is not true] \par [if a function is differentiable at a point the partial derivatives of the function exist at the point but the existence of the partial derivatives do not guarantee the differentiability of the function]\par [if the partial derivatives are continuous on an open point the function is differentiable at the point] $$[f_x(x,y)=[\text{regard y as a constant and dfft wrt x}]]$$ $$[f_u(x,y)=\cos\alpha f_x(x,y)+\sin\alpha f_y(x,y)]$$ \subsection{Properties} \begin{enumerate} \item {[Euler's Properties]} $$\left[f(kx,ky)=k^cf(x,y)\right]:\left[x\frac{\partial}{\partial x}f(x,y)+y\frac{\partial}{\partial y}f(x,y)=cf(x,y)\right]$$ \item {[Clairaut's Properties]}\par \begin{center}[the partial derivatives $f_x(x,y)$ and $f_y(x,y)$ are continuous on $(a,b)\epsilon D_o$]\end{center} $$:\left[\frac{\partial}{\partial x}\left(\frac{\partial}{\partial y}f(x,y)\middle)\right|_{(x,y)=(a,b)}=\frac{\partial}{\partial y}\left(\frac{\partial}{\partial x}f(x,y)\middle)\right|_{(x,y)=(a,b)}\right]$$ \item {[Chain Rules]}\par \begin{center}[$x=x(t)$ \& $y=y(t)$ are diffble at $t_0$] ~$\&$~[$f(x,y)$ is differentiable at $(x(t_0),y(t_0))$]\end{center} $$\left[\left.\frac{d}{dt}f(x(t),y(t))\right|_{t=t_0}\right]$$$$\left[=\left.\frac{\partial}{\partial x}f(x,y)\right|_{(x,y)=(x_0,y_0)}\left.\frac{d}{dt}x(t)\right|_{t=t_0}+\left.\frac{\partial}{\partial y}f(x,y)\right|_{(x,y)=(x_0,y_0)}\left.\frac{d}{dt}y(t)\right|_{t=t_0}\right]$$ [general]: \begin{center}[$x=x(u,v)$ and $y=y(u,v)$ are diffble at $(u_0,v_0)$] ~$\&$~\par[$f(x,y)$ is diffble at $(x(u_0,v_0),y(u_0,v_0))$]\end{center} $$\left[\left.\frac{\partial}{\partial u}f(x(u,v),y(u,v))\right|_{(u,v)=(u_0,v_0)}\right]$$ $$\left[=\left.\frac{\partial}{\partial x}f(x,y)\right|_{(x,y)=(x_0,y_0)}\left.\frac{\partial}{\partial u}x(u,v)\right|_{(u,v)=(u_0,v_0)}+\left.\frac{\partial}{\partial y}f(x,y)\right|_{(x,y)=(x_0,y_0)}\left.\frac{\partial}{\partial u}y(u,v)\right|_{(u,v)=(u_0,v_0)}\right]$$ \end{enumerate} \section{The Integral} \begin{enumerate} \item {[Informal Definition]}\par [The integral can be intped as signed volume of graph of $f(x,y)$ bounded on $D_c([a,b]\times [c,d])$] $$\left[\int_{[a,b]\times[c,d]}f(x,y)dxdy\right]$$ [condition for (V)integrability]: [$f(x,y)$ must be continuous on $D_c([a,b]\times [c,d])$] \item {[Formal Definition]}\par [The projected sum of $f(x,y)$ on $D_c([a,b]\times[c,d])$ as is divided into an arbitrary partition] $$\left[a=x_0<x_1<\ldots <x_n=b\right]$$$$\left[c=y_0<y_1<\ldots <y_n=d\right]$$ $$\left[S_{n,m}=\sum_{i=0}^{n-1}\sum_{j=0}^{m-1}f(x^*_i,y^*_j)~\Delta x_i\Delta y_j\right]$$ [where], $$\left[x_i \le x^*_i\le x_{i+1}, ~\Delta x_i=x_{i+1}-x_i ~[i=0,1,2,\ldots]\right]$$$$\left[y_j \le y^*_j\le y_{j+1},~ \Delta y_j=y_{j+1}-y_j ~[j=0,1,2,\ldots]\right]$$\par [is riemann sum of $f(x,y)$ on 
$D_c([a,b]\times[c,d])$ in which each term consequently the signed volume of a cuboid with height $f(x^*_i,y^*_j)$ and base $\Delta x_i \Delta y_i$ under the graph of $f(x,y)$ on $D_c([a,b]\times[c,d])$] \par [and the integral] $$\left[(R)\int_{[a,b]\times[c,d]}f(x,y)dxdy=\lim_{\genfrac{0}{0}{0pt}{1}{max \Delta x_i \to 0}{max \Delta x_j \to 0}}\sum_{i=0}^{n-1}\sum_{j=0}^{m-1}f(x^*_i,y^*_j)\Delta x_i\Delta y_j\right]$$ [of $f(x,y)$ on $D_c([a,b]\times[c,d])$ is the limit of the sum $S_{n,m}$, provided that the number of sub-divisions tends to infinity and the largest of them tends to zero which implies the signed volume of the limiting cuboids under the graph of $f(x,y)$ on $D_c([a,b]\times [c,d])$]\par [condition for (R) integrability]: [$f(x,y)$ bounded with finite discont on $D_c([a,b]\times [c,d])$]\par {\bfseries [Fubini's Integrals]}\par [if a function $f(x,y)$ is continuous on $D_c([a,b]\times[c,d])$ then] $$\left[\int_{[a,b]\times[c,d]}f(x,y)dxdy=\int_a^bdx\int_c^df(x,y)dy=\int_c^ddy\int_a^bf(x,y)dx\right]$$ \end{enumerate} \subsection{General Regions} [The integral of $f(x,y)$ implies the integral of zerod $F(x,y)$ on general $D_c(R)$] $$\left[\int_{R}f(x,y)dxdy=\int_{[a,b]\times[c,d]}F(x,y)dxdy\right]$$ [where], $$\left[F(x,y)=\begin{cases}f(x,y)& \text{ on } D_c(R)\\ 0 &\text{ on } [D_c([a,b]\times[c,d])- D_c(R)]\end{cases}\right]$$ [which if $f(x,y)$ is continuous on $D_c(R)$ as following]: \begin{enumerate} \item {[Type 1]} $$\left[D_c(R)=\{(x,y)|~a\le x\le b,~g_1(x)\le y\le g_2(x)\}\right]$$ $$[\text{where, $g_1(x)$ and $g_2(x)$ are continuous on $I_c[a,b]$ then}]$$ $$\left[\int_{R}f(x,y)dxdy=\int_{[a,b]\times[c,d]}F(x,y)dxdy=\int_a^bdx\int_c^dF(x,y)dy\right]$$ $$\left[=\int_a^bdx\int_{g_1(x)}^{g_2(x)}F(x,y)dy=\int_a^bdx\int_{g_1(x)}^{g_2(x)}f(x,y)dy\right]$$ \item {[Type 2]} $$\left[D_c(R)=\{(x,y)|~c\le y\le d,~h_1(y)\le x\le h_2(y)\}\right]$$ $$\text{[where, $h_1(y)$ and $h_2(y)$ are continuous on $I_c[c,d]$ then]}$$ $$\left[\int_{R}f(x,y)dxdy=\int_{[a,b]\times[c,d]}F(x,y)dxdy=\int_c^ddy\int_a^bF(x,y)dx\right]$$ $$\left[=\int_c^ddy\int_{h_1(y)}^{h_2(y)}F(x,y)dx=\int_c^ddy\int_{h_1(y)}^{h_2(y)}f(x,y)dx\right]$$ \end{enumerate} \subsection{Properties of Riemann integrals} \begin{enumerate} \item {[Mean Value Theorem]}\par [a cont $f(x,y)$ on $D_c(R)\epsilon D_o$ attains the mini and maxi value of m and M resp on $D_c(R)$] $$:\left[m\text{Ar}(D) \le \int_Df(x,y)dxdy \le M\text{Ar}(D)\right]$$ \item {[Change of Variables]} \par \begin{center}[a continuous $f(x,y)$ on $D_c(R)\epsilon D_o$] ~$\&$\par [an invtble continuously diffble $[x=x(u,v),y=y(u,v)]$ on $p[D_c(R)]\epsilon D_o$ with non zero Js] \end{center} $$:\left[\int_{R}f(x,y)dxdy=\int_{p[R]}f(x(u,v),y(u,v))\frac{\partial (x,y)}{\partial (u,v)}dudv\right]$$ [where], $$\left[\text{J w.r.t } (u,v)=\frac{\partial (x,y)}{\partial (u,v)}= \left| \begin{array}{ccc} \frac{\partial x}{\partial u} &\frac{\partial y}{\partial u}\\[0.3cm] \frac{\partial x}{\partial v} &\frac{\partial y}{\partial v} \end{array} \right|\right]$$ [passing from $(x,y)$ to $(r,\theta)$] $$\left[x=r\cos \theta, ~y=r\sin \theta;~[0\le r<\infty,~0\le \theta < 2\pi]\right]$$ [then], $$\left[\int_Rf(x,y)dxdy=\int_{p[R]}f(r\cos \theta,r\sin \theta)rdrd \theta\right]$$ [three dimensions]:\par [passing from $(x,y,z)$ to $(r,\theta,\phi)$] $$[x=r\sin \theta\cos \phi, ~~y=r\sin \theta \sin \phi,~~ z=r\cos \theta]$$$$[0\le r<\infty,~0\le \theta \le \pi,~0\le \phi < 2\pi]$$ $$\left[\int_Ef(x,y)dxdydz=\int_{p[E]}f(r\sin \theta\cos \phi,r\sin \theta \sin \phi,r\cos 
\theta)r^2\sin\theta drd\theta d\phi\right]$$ [passing from $(x,y,z)$ to $(s,\phi, z)$] $$[x=s\cos \phi, ~~y=r\sin \phi, ~~z=z]$$$$[0\le r<\infty,~0\le \phi < 2\pi,-\infty< z<\infty]$$ $$\left[\int_Ef(x,y)dxdydz=\int_{[p[E]]}f(r\sin \theta\cos \phi,r\sin \theta \sin \phi,r\cos \theta)sdsd\phi dz\right]$$ [whence, pullbacks and the associated limits are determined by geometry and the earlier tips] \item {[Mirror Properties]} $$[\text{unsymmetric }D_c(R)\text{ wrt x or y}]~\&~[\text{symmetric continuous } f(x,y) \text{ on }D_c(R)]$$ $$:\left[\int_Rf(x,y)=0\right]$$ $$[\text{symmetric }D_c(R)\text{ wrt x or y}]~\&~[\text{symmetric continuous } f(x,y) \text{ on }D_c(R)]$$ $$:\left[\int_Rf(x,y)=\int_{\frac{R}{2}}f(x,y)\right]$$ \item {[Special Techniques]}\par \begin{enumerate} \item {[Dirichlet's Integral]} $$[D_c(V)={(x,y)|~ x\ge 0, y\ge 0, z\ge 0, x+y+z\le 1}]$$ $$:\left[\int_V x^{l-1}y^{m-1}z^{n-1}dxdydz=\frac{\Gamma(l)\Gamma(m)\Gamma(n)}{\Gamma(l+m+n+1)}\right]$$ \item {[Liouville's Extension]} $$[D_c(V)={(x,y)|~ x\ge 0, y\ge 0, z\ge 0, h_1\le x+y+z\le h_2}]$$ $$:\left[\int_V x^{l-1}y^{m-1}z^{n-1}f(x+y+z)dxdydz=\frac{\Gamma(l)\Gamma(m)\Gamma(n)}{\Gamma(l+m+n)}\int^{h_2}_{h_1}f(h)h^{l+m+n-1}dh\right]$$ \end{enumerate} \end{enumerate} \chapter{The Applications} \section{Mean Value Theorems} \subsection{Rolle's Theorem} [if a function $f(x)$ is] \begin{enumerate} \centering \item ~[continuous on $[a,b]$] \item ~[differentiable on $(a,b)$] \item ~[$f(a)=f(b)$] \end{enumerate} [then there exist at least a $c$ on $(a,b)$ such that] $$[f'(c)=0]$$ \subsection{Lagrange's Theorem} [if a function $f(x)$ is] \begin{enumerate} \centering \item ~[continuous on $[a,b]$] \item ~[differentiable on $(a,b)$] \end{enumerate} [then there exist at least a $c$ on $(a,b)$ such that] $$\left[f'(c)=\frac{f(b)-f(a)}{b-a}\right]$$ \subsection{Taylor's Theorem} [if $f(x)$ is $n$ times differentiable on $I_c[a,b]\epsilon I_o$ then there exists a $c$ such that] $$\left[f(b)=f(a)+\frac{(b-a)}{1!}f'(a)+\frac{(b-a)^2}{2!}f''(a)+\ldots+\frac{(b-a)^{n-1}}{(n-1)!}f^{n-1}(a)+R_n(c)\right]$$ [where, $R_n(c)$ is the remainder term given by] \begin{enumerate} \item {\bfseries Lagrange's:} $$\left[R_n(c)=\frac{(b-a)^n}{n!}f^n(c)\right]$$ \item {\bfseries Cauchy's:} $$\left[R_n(c)=\frac{(b-a)}{(n-1)!}(b-c)^{n-1}f^n(c)\right]$$ \end{enumerate} [if put] $$[b=a+h,~c=a+\theta h],~[0<\theta <1]$$ [then], $$\left[f(a+h)=f(a)+\frac{h}{1!}f'(a)+\frac{h^2}{2!}f''(a)+\ldots+\frac{h^{n-1}}{(n-1)!}f^{n-1}(a)+R_n(a+\theta h)\right]$$ [{\bfseries Maclaurin Series}]\par[if $a=0$ and $h=x$ then], $$\left[f(x)=f(0)+\frac{x}{1!}f'(0)+\frac{x^2}{2!}f''(0)+\ldots+\frac{x^{n-1}}{(n-1)!}f^{n-1}(0)+R_n(\theta x)\right]$$ [which is of the finite form, provided that the remainder term] $$\left[R_n(\theta x)=\frac{x^n}{n!}f(\theta x) \text{ or } \frac{x^n}{(n-1)!}(1-\theta)^{n-1}f(\theta x)\right]$$ [tends to zero as n tends to infinity then, the series extended to infinity is valid given by] $$\left[f(x)=f(0)+\frac{x}{1!}f'(0)+\frac{x^2}{2!}f''(0)+\ldots+\frac{x^{n-1}}{(n-1)!}f^{n-1}(0)+\ldots\right]$$ [Some Maclaurin Series] $$\left[e^x=1+x+\frac{x^2}{2!}+\frac{x^3}{3!}+\ldots\right]$$ $$\left[\sin x=x-\frac{x^3}{3!}+\frac{x^5}{5!}-\frac{x^7}{7!}+\ldots\right]$$ $$\left[\cos x=1-\frac{x^2}{2!}+\frac{x^4}{4!}-\frac{x^6}{6!}+\ldots\right]$$ $$\left[\frac{1}{1-x}=1+x^2+x^3+x^4+\ldots~~[-1<x<1]\right]$$ $$\left[\ln (1+x)=x-\frac{x^2}{2}+\frac{x^3}{3}-\frac{x^5}{5}+\ldots~~[-1<x<1]\right]$$ \section{The Geometry} \subsection{Mensuration} \begin{enumerate} 
\item {[Arc Length]} $$\left[(ds)^2=(dx)^2+(dy)^2,~(ds)^2=(dr)^2+r^2(d\theta)^2\right]$$ \item {[Quadrature]} $$\left[A=\int_a^by(x)dx,~A=\int_\alpha^\beta \frac{1}{2}r(\theta)^2d\theta\right]$$ \item {[Volume Of Revolution]} $$\left[V=\int_a^b\pi y(x)^2dx,~V=\int_\alpha^\beta \frac{2}{3}\pi r(\theta)^3 \sin \theta d\theta\right]$$ \item {[Surface Of Revolution]} $$\left[S=\int_a^b2\pi y(x)\sqrt{1+y'(x)^2}dx,~S=\int_\alpha^\beta 2\pi r(\theta)\sin \theta \sqrt{r(\theta)^2+r'(\theta)^2} d\theta\right]$$ \end{enumerate} \subsection{Asymptotes} \begin{enumerate} \item {[Explicit Cartesian]} $$\left[x=a,~y=b,~y=mx+c\right]$$ $$\left[\lim_{x \to a^{\pm}}f(x)=\pm \infty,~\lim_{x \to \pm\infty}f(x)=b,~\lim_{x \to \pm\infty}[f(x)-(mx+c)]=0\right]$$ {[Algebraic Curves]} $$[HA: x^n=0,~VA:y^n=0,~m:\phi_n(m)=0]$$ $$\left[c\phi'_n(m)+\phi_{n-1}(m)=0\right]$$ $$\left[\frac{c^2}{2!}\phi_n''(m)+\frac{c}{1!}\phi_{n-1}'(m)+\phi_{n-2}(m)=0\right]$$ \item {[Explicit Polar]} $$\left[r\sin(\theta-\alpha)=p:~F(\alpha)=0\text{ and }p=\frac{1}{F'(\alpha)} \text{ where, } F(\theta)=\frac{1}{f(\theta)}\right]$$ \end{enumerate} \subsection{Tangents} \begin{enumerate} \item ~[Explicit] $$\left[y=y_0+f'(x_0)(x-x_0);~z=z_0+f_x(x_0,y_0)(x-x_0)+f_y(x_0,y_0)(y-y_0)\right]$$ \item ~[Implicit] $$\left[f_x(x_0,y_0)(x-x_0)+f_y(x_0,y_0)(y-y_0)=0\right]$$ $$\left[f_x(x_0,y_0,z_0)(x-x_0)+f_y(x_0,y_0,z_0)(y-y_0)+f_z(x_0,y_0,z_0)(z-z_0)=0\right]$$ \item ~[Parametric] $$\left[x=x(t_0)+x'(t_0)t,~y=y(t_0)+y'(t_0)t\right]$$ $$\left[x=x_0+ux_{u0}+vx_{v0},~y=y_0+uy_{u0}+vy_{v0},~z=z_0+uz_{u0}+vz_{v0}\right]$$ \end{enumerate} [{Algebraic Curve}] $$[\text{given the point of tangency is origin, equate the terms of lowest degree to zero}]$$ \subsection{Curvatures} \begin{enumerate} \item ~[Cartesians] $$\left[\rho=\frac{(1+f'(x_0)^2)^\frac{3}{2}}{f''(x_0)}\right]$$ $$\left[\rho=\frac{(f_x(x_0,y_0)^2+f_y(x_0,y_0)^2)^\frac{3}{2}}{f_{xx}(x_0,y_0)f_y(x_0,y_0)^2-2f_{xy}(x_0,y_0)f_x(x_0,y_0)f_y(x_0,y_0)+f_{yy}(x_0,y_0)f_x(x_0,y_0)^2}\right]$$ $$\left[\rho=\frac{(x'(t_0)^2+y'(t_0)^2)^\frac{3}{2}}{x''(t_0)y'(t_0)-y''(t_0)x'(t_0)}\right]$$ [{Algebraic Curve}]\par [At Origin] $$\left[y=px+q\frac{x^2}{2!}+\ldots,~\rho=\frac{(1+p^2)^\frac{3}{2}}{q}\right]$$ [Tangent At Origin Newton's Method]: $$\left[[x-]:\rho=\lim_{x\to 0,~ y \to 0}\frac{x^2}{2y},~ [y-]:\rho=\lim_{x\to 0,~ y \to 0}\frac{y^2}{2x}\right]$$ \item {[Explicit Polar]} $$\left[\rho=\frac{(r(\theta_0)^2+r'(\theta_0)^2)^\frac{3}{2}}{r(\theta_0)^2+2r'(\theta_0)^2-r(\theta_0)r''(\theta_0)}\right]$$ \item {[Pedal]} $$\left[\rho=r\frac{dr}{dp}\right]$$ $$\left[y=y,~~p^2=\frac{(y-xy')^2}{1+y'^2};~~r^2=x^2+y^2,~r=r,~~p^2=\frac{r^4}{r^2+r'^2}\right]$$ \end{enumerate} \section{Vectors} \subsection{Single Variable} \begin{enumerate} \item {[The Function]} $$\left[r(t)=x(t)i+y(t)j+z(t)k\right]$$ \item {[The Geometry]}\par [Arc Length]: $$\left[s=\int_{t_1}^{t_2}|r'(t)|dt\right]$$ [Unit Tangent, Normal, Curvature]: $$\left[T(t_0)=\frac{r'(t_0)}{|\vec{r'}(t_0)|},~n(t_0)=\frac{T'(t_0)}{|T'(t_0)|},~\kappa(t_0)=\left|\frac{\vec{T'}(t_0)}{r'(t_0)}\right|=\frac{|r'(t_0)\times r''(t_0)|}{|r'(t_0)|^3}\right]$$ \item {[The Derivative]}:$$\left[r'(t)=x'(t)i+y'(t)j+z'(t)k\right]$$ {\bfseries NOTE:}\par [Dot Product]: $$\left[(r_1\cdot r_2)'=r_1'\cdot r_2+r_1\cdot r_2'\right]$$ [Cross Product]: $$\left[(r_1\times r_2)'=r_1'\times r_2+r_1\times r_2'\right]$$ [Triple Dot Product]: $$\left[[r_1~ r_2~ r_3]'=[r_1'~ r_2 ~r_3]+[r_1~r_2' ~r_3]+[r_1~ r_2~ r_3']\right]$$ [Triple Cross Product]: $$\left[[r_1\times (r_2\times 
r_3)]'=r_1'\times(r_2\times r_3)+r_1\times(r_2'\times r_3)+r_1\times(r_2\times r_3')\right]$$ \item ~[The Integral]:$$\left[\int_a^br(t)dt=\left(\int_a^bx(t)dt\right)i+\left(\int_a^by(t)dt\right)j+\left(\int_a^bz(t)dt\right)k\right]$$ \end{enumerate} \subsection{Two Variable} \begin{enumerate} \item {[The Function]} $$\left[r(u,v)=x(u,v)i+y(u,v)j+z(u,v)k\right]$$ \item {[The Geometry]}\par [Surface Area]: $$\left[S=\int_D|r_u(u,v)\times r_v(u,v)|dudv\right]$$ [Normal] $$\left[n(u_0,v_0)=\frac{r_u(u_0,v_0)\times r_v(u_0,v_0)}{|r_u(u_0,v_0)\times r_v(u_0,v_0)|}\right]$$ \item {[The Derivative]}$$\left[r_u(u,v)=x_u(u,v)i+y_u(u,v)j+z_u(u,v)k\right]$$ \item {[The Integral]}$$\left[\int_Dr(u,v)dudv=\left(\int_Dr(u,v)dudv\right)i+\left(\int_Dr(u,v)dudv\right)j+\left(\int_Dr(u,v)dudv\right)k\right]$$ \end{enumerate} \subsection{Field Theory} \begin{enumerate} \item ~[The Fields]: $$[\text{associates a scalar or a vector to each point in a space – possibly physical space}]$$ \item ~[The Gradient]: $$\left[\text{in which the field increases the most}\right]$$ $$\left[\nabla U=\frac{\partial U}{\partial x}i+\frac{\partial U}{\partial y}j+\frac{\partial U}{\partial z}k\right]$$ [is perpendicular to the level sets, measures change in other directions, say a unit vector $v$] $$\left[v\cdot\nabla U=D_vU(x,y,z)\right]$$ \item ~[The Curl]: $$[\text{infinitesimal rotation of the field}]$$ $$\left[\nabla \times F=\left(\frac{\partial F_z}{\partial y}-\frac{\partial F_y}{\partial z}\right)i+ \left(\frac{\partial F_x}{\partial z}-\frac{\partial F_z}{\partial x}\right)j+ \left(\frac{\partial F_y}{\partial x}-\frac{\partial F_x}{\partial y}\right)k\right]$$ \item ~[The Divergence]: $$[\text{quantity of field's source}]$$ $$\left[\nabla \cdot F=\frac{\partial F_x}{\partial x}+\frac{\partial F_y}{\partial y}+\frac{\partial F_z}{\partial z}\right]$$ \item ~[The Laplacian]: $$[\text{flux density of the gradient flow of the field}]$$ $$\left[\nabla\cdot(\nabla U)=\nabla^2 \cdot U=\frac{\partial^2 U}{\partial x}+\frac{\partial^2 U}{\partial y}+\frac{\partial^2 U}{\partial z}\right]$$ \end{enumerate} [{Reductions}] \begin{enumerate} \item ~[Gradients]: $$\left[\nabla (\phi U)=U\nabla \phi+\phi\nabla U\right]$$ $$\left[\nabla (A\cdot B)=(A\cdot \nabla)B+(B\cdot \nabla)A+A\times(\nabla\times B)+B\times(\nabla \times A)\right]$$ \item ~[Curls]: $$\left[\nabla\times(\phi A)=\phi(\nabla\times A)+\nabla\phi\times A\right]$$ $$\left[\nabla\times(A\times B)=A(\nabla\cdot B)-B(\nabla\cdot A)+(B\cdot \nabla)A-(A\cdot \nabla )B\right]$$ \item ~[Divergences]: $$\left[\nabla\cdot(\phi A)=\phi\nabla\cdot A+\nabla\phi\cdot A\right]$$ $$\left[\nabla\cdot(A\times B)=(\nabla\times A)\cdot B-(\nabla\times B)\cdot A\right]$$ \item ~[Double Derivatives]: $$\left[\nabla\times(\nabla A)=0\right]$$ $$\left[\nabla\cdot (\nabla\times A)=0\right]$$ $$\left[\nabla\times(\nabla\times A)=\nabla(\nabla\cdot A)-\nabla^2A\right]$$ \end{enumerate} [Position Vectors] $$\left[\nabla( r^n)=nr^{n-1}\hat{r},~\nabla\times(r^n\hat{r})=0,~\nabla\cdot(r^n\hat{r})=(n+2)r^{n-1},~\nabla^2(r^n)=n(n+1)r^{n-2}\right]$$ \section{Optimization} \begin{enumerate} \item {[Single Variable]} \par [a suff diffably continuous function has zero derivatives at $c$ upto and including the n$^\text{th}$ then] \begin{enumerate} \centering \item ~[n is even: IP] \item ~[n is odd: $f^{(n+1)}(c)>0$ [Min], $f^{(n+1)}(c)<0$ [Max]]\par [minima if dec to left and inc to right of c, maxima if inc to left and dec to right] \end{enumerate} \item {[Multi Variable]}\par [a twice continuously 
differentiable function at a stationary point corresponds to the following] \begin{enumerate} \centering \item ~[$D_2<0$: IP] \item ~[$D_1>0,~ D_2>0, D_3>0$: [Min],~[$D_1<0,~ D_2>0,~D_3<0$: [Max] \item ~[else: inconclusive] \end{enumerate} [where], $$\left[~D_1=f_{xx},~D_2=\left| \begin{array}{ccc} f_{xx} & f_{xy}\\[0.3cm] f_{yx} & f_{yy} \end{array} \right|,~D_3=\left| \begin{array}{cccc} f_{xx} & f_{xy} & f_{xz}\\[0.3cm] f_{yx} & f_{yy} & f_{yz}\\[0.3cm] f_{zx} & f_{zy} & f_{zz}\\[0.3cm] \end{array} \right|~\right]$$ [under the conditions] $$\left[\phi_1(x,y,z)=0,~\phi_2(x,y,z)=0:~ff'=f(x,y,z)+\lambda_1\phi_1(x,y,z)+\lambda_2\phi_2(x,y,z)\right]$$ \end{enumerate} \chapter{Integrals Gone Wild} \section{Line Integrals} [The first line integral] $$\left[\int_{C}f(x,y)ds=\lim_{n \to \infty}\sum_{i=1}^{n}f(x^*_i,y^*_i)\Delta s_i\right]$$ [is the limiting projected riemann sum of $f(x,y)$ on $C$ where]\par \begin{center}[$(x^*_i, y^*_i)$ is any point on the $i^{th}$ element and $\Delta s_i~[i=1,2,\ldots n]$ is arc length of the $i^{th}$ element]\end{center} [The second line integral] $$\left[\int_{C}\left[P(x,y)dx+Q(x,y)dy\right]=\lim_{n \to \infty}\sum_{i=1}^{n}f(x^*_i,y^*_i)\Delta x_i+\lim_{n \to \infty}\sum_{i=1}^{n}f(x^*_i,y^*_i)\Delta y_i\right]$$ [is the joint consideration of alteration to the first integral of $P \& Q$ on $C$ where]\begin{center} [$\Delta x_i=x_i-x_{i-1}$ such, $x_i$ is the x- of $i^{th}$ element] [$\Delta y_i=y_i-y_{i-1}$ such, $y_i$ is the y- of $i^{th}$ element]\par\end{center} $$\left[\int_{-C}f(x,y)ds=\int_{C}f(x,y)ds \text{, but }\int_{-C}f(x,y)[dx][dy]=-\int_{C}f(x,y)[dx][dy]\right]$$ [The following continuous smooth expansion] $$\left[\int_{C_1\cup C_2}f=\int_{C_1}f+\int_{ C_2}f;~\int_Cf[ds][dx][dy][dz]=\text{[S to E][f][DDA]}\right]$$ \begin{enumerate} \item $[y=y(x)]$\par \begin{center}\begin{tabular}{|c|c|c|}\hline [f]&[DDA]&[DDA]\\[0.1cm] $f(x,y(x))$&$\left[\sqrt{1+(y'(x))^2}\right]dx$&$dx$\\[0.1cm] &&$y'(x)dx$\\[0.1cm]\hline \end{tabular}\end{center} \item $[x=x(t), y=y(t)]$\par \begin{center} \begin{tabular}{|c|c|c|}\hline [f]&[DDA]&[DDA]\\[0.1cm] $f(x(t),y(t))$&$\left[\sqrt{(x'(t))^2+(y'(t))^2}\right]dt$&$x'(t)dt$\\[0.1cm] &&$y'(t)dt$\\[0.1cm]\hline \end{tabular} \end{center} \item {[Differential Theorem]}\par [if continuously diffble $P(x,y)$ $\&$ $Q(x,y)$ differentiates as $P_y(x,y)=Q_x(x,y)$ on $C\epsilon D_o$ then] $$\left[\int_C\left[P(x,y)dx+Q(x,y)dy\right]=\int_CdU(x,y)=U(\text{E})-U(\text{S})\right]$$ [where] $$\left[U(x,y)=\int P(x,y)dx,~U_y(x,y)=Q(x,y);~U(x,y)=\int Q(x,y)dx,~U_x(x,y)=P(x,y)\right]$$ \item {[Green's Theorem]}\par [if p,w,s, $C_+$ encloses $D_c(R)$, and $P(x,y)$ \& $Q(x,y)$, are continuously diffble on $D_c(R)\epsilon D_o$] $$\left[\int_{C_o}[P(x,y)dx+Q(x,y)dy]=\int_R\left(\frac{\partial}{\partial x}Q(x,y)-\frac{\partial}{\partial y}P(x,y)\right)dxdy\right]$$ \end{enumerate} \section{Surface Integrals} [The first surface integral] $$\left[\int_{S}f(x,y)dS=\lim_{n \to \infty}\sum_{i=1}^{n}f(x^*_i,y^*_i,z^*_i)\Delta S_i\right]$$ [is the limiting projected riemann sum of $f(x,y,z)$ on S where] \begin{center}[$(x^*_i, y^*_i,z^*_i)$ is any point on the $i^{th}$ element and $\Delta S_i~[i=1,2,\ldots n]$ is surface area of $i^{th}$ element]\end{center} [The second surface integral] $$\left[\int_{S}[Pdxdy+Qdydz+Rdzdx]=\lim_{n \to \infty}\sum_{i=1}^{n}f\Delta x_i \Delta y_i+\lim_{n \to \infty}\sum_{i=1}^{n}f\Delta y_i \Delta z_i+\lim_{n \to \infty}\sum_{i=1}^{n}f\Delta z_i \Delta x_i\right]$$ [is the joint consideration of alteration to 
the first integral of $P,Q\&R$ on S where]\par [$\Delta x_i=x_i-x_{i-1}$; $x_i$ is x- of $i^{th}$][$\Delta y_i=y_i-y_{i-1}$; $y_i$ is y- of $i^{th}$][$\Delta z_i=z_i-z_{i-1}$; $z_i$ is z- of $i^{th}$] $$\left[\int_{-S}f(x,y,z)dS=\int_{S}f(x,y,z)dS, \text{ but }~\int_{-S}f[dxdy][dydz][dzdx]=-\int_{S}f[dxdy][dydz][dzdx]\right]$$ [The following smooth expansion] $$\left[\int_{S_1\cup S_2}f=\int_{S_1}f+\int_{ S_2}f;~\int_Sf[dS][dxdy][dydz][dzdx]=\text{[on D][f]}\text{[DDS]}\right]$$ \begin{enumerate} \item $[z=z(x,y)]$\par \begin{center}\begin{tabular}{|c|c|c|}\hline [f]&[DDS]&[DDS]\\[0.1cm] $f(x,y,z(x,y))$&$\left[\sqrt{(z_x(x,y))^2+(z_y(x,y))^2+1}\right]dxdy$&$-z_x(x,y)dxdy$\\[0.1cm] &&$-z_y(x,y)dxdy$\\[0.1cm] &&$dxdy$\\[0.1cm]\hline \end{tabular}\end{center} \item $[x=x(u,v), y=y(u,v), z=z(u,v)]$\par [f]:$$[f(x(u,v),y(u,v),z(u,v))]$$ [DDS]: $$\left[\sqrt{\left(\frac{\partial (y,z)}{\partial (u,v)}\right)^2+\left(\frac{\partial (z,x)}{\partial (u,v)}\right)^2+\left(\frac{\partial (x,y)}{\partial (u,v)}\right)^2}dudv\right]$$ [DDS]: $$\left[\frac{\partial (y,z)}{\partial (u,v)}dudv,~ \frac{\partial (z,x)}{\partial (u,v)}dudv,~ \frac{\partial (x,y)}{\partial (u,v)}dudv\right]$$ \item {[Stokes Theorem]}\par [if a p,w,s $C_+$ encloses $D_c(S)$ and $P,~Q,~\&~R$ are continuously diffble on $D_c(S)\epsilon D_o$ then] $$\left[\int_{C_o}[P(x,y,z)dx+Q(x,y,z)dy+R(x,y,z)dz]\right]$$ $$\left[=\int_S\left[\left(\frac{\partial}{\partial x}Q-\frac{\partial}{\partial y}P\right)dxdy+\left(\frac{\partial}{\partial x}Q-\frac{\partial}{\partial y}P\right)dydz+\left(\frac{\partial}{\partial x}Q-\frac{\partial}{\partial y}P\right)dzdx\right]\right]$$ \item {[Gauss's Theorem]}\par [if a p,w,s $S_+$ encloses $D_c(E)$ and $P,~Q~\&~R$ are continuously diffble on $D_c(E)\epsilon D_o$ then] $$\left[\int_{S_o}\left[\left(\frac{\partial}{\partial x}Q-\frac{\partial}{\partial y}P\right)dxdy+\left(\frac{\partial}{\partial x}Q-\frac{\partial}{\partial y}P\right)dydz+\left(\frac{\partial}{\partial x}Q-\frac{\partial}{\partial y}P\right)dzdx\right]\right]$$ $$\left[=\int_E\left(\frac{\partial}{\partial x}P(x,y,z)+\frac{\partial}{\partial y}Q(x,y,z)+\frac{\partial}{\partial z}R(x,y,z)\right)dxdydz\right]$$ \end{enumerate} \section{Field Theory} \begin{enumerate} \item {[Circulation]:} $$\left[\int_CUds=\text{[integral form S to E] [U] }[|r'(t)|]\text{ [dt]}\right]$$ $$\left[\int_C[F_xdx+F_ydy+F_zdz]=\int_CF\cdot dl=\int_CF\cdot Tds\right]$$ \item {[Flux]:} $$\left[\int_SUdS=\text{[integral on D] [U] }[|r_u(u,v)\times r_v(u,v)|]\text{ [dudv]}\right]$$ $$\left[\int_S[F_xdydz+F_ydzdx+F_zdxdy]=\int_SF\cdot ndS=\int_SF.da\right]$$ \item {[Theorems]:} $$\left[\int_C\nabla U.dl=U(E)-U(S),~\int_S\nabla \times F\cdot da=\int_CF.dl,~\int_E\nabla \cdot Fdxdydz=\int_SF.da\right]$$ [Green's Formulas]: $$\left[\int_E\nabla \cdot (U_1\nabla U_2)dxdydz=\int_E [U_1\nabla\cdot(\nabla U_2)-\nabla U_1\cdot\nabla U_2]dxdydz=\int_S(U_1\nabla U_2).da\right]$$ \item {[Cylindrical Coordinates]} $$\left[dl=ds~s+sd\phi~\phi+ dz~z\right]$$ $$\left[da=sd\phi dz~s+dzds~\phi+sdsd\phi~z\right]$$ $$\left[\nabla U=\frac{\partial U}{\partial s}~s+\frac{1}{s}\frac{\partial U}{\partial \phi}~\phi+\frac{\partial U}{\partial z}~z\right]$$ $$\left[\nabla \times F=\left[\frac{1}{s}\frac{\partial F_z}{\partial \phi}-\frac{\partial F_\phi}{\partial z}\right]s +\left[\frac{\partial F_s}{\partial z}-\frac{\partial F_z}{\partial s}\right]\phi +\frac{1}{s}\left[\frac{\partial (sF_\phi)}{\partial s}-\frac{\partial F_s}{\partial \phi}\right]z\right]$$ $$\left[\nabla \cdot 
F=\frac{1}{s}\frac{\partial (sF_s)}{\partial s}+\frac{1}{s}\frac{\partial F_\phi}{\partial \phi}+\frac{\partial F_z}{\partial z}\right]$$ $$\left[\nabla^2 U=\frac{1}{s}\frac{\partial}{\partial s}\left[s\frac{\partial U}{\partial s}\right]+\frac{1}{s^2}\frac{\partial^2U}{\partial \phi^2}+\frac{\partial^2 U}{\partial z^2}\right]$$ \item {[Spherical Coordinates]} $$\left[dl=dr~r+rd\theta~\theta+r\sin \theta d\phi~\phi\right]$$ $$\left[da=r^2\sin \theta d\theta d\phi~r+r\sin\theta d\phi dr~\theta+rdrd\theta~\phi\right]$$ $$\left[\nabla U=\frac{\partial U}{\partial r}~r+\frac{1}{r}\frac{\partial U}{\partial \theta}~\theta+\frac{1}{r\sin\theta}\frac{\partial U}{\partial \phi}~\phi\right]$$ $$\left[\nabla \times F=\frac{1}{r\sin\theta}\left[\frac{\partial (\sin\theta F_\phi)}{\partial \theta}-\frac{\partial F_\theta}{\partial \phi}\right]r+\frac{1}{r}\left[\frac{1}{\sin \theta}\frac{\partial F_r}{\partial \phi}-\frac{\partial (rF_\phi)}{\partial r}\right]\theta+\frac{1}{r}\left[\frac{\partial (rF_\theta)}{\partial r}-\frac{\partial F_r}{\partial \theta}\right]\phi\right]$$ $$\left[\nabla \cdot F=\frac{1}{r^2}\frac{\partial (r^2F_r)}{\partial r}+\frac{1}{r\sin\theta}\frac{\partial (\sin\theta F_\theta)}{\partial \theta}+\frac{1}{r\sin\theta}\frac{\partial F_\phi}{\partial \phi}\right]$$ $$\left[\nabla^2 U=\frac{1}{r^2}\frac{\partial}{\partial r}\left[r^2\frac{\partial U}{\partial r}\right]+\frac{1}{r^2\sin\theta}\frac{\partial}{\partial \theta}\left[\sin\theta\frac{\partial U}{\partial \theta}\right]+\frac{1}{r^2\sin^2\theta}\frac{\partial^2U}{\partial \phi^2}\right]$$ \end{enumerate} \chapter{Series} $\left[\text{The series $\sum_{n=1}^{\infty}x_n$ is convergent if the partial sum $s_n=x_1+x_2\ldots+x_n$ has a finite limit as $n\to\infty$, else divergent}\right]$ \section{Test For Positive Series} \begin{enumerate} \item {[Comparison Tests]:} $$\left[[\text{for }a_n \le b_n]:~ \text{if }\sum_c b_n \rightarrow \sum_c a_n, \text{ and if } \sum_d a_n \rightarrow \sum_d b_n\right]$$ $$\left[\lim_{n \to \infty}a_n\sim\lim_{n \to \infty}b_n\rightarrow \sum_{c|d}a_n, \sum_{c|d}b_n\right]$$ {[Toolkit]:} $$\left[\ln(\ln n)<\ln n<n^{\frac{1}{k}}<n^k<k^n<n!<n^n<n^{n^n}\right]$$ $$\left[\sum_{c: |q|<1,~ d: |q|\ge 1}q^n,~\sum_{c:p>1,~d:p\le 1}\frac{1}{n^p}\right]$$ \item {[Ratio or Root Test]:}\par $$\left[\lim_{n \to \infty}\frac{a_{n+1}}{a_n}=q \text{, or } \lim_{n \to \infty}\sqrt[n]{a_n}=q \rightarrow \sum_{c:q<1,~d:q>1}a_n\right]$$ \item {[Higher Tests]:} \par $$\left[\lim_{n \to \infty}n\left[\frac{a_{n}}{a_{n+1}}-1\right]=q \text{, or } \lim_{n \to \infty}n\left[\ln\frac{a_{n}}{a_{n+1}}\right]=q \rightarrow \sum_{c:q>1,~d:q<1}a_n\right]$$ \item {[Integral Test]:} $$\left[\text{for a monotone decreasing,~}[c|d]\int_1^{\infty}a(x)dx\rightarrow \sum_{c|d}a_n\right]$$ \end{enumerate} \section{Test For Alternating Series:} \begin{enumerate} \item {[Absolute Test]:} $$\left[\sum_{c}|a_n| \rightarrow \sum_{c}a_n\right]$$ $$\left[\lim_{n \to \infty}\left|\frac{a_{n+1}}{a_n}\right|>1 \text{ or }\lim_{n \to \infty}\sqrt[n]{|a_n|}>1 \rightarrow \sum_{d}a_n\right]$$ \item {[Leibniz Test]:} $$\left[a_1\ge a_2\ge a_3\ldots,~~\lim_{n \to \infty}a_n=0 \rightarrow \sum_{c}(-1)^na_n\right]$$ \end{enumerate} \chapter{Fourier's} \section{Fourier Series} [A periodic signal $x(t)$ of fundamental period $T$ (time period for temporal and wavelength for spatial signals) as] $$\left[x(t)=x(t+T)\right]$$ [can expectedly be represented by a sum of sines and cosines, 
i.e.] $$\left[x(t)=\frac{c_0}{2}+\sum_{n=1}^{\infty}\left[a_n\sin\left(2 \pi n\frac{t}{T}\right)+b_n\cos\left(2 \pi n\frac{t}{T}\right)\right]\right]$$ [where] $$\left[c_0=\frac{1}{\frac{T}{2}}\int_{\text{OaP}}x(t)dt\right]$$ $$\left[a_n=\frac{1}{\frac{T}{2}}\int_{\text{OaP}}x(t)\sin\left(2 \pi n\frac{t}{T}\right)dt,~b_n=\frac{1}{\frac{T}{2}}\int_{\text{OaP}}x(t)\cos\left(2 \pi n\frac{t}{T}\right)dt\right]$$ [or more compactly] $$\left[x(t)=\sum_{n=-\infty}^{\infty}c_ne^{2\pi in\frac{t}{T}}\right]$$ [where] $$\left[c_n=\frac{1}{T}\int_{\text{OaP}}x(t)e^{-2\pi in\frac{t}{T}}dt,~~\text{conjugate of}~c_n=c_{-n}~\text{[for real $x(t)$]}\right]$$ [The signal equals its series except at isolated discontinuities, where the series converges to the average of the values on either side, if] $$\left[\int_{\text{OaP}}|x(t)|dt<\infty\right]$$ $$\text{[no more than a finite number of extrema over a period] [only a finite number of discontinuities, none infinite]}$$ [for finite partial sums, the overshoot at discontinuities (Gibbs phenomenon) is about 9\% of the jump] \par [Further, if] $$\left[\int_{\text{OaP}}|x(t)|^2dt<\infty\right]$$ [then] $$\left[\int_{\text{OaP}}\left[x(t)-\sum_{n=-\infty}^{\infty}c_ne^{2\pi in\frac{t}{T}}\right]^2dt=0\right]$$ \subsection{Properties} [if the signals and their series correspond as] $$~~~~~~~\left[x(t)\xrightarrow{FS}\sum x_n,~y(t)\xrightarrow{FS}\sum y_n\right]$$ [then] \begin{enumerate} \item ~[Linearity] $$\left[Ax(t)+By(t)\xrightarrow{FS}A\sum x_n+B\sum y_n\right]$$ \item ~[Time Shift, Scale, Reversal] $$\left[x(t-t_0)\xrightarrow{FS}e^{-2\pi in\frac{t_0}{T}}\sum x_n\right],~\left[x(a t)\xrightarrow{FS}\sum x_n~\text{(same coefficients, period $\tfrac{T}{a}$)}\right],~\left[x(-t)\xrightarrow{FS}\sum x_{-n}\right]$$ \item ~[Multiplication] $$\left[x(t)y(t)\xrightarrow{FS}\sum_{k=-\infty}^{\infty}x_k y_{n-k}\right]$$ \item ~[Parseval's] $$\left[\frac{1}{T}\int_{\text{OaP}}|x(t)|^2dt=\sum_{n=-\infty}^{\infty}|x_n|^2\right]$$ \end{enumerate} \section{Fourier Transform} [A finite non-periodic signal $x(t)$ accompanied by a periodic signal $x_{T}(t)$ with fundamental period $T$] $$\left[x(t)=\lim_{T \to \infty}x_{T}(t)\right]$$ [such that the series of $x_{T}(t)$ is] $$\left[x_{T}(t)=\sum_{n=-\infty}^{\infty}\left[\frac{1}{T}\int_{-\frac{T}{2}}^{\frac{T}{2}}x_{T}(t)e^{-2\pi in\frac{t}{T}}dt\right]e^{2\pi in\frac{t}{T}}\right]$$ [then, taking limits and replacing the closely packed discrete points $\frac{n}{T}$ by a continuous variable $s$] $$\left[x(t)=\lim_{T \to \infty}x_{T}(t)=\lim_{T \to \infty}\sum_{n=-\infty}^{\infty}\left[\frac{1}{T}\int_{-\infty}^{\infty}x(t)e^{-2\pi ist}dt\right]e^{2\pi ist}=\lim_{T \to \infty}\sum_{n=-\infty}^{\infty}\frac{1}{T}X(s)e^{2\pi ist}\right]$$ [since the discrete points are spaced $\frac{1}{T}$ apart, which equals $\Delta s$, then] $$\left[x(t)=\lim_{\Delta s \to 0}\sum_{n=-\infty}^{\infty}X(s)e^{2\pi ist}\Delta s=\int_{-\infty}^{\infty}X(s)e^{2 \pi ist}ds\right]$$ [i.e.] $$\left[x(t)=\int_{-\infty}^{\infty}\left[\int_{-\infty}^{\infty}x(t)e^{-2\pi ist}dt\right]e^{2\pi ist}ds\right]$$ [Hence the transform pairs are given by] $$\left[Fx(t)=X(s)=\int_{-\infty}^{\infty}x(t)e^{-2\pi ist}dt\right]$$ $$\left[F^{-1}X(s)=x(t)=\int_{-\infty}^{\infty}X(s)e^{2 \pi ist}ds\right]$$ [The signal equals its Fourier integral representation except at isolated discontinuities, where it converges to the average of the values on either side, if] \begin{enumerate} \item $$\left[\int_{-\infty}^{\infty}|x(t)|dt<\infty\right]$$ \item ~[no more than a finite number of maxima and minima within a finite interval] \item ~[only a finite number of discontinuities within a finite interval, no infinite discontinuities] \end{enumerate} [Further, if] 
$$\left[\int_{-\infty}^{\infty}|x(t)|^2dt<\infty\right]$$ [then] $$\left[\int_{-\infty}^{\infty}\left[x(t)-F^{-1}Fx(t)\right]^2dt=0\right]$$ [Some Fourier Transforms]: $$\left[F~[\Pi(t)]=\sinc(s),~F~[\Lambda(t)]=\sinc^2(s),~F~[E(t)]=\frac{1}{2\pi isa},~F~[G(t)]=G(s)\right]$$ \subsection{Properties} [if the signals and their transforms correspond as] $$~~~~~\left[x(t)\xrightarrow{FT}X(s),~y(t)\xrightarrow{FT}Y(s)\right]$$ [then] \begin{enumerate} \item ~[Linearity] $$\left[Ax(t)+By(t)\xrightarrow{FT}AX(s)+BY(s)\right]$$ \item ~[The Shift Theorem] $$\left[x(t-t_0)\xrightarrow{FT}e^{-2\pi is{t_0}}X(s),~e^{2\pi i{s_0}t}x(t)\xrightarrow{FT}X(s-s_0)\right]$$ \item ~[Duality] $$\left[X(t)\xrightarrow{FT}x(-s)\right]$$ [if considered $f^-(t)=f(-t)$ then, '-' moves over heads for inverse, dual, conjugate as]$$\left[FFx=(FF^{-1}x)^{-}=x^{-}\right]$$ \item ~[Differentiation] $$\left[\frac{d}{dt}x(t)\xrightarrow{FT}2\pi isX(s),~-2\pi itx(t)\xrightarrow{FT}\frac{d}{ds}X(s)\right]$$ \item ~[The Scale Theorem] $$\left[x(a t)\xrightarrow{FT}\frac{1}{|a|}X\left(\frac{s}{a}\right),~\frac{1}{|a|}x\left(\frac{t}{a} \right)\xrightarrow{FT}X(as)\right]$$ \item ~[Parseval's] $$\left[\int_{-\infty}^{\infty}|x(t)|^2dt=\int_{-\infty}^{\infty}|X(s)|^2ds\right]$$ \item {[Multiplication (1)]} $$\left[x(t)y(t)\xrightarrow{FT}(X*Y)(s)\right]$$ \item ~[Convolution (1)] $$\left[(x*y)(t)\xrightarrow{FT}X(s)Y(s)\right]$$ [where] $$\left[(x*y)(t)=\int_{-\infty}^{\infty}x(t-m)y(m)dm\right]$$ [repeated self convolution tends to a Gaussian; convolution is using one function to smooth and average out the other, as the convolution outcome is as smooth as the individuals separately; it also smears] $$\left[(x*y)'(t)=(x*y')(t)\right]$$ \end{enumerate} \section{Distribution Theory} [The formulation of a theory which encompasses $\delta$'s is to single out a class S on which the Fourier integral converges and whose transforms are themselves in S; such a carefully established class consists of the rapidly decreasing functions] \begin{itemize} \centering \item ~[is infinitely differentiable],~$\left[\text{ $\left|x^n\frac{d^n}{dx^n}f(x)\right|\to 0$; as $x\to \pm \infty$}\right]$\par [or, can be written at once as] $$\left[\left|x^m\frac{d^n}{dx^n}f(x)\right|\le C_{mn} \text{ as } x\to \pm \infty\right]$$ \end{itemize} [Examples: infinitely differentiable functions identically zero outside a finite interval, $e^{-x^2} $etc] [The distributions $T$ can be viewed as linear functionals on $S$, defined by their outcomes (pairings)] \begin{enumerate} \item ~[From Functions]:\par ~[if the pairing below exists for every $\phi$ in $S$, then $f(x)$ is said to induce $T_f$ in $T$ by means of the formula] $$\left[\langle T_f,\phi\rangle=\int_{-\infty}^{\infty}f(x)\phi(x)dx\right]$$ \item ~[The Deltas]: \par~[operate on $\phi$ in $S$ as] $$\left[\langle \delta, \phi\rangle=\phi(0)\right]$$ ~[further] $$\left[\langle \delta_a, \phi\rangle=\phi(a)\right]$$ \item ~[Limiting Distributions]:\par ~[if $T_n$ is a seq in $T$ such that $\langle T_n, \phi \rangle$ converges for every $\phi$ in $S$ then $T_n$ converges to a $T$ in $T$] $$\left[\langle T, \phi\rangle=\lim_{n \to \infty}\langle T_n, \phi\rangle\right]$$ \end{enumerate} \section{Operations on T} \begin{enumerate} \item {\bfseries Derivatives:}\par [A large range of distributions can be differentiated within the distributions, defined as pairings] $$\left[\langle T',\phi\rangle=-\langle T,\phi' \rangle\right]$$ {[Heaviside]:} $$\left[\langle H', \phi\rangle=-\langle H, \phi' \rangle=-\int_{-\infty}^{\infty}H(x)\phi'(x)dx=-\int_{0}^{\infty}\phi'(x)dx=\phi(0)=\langle \delta, \phi\rangle\right]$$ 
\item {\bfseries Reversed Distributions}\par [The change of sign, as the distributions do not live on the points, is the defined pairing] $$\left[\langle T^{-},\phi\rangle=\langle T,\phi^{-}\rangle\right]$$ \item {\bfseries The Shift and The Scale}\par [The shifting and scaling of functions are based on point values, so they are to be paired as] $$\left[\langle \tau_\beta T,\phi\rangle=\langle T,\tau_{-\beta}\phi\rangle\right]$$ $$\left[\langle \sigma_\alpha T,\phi\rangle=\langle T,\frac{1}{|\alpha|}\sigma_{\frac{1}{\alpha}}\phi\rangle\right]$$ [The Deltas]: $$\left[\langle\tau_b\delta,\phi\rangle=\langle\delta,\tau_{-b}\phi\rangle=\tau_{-b}\phi(0)=\phi(b)=\langle \delta_b,\phi\rangle\right]$$ $$\left[\langle\sigma_a\delta,\phi\rangle=\langle\delta,\frac{1}{|a|}\sigma_{\frac{1}{a}}\phi\rangle=\frac{1}{|a|}\sigma_{\frac{1}{a}}\phi(0)=\frac{1}{|a|}\phi(0)=\langle \frac{1}{|a|}\delta,\phi\rangle\right]$$ \item {\bfseries Convolution}\par [The convolution of distributions presents problems, as the conditions under which it exists are tricky; however, the convolution of distributions with test functions is simply the pairings] $$\left[\langle \varphi*T,\phi \rangle=\langle T,\varphi^-*\phi\rangle\right]$$ $$\left[\langle T_1*T_2,\phi\rangle=\langle T_1(y),\langle T_2(x),\phi(x+y)\rangle\rangle\right]$$ {[Deltas]:} $$\left[\langle \delta_a*\delta_b, \phi\rangle=\langle \delta_a(y),\langle \delta_b(x),\phi(x+y) \rangle\rangle=\langle \delta_a(y),\phi(b+y)\rangle=\phi(a+b)=\langle \delta_{a+b},\phi\rangle\right]$$ \item{\bfseries Transforms of T}\par [The generalization of transforms to distributions is such that the transform is itself a distribution] $$\left[\langle FT,\phi \rangle=\langle T,F\phi \rangle\right]$$ [and further, with the same corresponding properties, the inverse] $$\left[\langle F^{-1}T,\phi \rangle=\langle T,F^{-1}\phi \rangle\right]$$ [Finally, the Fourier integrals] $$\left[\langle F^{-1}(FT),\phi \rangle=\langle (FT),F^{-1}\phi \rangle=\langle T,F(F^{-1})\phi \rangle=\langle T,\phi \rangle\right]$$ \begin{itemize} \item ~[The deltas]: $$\left[\langle F\delta,\phi \rangle=\langle \delta,F\phi \rangle=F\phi(0)=\int_{-\infty}^{\infty}\phi(x)dx=\langle 1,\phi \rangle\right]$$ [also] $$\left[\langle F\delta_a,\phi \rangle=\langle \delta_a,F\phi \rangle=F\phi(a)=\int_{-\infty}^{\infty}e^{-2\pi iax}\phi(x)dx=\langle e^{-2\pi iax},\phi \rangle\right]$$ \item ~[One]: $$\left[\langle F1,\phi\rangle=\langle 1,F\phi\rangle=\int_{-\infty}^{\infty}F\phi(s)ds=\phi(0)=\langle \delta,\phi\rangle\right]$$ \item ~[Exponential]: $$\left[\langle Fe^{2\pi iax},\phi\rangle=\langle e^{2\pi iax},F\phi\rangle=\int_{-\infty}^{\infty}e^{2\pi iax}F\phi(x)dx=F^{-1}F\phi(a)=\phi(a)=\langle \delta_a,\phi\rangle\right]$$ \item ~[Sines and Cosines]: $$\left[\langle F\cos2\pi ax,\phi \rangle=\langle F\left[\frac{1}{2}(e^{2\pi iax}+e^{-2\pi iax})\right],\phi\rangle=\langle\frac{1}{2}(\delta_a+\delta_{-a}),\phi\rangle\right]$$ $$\left[\langle F\sin2\pi ax,\phi \rangle=\langle F\left[\frac{1}{2i}(e^{2\pi iax}-e^{-2\pi iax})\right],\phi\rangle=\langle\frac{1}{2i}(\delta_a-\delta_{-a}),\phi\rangle\right]$$ \end{itemize} \end{enumerate} \section{Signals} Discrete signals are represented mathematically as sequences of numbers; in a practical setting, such sequences can arise from periodic sampling of an analog (i.e. continuous-time) signal $x_a(t)$: $$x[n]=x_a(nT)$$ where the quantity $T$ is the sampling period, and its reciprocal is the sampling frequency. 
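A minimal numerical sketch of this sampling relation (assuming NumPy is available; the signal, rate, and frequencies below are arbitrary choices for illustration): sampling $x_a(t)=\cos(2\pi f t)$ at $t=nT$, and checking that two continuous-time frequencies separated by the sampling frequency $1/T$ produce identical sample sequences.
\begin{verbatim}
# Sketch: x[n] = x_a(nT) and aliasing of frequencies separated by 1/T.
import numpy as np

T = 1.0 / 8.0                 # sampling period (assumed): 8 samples per second
n = np.arange(16)             # sample indices
f0 = 1.0                      # a 1 Hz cosine (assumed)
f1 = f0 + 1.0 / T             # same cosine shifted by the sampling frequency

x0 = np.cos(2 * np.pi * f0 * n * T)   # samples of cos(2*pi*f0*t) at t = nT
x1 = np.cos(2 * np.pi * f1 * n * T)   # samples of the shifted frequency

print(np.allclose(x0, x1))    # True: the two sample sequences coincide
\end{verbatim}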
The sampling process is convenient to represent mathematically in two stages, consist of an impulse train modulator, followed by conversion of the impulse train to a sequence: $$x_s(t)=x_a(t)s(t) = \sum_{n=-\infty}^{\infty} x_a(t)\delta(t-nT)$$ Thus the frequency-domain relation between input and output of an ideal C/D converter is: $$X_s(j\Omega) = \frac{1}{2\pi} X_a(j\Omega)*S(j\Omega)= \frac{1}{T}\sum_{k=-\infty}^\infty X_a(j(\Omega-k\Omega_s))$$ as FT of the periodic impulse train $s(t)$ is also the periodic impulse train of frequency $\Omega_s=2\pi/T$: $$S(j\Omega) = \Omega_s \sum_{k=-\infty}^\infty \delta(\Omega-k\Omega_s)$$ which states that the Fourier transform of $x_s(t)$ consists of periodically repeated copies of $X_a(j\Omega)$ shifted by integer multiples of the sampling frequency, and then superimposed to produce periodic Fourier transform of the impulse train of samples. {\bfseries Nyquist-Shannon Sampling Theorem:} Let $x_a(t)$ be a and limited signal with $$X_a(j\Omega) = 0 \text{ for }|\Omega| \ge \Omega_N$$ then $x_a(t)$ is uniquely determined by its samples $x[n]=x_a(nT)$ if $$\Omega_s \ge 2\Omega_N$$ \subsection{Sinusoids} The complex exponential sequence: $$x[n]=A\alpha^n=|A||\alpha|^n\cos(\omega_0 n+\phi)+j|A||\alpha|^n\sin(\omega_0 n+\phi)$$ oscillates with an exp growing envelope if $|\alpha|>1$ or with an exp decaying envelope if $|\alpha|<1$. \begin{enumerate} \item {\bfseries Frequency:} The discrete complex sequences with frequencies $(\omega_0+2\pi r)$ where r is an integer are indistinguishable from one another, thus the interpretation of low and high frequencies is different, as $\omega_0$ increases from 0 to $\pi$, $x[n]$ oscillates progressively more rapidly, however as $\omega_0$ increases from $\pi$ to $2\pi$, the oscillations become slower; as a consequence, for values of $\omega_0$ in the vicinity of $2\pi k$ for any integer value of k are typically referred to as low frequencies, whereas in the vicinity of $(\pi + 2\pi k)$ for any integer value of k are typically referred to as high frequencies. \item {\bfseries Periodicity:} The periodicity with integer period N requires that $e^{j\omega_0(n+N)}=e^{j\omega_0n}$ which is true only for $\omega_0 N =2\pi k$ where k is an integer, consequently, discrete complex sequences are not necessarily periodic in $n$ with period $2\pi/\omega_0$ and depending on the value of $\omega_0$ may not be periodic at all; there are N distinguishable frequencies namely $\omega_k=2\pi k/N, k =0,1,\ldots,N-1$ for which the corresponding sequences are periodic with period N. \end{enumerate} Many sequences can be represented by a Fourier integral of the form: $$x[n]=\frac{1}{2\pi}\int_{-\pi}^{\pi}X(e^{j\omega})e^{j\omega n} d\omega$$ where, $$X(e^{j\omega}) = \sum_{n=-\infty}^\infty x[n]e^{-j\omega n}$$ together form a Fourier representation for the sequence, which is a superposition of small complex sequences of the form $\frac{1}{2\pi}X(e^{j\omega})e^{j\omega n}d\omega$ with $\omega$ ranging over an interval of length $2\pi$ and with $X(e^{j\omega})$ determining the relative amount of each complex sinusoidal component. 
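A small sketch of this transform pair (assuming NumPy; the sequence below is an arbitrary example): the DTFT is evaluated by direct summation on a dense frequency grid, and $x[n]$ is recovered by a Riemann-sum approximation of the inverse integral.
\begin{verbatim}
# Sketch: DTFT by direct summation and numerical inversion of the integral.
import numpy as np

x = np.array([1.0, 2.0, 0.5, -1.0])      # arbitrary finite sequence (assumed)
n = np.arange(len(x))
w = np.linspace(-np.pi, np.pi, 4096, endpoint=False)   # frequency grid

# X(e^{jw}) = sum_n x[n] e^{-jwn}
X = (x[None, :] * np.exp(-1j * np.outer(w, n))).sum(axis=1)

# x[n] ~ (1/2pi) * integral of X(e^{jw}) e^{jwn} dw  (Riemann sum on the grid)
dw = w[1] - w[0]
x_rec = (X[None, :] * np.exp(1j * np.outer(n, w))).sum(axis=1) * dw / (2 * np.pi)

print(np.allclose(x_rec.real, x, atol=1e-6))   # True (imaginary part ~ 0)
\end{verbatim}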
It follows that the Fourier transform of the discrete-time sequence is related to its continuous-time counterpart, from which it is sampled with period $T$, by $$X(e^{j\omega})=\frac{1}{T}\sum_{k=-\infty}^\infty X_a\left(j\frac{\omega}{T}-j\frac{2\pi}{T}k\right)$$ If the continuous-time transform is bandlimited, so that $X_a(j\Omega)=0, |\Omega|\ge \frac{\pi}{T}$, then $$X(e^{j\omega})=\frac{1}{T}X_a\left(j \frac{\omega}{T}\right),~~|\omega|\le\pi$$ i.e. the discrete-time and continuous-time Fourier transforms are related by a linear scaling of the frequency axis. \subsection{ Discrete Systems } A discrete-time system is defined mathematically as a transformation or operator that maps an input sequence with values x[n] into an output sequence with values y[n]: $$y[n]=T\{x[n]\}$$ where the value of the output sequence at each value of the index n may depend on input samples x[n] for all values of n. \begin{table}[H] \centering \begin{tabular}{|c|c|c|c|c|} \hline Systems & Linear & Time-inv & Stable & Causal \\\hline&&&&\\ $y[n]=x[n-n_d]$ & YES & YES & YES & YES \\[0.5cm] $y[n]=\sum_{k=-\infty}^n x[k]$ & YES &YES & NO & YES \\[0.5cm] $y[n]=x[n+1]-x[n]$ & YES & YES& YES & NO \\[0.5cm] $y[n]=\frac{1}{M_1+M_2+1}\sum_{k=-M_1}^{k=M_2}x[n-k]$ & YES & YES & YES & YES (for $M_1=0$) \\[0.5cm] \hline \end{tabular} \label{tab:my_label} \end{table} \begin{enumerate} \item {\bfseries General response of LTI system in terms of impulse response:} Since any sequence can be represented as $x[n]=\sum_{k=-\infty}^{\infty}x[k]\delta[n-k]$, if the linearity property is combined with the representation of a general sequence as a linear combination of delayed impulses, it follows that a linear system can be completely characterized by its impulse response; let $h_k[n]$ be the response of the system to the input $\delta[n-k]$, an impulse occurring at $n=k$: $$y[n]=T\left[\sum_{k=-\infty}^{\infty}x[k]\delta[n-k]\right] = \sum_{k=-\infty}^{\infty}x[k]h_k[n]$$ If only linearity is imposed then $h_k[n]$ will depend on both n and k; the property of time invariance implies that if $h[n]$ is the response to $\delta[n]$, then the response to $\delta[n-k]$ is $h[n-k]$: $$y[n]=\sum_{k=-\infty}^\infty x[k]h[n-k]=x[n]*h[n]$$ For right-sided sequences starting at $n = M_1$ and $n = M_2$ with lengths $N_1$ and $N_2$, the convolution extends from $n=M_1+M_2$ to $n=M_1+M_2+N_1+N_2-2$ (length $N_1+N_2-1$). \item {\bfseries Properties of LTI} \begin{itemize} \item If two LTI systems with impulse responses $h_1[n]$ and $h_2[n]$ are cascaded in either order, the equivalent overall impulse response $h[n]$ is $$h[n] = h_1[n]*h_2[n] = h_2[n]*h_1[n]$$ \item The connection of two LTI systems in parallel is equivalent to a single system whose impulse response is the sum of the individual impulse responses: $$h[n]=h_1[n]+h_2[n]$$ \item LTI systems are stable if and only if the impulse response is absolutely summable, $$B_h=\sum_{k=-\infty}^{\infty}|h[k]| < \infty$$ \item The condition $h[n] = 0, n < 0$ implies causality for LTI systems. 
\end{itemize} \item {\bfseries General response of LTI system in terms of fourier transform of impulse response:} Complex exponential sequences are eigenfunctions of LTI systems, and the response to a complex exponential sequence is a complex exponential with the same frequency as the input and with amplitude and phase determined by the system; with input $x[n]=e^{j\omega n}$ for $-\infty < n < \infty$, the corresponding output of an LTI system with impulse response $h[n]$ is easily shown to be $$y[n]=H(e^{j\omega})e^{j\omega n}$$ where $H(e^{j\omega}) = \sum_{k=-\infty}^{\infty}h[k]e^{-j\omega k}$, the Fourier representation of the impulse response, is the eigenvalue of the system; it describes the change in complex amplitude of a complex exponential input signal as a function of the frequency $\omega$, and is called the frequency response of the system. Since a broad class of signals can be represented as a linear combination of complex exponentials in the form $$x[n]=\sum_k \alpha_k e^{j\omega_k n}$$ the corresponding output of an LTI system is $$y[n]=\sum_k \alpha_k H(e^{j\omega_k})e^{j\omega_k n}$$ As the frequency response of an LTI system is always periodic in the frequency variable with period $2\pi$, one need only specify $H(e^{j\omega})$ over an interval of length 2$\pi$. {\bfseries Note: } A sufficient condition for the existence of the frequency response is that the system be stable. \item {\bfseries Transient analysis:} Can gain additional insight into LTI systems by considering inputs of the form $$x[n]=e^{j\omega n}u[n]$$ i.e. complex exponentials suddenly applied at an arbitrary time, which for convenience here we choose as $n=0$; the corresponding output of a causal LTI with impulse response h[n] is: $$y[n]=\left( \sum_{k=0}^n h[k]e^{-j\omega k}\right)e^{j\omega n}, n\ge 0; \text{ else } 0 \text{ for } n < 0$$ Considering the output for $n\ge 0$, can write: $$y[n]=\left(\sum_{k=0}^{\infty}h[k]e^{-j\omega k}\right)e^{j\omega n} - \left(\sum_{k=n+1}^\infty h[k]e^{-j\omega k}\right)e^{j\omega n} =H(e^{j\omega})e^{j\omega n} - \left(\sum_{k=n+1}^\infty h[k]e^{-j\omega k}\right)e^{j\omega n}$$ See that the output consists of the sum of two terms, $y_{ss}[n]+y_t[n]$; the first term, the steady-state response, is identical to the response of the system when the input is $e^{j\omega n}$ for all n; the second term is the amount by which the output differs from the eigenfunction result, and corresponds to the transient response, which is bounded by the sum of the absolute values of all of the impulse response samples; it follows that, for stable systems, the transient response must become increasingly smaller as $n\to \infty$. {\bfseries Note: } It is no surprise that the condition for existence of the frequency response is the same as the condition for dominance of the steady-state solution; indeed, a complex exponential that exists for all n can be thought of as one that is applied at $n=-\infty$, so the eigenfunction property of complex exponentials depends on stability of the system, since by any finite n the transient response will have decayed to zero. \item {\bfseries Difference equations of LTIs} An important class of LTI systems consists of causal and stable systems (including the FIR systems) for which the input x[n] and the output y[n] satisfy an $N^\text{th}$ order linear constant-coefficient difference equation of the form: $$\sum_{k=0}^N a_ky[n-k]=\sum_{k=0}^M b_k x[n-k]$$ To find the frequency response and thus the impulse response, set $x[n]=\delta[n]$, and take the Fourier transform. 
(not the other way round; merely being a polynomial does not make the system stable) \end{enumerate} \subsection{Z-Transform} Motivation: the Fourier transform does not converge for all sequences, so it is useful to have a generalization of the FT that encompasses a broader class of signals. The z-transform of a sequence $x[n]$ is defined as $$X(z)=\sum_{n=-\infty}^\infty x[n]z^{-n}$$ Since the z-transform is a function of a complex variable, it is convenient to describe and interpret it using the complex z-plane; in the z-plane, the contour corresponding to $|z|=1$ is a circle of unit radius. For any given sequence, the set of values of z for which the z-transform power series converges is called the ROC: $$\sum_{n=-\infty}^\infty |x[n]r^{-n}|< \infty$$ Because of the multiplication of the sequence by the real exponential $r^{-n}$, it is possible for the z-transform to converge even if the Fourier transform does not. {\bfseries Properties of z-transform:} \begin{enumerate} \item The z-transform is a Laurent series, which represents an analytic function at every point inside the ROC. \item The inverse z-transform is the following contour integral: $$x[n]=\frac{1}{2\pi j}\int_C X(z)z^{n-1}dz$$ where, C represents a closed contour within the ROC of the z-transform. \item {[Linearity]} $$\left[ax_1[n]+bx_2[n]\leftrightarrow aX_1(z)+bX_2(z);~\text{ROC contains }R_{x1}\cap R_{x2}\right]$$ \item {[Multiplication by $z_0^{n}$, z Scaling]} $$\left[z_0^nx[n]\leftrightarrow X\left(\frac{z}{z_0}\right);~\text{ROC} =|z_0|R_x\right]$$ \item {[n Shifting, Multiplication by $z^{-n_0}$]} $$\left[x[n-n_0]\leftrightarrow z^{-n_0}X(z); \text{ROC = }R_x (\text{except possible addition or deletion of }z =0 \text{ or } z=\infty)\right]$$ \item {[Multiplication by n, z Derivatives]} $$\left[nx[n]\leftrightarrow-z\frac{d}{dz}[X(z)];~\text{ROC}=R_x\right]$$ \item {[Division by n, z Integrals]} $$\left[\frac{1}{n}x[n]\leftrightarrow-\int_0^z\frac{1}{z}X(z)dz;~\text{ROC}=R_x\right]$$ \item {[Convolution]} $$[x_1[n]*x_2[n]\leftrightarrow X_1(z)X_2(z); \text{ ROC contains }R_{x1}\cap R_{x2}]$$ \item {[Value Theorems]} $$\left[x[0]=\lim_{z\to \infty}X(z),~x[\infty]=\lim_{z\to 1}(z-1)X(z)\right]$$ $$\left[x[1]=\lim_{z\to\infty}z[X(z)-x[0]],~x[2]=\lim_{z\to\infty}z^2[X(z)-x[0]-x[1]z^{-1}]\right]$$ \end{enumerate} \begin{center} \begin{tabular}{|c|c|c|} \hline Sequence $x[n]$ & z-Transform $X(z)$ & ROC \\[0.2cm] $\delta[n]$ & $1$ &All z \\[0.5cm] $\delta[n-m]$ & $z^{-m}$ &All z except 0 (if $m > 0$) or $\infty$ (if $m < 0$) \\[0.5cm] $u[n]$ & $\frac{1}{1-z^{-1}}$ &$|z|>1$ \\[0.5cm] $-u[-n-1]$ & $\frac{1}{1-z^{-1}}$ &$|z|<1$ \\[0.5cm] $a^nu[n]$ & $\frac{1}{1-az^{-1}}$&$|z|>|a|$ \\[0.5cm] $-a^nu[-n-1]$ & $\frac{1}{1-az^{-1}}$&$|z|<|a|$ \\[0.5cm] $na^nu[n]$ & $\frac{az^{-1}}{(1-az^{-1})^{2}}$&$|z|>|a|$ \\[0.5cm] $-na^nu[-n-1]$ & $\frac{az^{-1}}{(1-az^{-1})^{2}}$&$|z|<|a|$ \\[0.5cm] $(n+1)a^nu[n]$ & $\frac{1}{(1-az^{-1})^{2}}$&$|z|>|a|$ \\[0.5cm] $-(n+1)a^nu[-n-1]$ & $\frac{1}{(1-az^{-1})^{2}}$&$|z|<|a|$ \\[0.5cm] $\sin (\omega_0 n)u[n]$& $\frac{\sin(\omega_0)z^{-1}}{1-2\cos(\omega_0)z^{-1}+z^{-2}}$&$|z|>1$ \\[0.5cm] $\cos (\omega_0 n)u[n]$& $\frac{1-\cos(\omega_0)z^{-1}}{1-2\cos(\omega_0)z^{-1}+z^{-2}}$&$|z|>1$ \\[0.5cm] \hline \end{tabular} \end{center} 
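A short numerical check of the first geometric pair in the table (assuming NumPy; the pole location and the test points are arbitrary choices): for points with $|z|>|a|$, the truncated series $\sum_n x[n]z^{-n}$ matches the closed form $1/(1-az^{-1})$.
\begin{verbatim}
# Sketch: check the pair  a^n u[n]  <->  1/(1 - a z^{-1}),  ROC |z| > |a|.
import numpy as np

a = 0.6                          # arbitrary pole location (assumed), |a| < 1
n = np.arange(200)               # enough terms for the series to converge
x = a ** n                       # x[n] = a^n u[n]

for z in (1.2, 0.9 * np.exp(1j * 0.7), 2.0 + 1.0j):   # test points, |z| > |a|
    lhs = np.sum(x * np.asarray(z, complex) ** (-n))  # sum_n x[n] z^{-n}
    rhs = 1.0 / (1.0 - a / z)                          # closed form
    print(np.isclose(lhs, rhs))                        # True at each point
\end{verbatim}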
{\bfseries Note:} Even though a sequence such as $\frac{\cos \omega_0 n}{n}$ is not absolutely summable, it does have finite energy, and its Fourier transform converges in the mean square sense to a discontinuous periodic function. Moreover, a sequence such as $\cos \omega_0 n$ is neither absolutely nor square summable, but a useful Fourier transform can be defined using impulse functions; in both cases, the Fourier transforms are not continuous, thus it is not strictly correct to think of the Fourier transform as being the z-transform evaluated on the unit circle. {\bfseries Characteristics of ROC for the z-transform:} \begin{enumerate} \item The ROC is a connected region and cannot contain any poles. \item The FT of $x[n]$ converges absolutely if and only if the ROC of the z-transform of $x[n]$ includes the unit circle. \item The ROC will either be of the form $0\le r_R < |z|$, or $|z|<r_L\le \infty$, or in general the annulus, i.e. $0\le r_R < |z| < r_L \le \infty$. \item For a sequence that is zero for $n<N_1<\infty$, the ROC extends outwards from the outermost finite pole in $X(z)$ to and possibly including $z=\infty$. \item For a sequence that is zero for $n > N_2 > -\infty$, the ROC extends inwards from the innermost nonzero pole in X(z) to and possibly including $z=0$. \item A two-sided sequence is an infinite-duration sequence that is neither left-sided nor right-sided, for which the ROC will consist of a ring in the z-plane, bounded on the interior and exterior by a pole and not containing any poles. \item For a sequence that is zero except in a finite interval $-\infty < N_1 \le n \le N_2 < \infty$, the ROC is the entire z-plane except possibly $z=0$ or $z=\infty$. \end{enumerate} \subsection{Frequency Analysis of LTIs} With the frequency response expressed in polar form, the magnitude and phase of the FT of the system input and output are related by $$|Y(e^{j \omega})| = |H(e^{j \omega})|\cdot |X(e^{j\omega})|$$ $$\angle Y(e^{j \omega}) = \angle H(e^{j \omega}) + \angle X(e^{j\omega})$$ Denote the principal value of the phase of $H(e^{j\omega})$ as ARG[$H(e^{j\omega})$], where $$-\pi < \text{ARG}[H(e^{j\omega})]\le \pi$$ In many cases, the principal value will exhibit discontinuities of 2$\pi$ radians when viewed as a function of $\omega$. Another particularly useful representation of phase is through the group delay defined as: $$\tau(\omega) = \text{grd}[H(e^{j\omega})] = -\frac{d}{d\omega}\{\text{arg}[H(e^{j\omega})]\}$$ If the group delay is constant with frequency then each narrowband component will undergo identical delay; if not, different delays are applied to different frequency packets, resulting in a dispersion in time of the output signal energy. \begin{enumerate} \item {\bfseries General magnitude response of rational LTIs} It follows that, for a system whose input and output satisfy a difference equation of the general form, the frequency response has the algebraic form: $$H(e^{j\omega})=\frac{\sum_{k=0}^M b_k e^{-j\omega k}}{\sum_{k=0}^N a_k e^{-j \omega k }}$$ Convenient to express it in terms of poles and zeros of $H(z)$: $$H(e^{j\omega })=\left(\frac{b_0}{a_0}\right)\frac{\Pi_{k=1}^M (1-c_ke^{-j\omega})}{\Pi_{k=1}^N (1-d_k e^{-j\omega})}$$ Each of the factors $(1-c_k e^{-j\omega})$ in the numerator contributes a zero at $z=c_k$ and a pole at $z=0$; similarly, each of the factors $(1-d_k e^{-j \omega})$ in the denominator contributes a zero at $z=0$ and a pole at $z=d_k$. 
Correspondingly, the magnitude function is $$|H(e^{j\omega})| = \left|\frac{b_0}{a_0}\right|\frac{\Pi_{k=1}^M |1-c_ke^{-j\omega}|}{\Pi_{k=1}^N |1-d_k e^{-j\omega}|}$$ Note that $|H(e^{j \omega})|$ is the product of the magnitudes of all the zero factors of H(z) evaluated on the unit circle, divided by the product of the magnitudes of all the pole factors evaluated on the unit circle. In discussing LTI systems, it is useful to identify two classes: \begin{itemize} \item In the first class, at least one nonzero pole of $H(z)$ is not cancelled by a zero; in this case, $h[n]$ will not be zero outside a finite interval, and consequently systems of this class are infinite impulse response (IIR) systems. \item For the second class of systems, $H(z)$ has no poles except at $z=0$, thus a partial fraction expansion is not possible, and $H(z)$ is simply a polynomial in $z^{-1}$; in this case, the impulse response is finite in length, it is zero outside a finite interval, and consequently these are finite impulse response (FIR) systems. \end{itemize} \item {\bfseries Frequency response of 1st order systems} A single factor of the form $(1-re^{j\theta}e^{-j\omega})$, where r is the radius and $\theta$ is the angle of the pole or zero in the z-plane, represents either a pole or a zero at radius r and angle $\theta$ in the z-plane. The gain in dB associated with this factor is $$(+,-) 20\log_{10}|1-re^{j\theta}e^{-j\omega}| = (+,-) 10\log_{10}[1+r^2-2r\cos(\omega-\theta)]$$ The contribution to the principal value of the phase for such a factor is $$(+,-)\text{ARG}[1-re^{j\theta}e^{-j\omega}] = (+,-) \arctan \left[\frac{r\sin(\omega-\theta)}{1-r\cos(\omega-\theta)}\right]$$ In inferring frequency response characteristics from pole-zero plots of discrete-time systems, the associated vector diagrams in the z-plane are typically helpful, in which each pole and zero factor can be represented by a vector in the z-plane from the pole or zero to a point on the unit circle. The magnitude response is the product of the lengths of the zero vectors, divided by the product of the lengths of the pole vectors: $$|H(e^{j\omega})|=\frac{||\cdot||\cdot \ldots \cdot ||}{||\cdot||\cdot\ldots\cdot ||}$$ The gain curve is dome shaped: for a zero on $r=1$ it dips (to $-\infty$ dB) at $\omega=\theta$ and achieves its maximum at $\omega=\theta+\pi$; on decreasing $r$ the curve approaches a flat line, while on increasing $r$ it moves upwards, approaching a regular oscillation. \item {\bfseries Frequency response of 2nd order systems} Consider the 2nd order system: $$H(z)=\frac{1}{(1-re^{j\theta}z^{-1})(1-re^{-j\theta}z^{-1})}=\frac{1}{1-2r\cos\theta z^{-1}+r^2z^{-2}}$$ The difference equation satisfied by the input and output of the system is $$y[n]-2r\cos\theta y[n-1]+r^2y[n-2]=x[n]$$ As earlier: $$20\log_{10}|H(e^{j\omega})| = -10\log_{10}[1+r^2-2r\cos(\omega-\theta)]-10\log_{10}[1+r^2-2r\cos(\omega+\theta)]$$ \end{enumerate} \subsection{Implementation} Since the system impulse response has infinite duration, even if we only wanted to compute the output over a finite interval, it would not be efficient to do so by discrete convolution, since the computation required to compute the output would grow with n. The difference equation, however, provides the basis for an algorithm for recursive computation of the output at any time n in terms of the previous outputs, the current input sample and the previous input samples; yet it is not the only computational algorithm for implementing a particular system and, often, not the best choice. 
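A minimal sketch of this recursive computation (assuming NumPy and SciPy are available; the coefficients and input are arbitrary examples), following the $\sum_k a_k y[n-k]=\sum_k b_k x[n-k]$ convention with $a_0=1$ and zero initial conditions; the explicit loop is compared against \texttt{scipy.signal.lfilter}.
\begin{verbatim}
# Sketch: recursive evaluation of  sum_k a_k y[n-k] = sum_k b_k x[n-k],  a_0 = 1.
import numpy as np
from scipy.signal import lfilter

b = [0.2, 0.3]                  # arbitrary feedforward coefficients (assumed)
a = [1.0, -0.5]                 # a_0 = 1, one feedback term (assumed)
x = np.random.default_rng(0).standard_normal(32)

y = np.zeros_like(x)
for n in range(len(x)):
    acc = sum(b[k] * x[n - k] for k in range(len(b)) if n - k >= 0)
    acc -= sum(a[k] * y[n - k] for k in range(1, len(a)) if n - k >= 0)
    y[n] = acc / a[0]

print(np.allclose(y, lfilter(b, a, x)))   # True: matches the library routine
\end{verbatim}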
\begin{enumerate} \item {\bfseries Block Diagram} The implementation of an LTI discrete-time system by iteratively evaluating a recurrence formula obtained from a difference equation requires that delayed values of the output, input and intermediate sequences be available; this implies the need for storage of past sequence values, and one must also provide means for multiplication of the delayed sequence values by the coefficients, as well as means for adding the resulting products. $$y[n]=\sum_{k=1}^N a_k y[n-k] + \sum_{k=0}^M b_k x[n-k]$$ with corresponding system function $$H(z)=\frac{\sum_{k=0}^{M} b_kz^{-k}}{1-\sum_{k=1}^N a_kz^{-k}}$$ The general recurrence above and its corresponding block diagram can be viewed as a cascade of two systems, the first representing the computation of $v[n]$ from $x[n]$ and the second representing the computation of $y[n]$ from $v[n]$; since each of the two is an LTI system, the order in which the two are cascaded can be reversed without affecting the overall system function. Differences of implementation: The important difference is the order of implementation of zeros and poles in the original and reversed block diagram; theoretically, the order does not affect the overall system function, however, when a difference equation is implemented with finite-precision arithmetic, there can be a significant difference between two systems that are equivalent under the assumption of infinite-precision arithmetic in the real number system. Another point concerns the number of delay elements in the system: in the reversed case there are a total of $(N+M)$ delay elements, however the diagram can be redrawn by noting that exactly the same signal, $w[n]$, is stored in the two chains of delay elements, which can consequently be collapsed into one chain; specifically, the minimum number of delay elements required is $\max(N,M)$. \begin{minipage}{\linewidth} \centering \includegraphics[width=0.4\textwidth]{Screenshot from 2023-08-05 15-31-59.png} \includegraphics[width=0.4\textwidth]{Screenshot from 2023-08-05 17-47-34.png} \end{minipage} A noncanonic block diagram is referred to as the direct form I implementation of the general Nth order system because it is a direct realization of the difference equation satisfied by the input $x[n]$ and the output $y[n]$, which can be written directly from the system function by inspection; an implementation with the minimum number of delay elements is commonly referred to as direct form II or the canonic direct form. \item {\bfseries Signal Diagram} Signal flow graphs are totally equivalent to block diagrams as pictorial representations of difference equations, but are simpler to draw. \item {\bfseries IIR and FIR Systems} One consideration in the choice among different structures is computational complexity; in some digital implementations, structures with the fewest constant multipliers and the fewest delay branches are often desirable. \begin{itemize} \item Direct forms: implement the difference-equation coefficients directly (forms I and II above). \item Parallel forms: Factorize into partial fractions. \item Cascade forms: Factorize and pair each zero-pole. \end{itemize} \item {\bfseries FIR lattice filters} A useful building block has two inputs and two outputs; used in cascade, one obtains a flow graph whose lattice shape motivates the name lattice filter. \begin{minipage}{\linewidth} \centering \includegraphics[width=0.8\textwidth]{Screenshot from 2023-08-06 00-32-11.png} \end{minipage} The coefficients $k_1, k_2, \ldots, k_M$, are referred to generally as the k-parameters of the lattice structure. 
The node variables $a^{(i)}[n]$ and $b^{(i)}[n]$ are intermediate sequences that depend upon the input $x[n]$ through the set of difference equations: $$a^{(i)}[n]=a^{(i-1)}[n]-k_i b^{(i-1)}[n-1];~~i=1,2,\ldots,M$$ $$b^{(i)}[n]=b^{(i-1)}[n-1]-k_i a^{(i-1)}[n];~~i=1,2,\ldots,M$$ Note that there are no feedback loops, which implies that the system has a finite-duration impulse response. {\bfseries Impulse response: } It is clear that if $x[n]=\delta[n]$, then $a^{(i)}[0]=1$ for every i, since the impulse propagates with no delay through the top branch of all the stages; all other paths to any node variable $a^{(i)}[n]$ or $b^{(i)}[n]$ pass through at least one delay, with the greatest delay being the bottom path and then up to node variable $a^{(i)}[n]$ through the coefficient $-k_i$. It is convenient to assume specifically that $x[n]=\delta[n]$ so that $a^{(i)}[n]$ and $b^{(i)}[n]$ are the resulting impulse responses at the associated nodes, and that the corresponding z-transforms $A^{(i)}(z)$ and $B^{(i)}(z)$ are the transfer functions between the input and the $i^{th}$ nodes $$A^{(i)}(z)=\sum_{n=0}^i a^{(i)}[n]z^{-n}=1+\sum_{m=1}^i \alpha_m^{(i)} z^{-m}$$ where in the second form, the coefficients $\alpha_m^{(i)}$ for $m\le i$ are composed of sums of products of the coefficients $k_j$ for $j\le m$, as the coefficient for the longest delay from the input to the upper node is $\alpha_i^{(i)}=k_i$. In this notation, the impulse response from the $x[n]$ node to the variable $a^{(i)}[n]$ is $$a^{(i)}[n]=1, n=0;~~ \alpha_n^{(i)}, 1\le n \le i;~~ 0 \text{ otherwise}$$ [why is the first coefficient taken to be 1 here? it appears to be an assumption; a leading coefficient of 1 can always be arranged by factoring out a common constant and multiplying it back in at the first or the last stage] From the difference equations: $$A^{(i)}(z)=A^{(i-1)}(z)-k_iz^{-1}B^{(i-1)}(z)$$ $$B^{(i)}(z)=-k_iA^{(i-1)}(z)+z^{-1}B^{(i-1)}(z)$$ Can calculate $A^{(i)}(z)$ and $B^{(i)}(z)$ recursively up to any value of i with: $$A^{(0)}(z)=B^{(0)}(z)=1$$ The pattern emerges as: $$A^{(i)}(z)=z^{-i}B^{(i)}(1/z),~B^{(i)}(z)=z^{-i}A^{(i)}(1/z)$$ Combining the recursive relations: $$\alpha_m^{(i)} = \left[ \alpha_m^{(i-1)} - k_i \alpha_{i-m}^{(i-1)} \right],~~m=1,\ldots,i-1;~~\alpha_i^{(i)}=k_i$$ In matrix form: $$\vec{\alpha}^{(i)} = [\vec{\alpha}^{(i-1)}~~0] - k_i [\vec{\alpha'}^{(i-1)}~~-1]$$ from which it follows that: $$\vec{\alpha}^{(i-1)} = \frac{1}{1-k_i^2}[\vec{\alpha}^{(i)}+k_i\vec{\alpha}'^{(i)}]$$ which is the basis for an algorithm for analyzing an FIR lattice structure to obtain its transfer function. (the book gives a cleaner step-by-step algorithm for this; see it there) \item {\bfseries All pole lattice filters} A lattice structure for implementing the all-pole system function $H(z)=\frac{1}{A(z)}$ can be developed from the FIR lattice by recognizing that $H(z)$ is the inverse filter for the FIR function $A(z)$. \begin{minipage}{\linewidth} \centering \includegraphics[width=0.8\textwidth]{Screenshot from 2023-08-06 13-06-43.png} \end{minipage} {\bfseries Note: } For IIR systems, the guarantee of stability is in the condition $|k_i|<1$; even though the lattice structure requires twice the multiplications per output sample as the direct form, it is insensitive to quantization of the k-parameters. 
\item {\bfseries Quantization} A real number can be represented with infinite precision in two's complement form as: $$x=X_m\left(-b_0+\sum_{i=1}^\infty b_i 2^{-i}\right)$$ where $X_m$ is an arbitrary scale factor and the $b_i$'s are either 0 or 1; $b_0$ is referred to as the sign bit: if $b_0=0$ then $0\le x \le X_m$, and if $b_0=1$, then $-X_m\le x<0$. If only a finite number of bits $(B+1)$ is used, then $$\hat{x}=X_m\left(-b_0+\sum_{i=1}^B b_i 2^{-i}\right)$$ is quantized, so that the smallest difference between numbers is $\Delta = X_m 2^{-B}$. When the parameters of a rational system function or corresponding difference equation are quantized, the poles and zeros of the system move to new positions in the z-plane; equivalently, the frequency response is perturbed from its original value. If the system implementation structure is highly sensitive to perturbations of the coefficients, the resulting system may no longer meet the original design specifications, or an IIR system might even become unstable. \end{enumerate} \subsection{Design Techniques} Many of the lowpass filters are specified by a tolerance scheme where the passband limits vary symmetrically around unity gain, in which case $\delta_{p1}=\delta_{p2}$, or the passband may be constrained to have a maximum gain of unity, in which case $\delta_{p1}=0$; since the approximation cannot have an abrupt transition from passband to stopband, a transition region from the passband edge frequency $\omega_p$ to the beginning of the stopband at $\omega_s$ is allowed, in which the filter gain is unconstrained. \begin{minipage}{\linewidth} \centering \includegraphics[width=0.6\textwidth]{Screenshot from 2023-08-06 15-44-54.png} \end{minipage} Many of the filters used in practice are specified by such a tolerance scheme, with no constraints on the phase response other than those imposed implicitly by requirements of stability and causality; for example, the poles of the system function for a causal and stable IIR filter must lie inside the unit circle. In FIR filters, often the constraint of linear phase is imposed, which removes the phase of the signal from consideration in the design process. \subsubsection{IIR from continuous-time filters} The design of IIR filters relying on the transformation of a continuous-time filter meeting prescribed specifications is a reasonable approach, since the art of continuous IIR filter design is highly advanced; methods that work well for continuous-time IIR filters do not lead to simple closed-form design formulas when applied directly to discrete-time filters, because the frequency response of a discrete-time filter is periodic and that of a continuous-time filter is not. \begin{enumerate} \item Impulse Invariance: Consider the system function of a causal continuous-time filter expressed in terms of partial fractions, assuming all poles of single order: $$H_a(s) = \sum_{k=1}^N \frac{A_k}{s-s_k}$$ The impulse response of the causal discrete-time filter obtained by sampling $h_a(t)$ is $ h[n]= h_a(nT_d) $ and its system function is given by: $$H(z) = \sum_{k=1}^N \frac{A_k}{1-e^{s_k T_d}z^{-1}}$$ {\bfseries Properties: } \begin{itemize} \item The underlying band-limitedness assumption cannot be exactly true, causing aliasing; if the resulting discrete-time filter fails to meet the specifications because of aliasing, there is no alternative with impulse invariance but to try again with a higher-order filter or with different filter parameters, holding the order fixed. 
\item If the continuous-time causal filter is stable, corresponding to the real part of $s_k$ being less than zero, then the magnitude of $e^{s_kT_d}$ will be less than unity, so that the corresponding pole in the discrete-time filter is inside the unit circle. \end{itemize} {\bfseries Butterworth:} \begin{enumerate} \item Since $H_a(s)$ is independent of $T_d$ and is formed from the left half-plane poles of the magnitude-squared function, choose $T_d=1$. $$1-\delta_p \le |H_a(j\Omega)| \le 1,~0\le |\Omega| \le \omega_p$$ $$|H_a(j\Omega)| \le \delta_s,~\omega_s\le |\Omega| \le \pi$$ \item Since the magnitude of an analog Butterworth filter is a monotonic function of frequency $$|H_a(j\omega_p)| = 1-\delta_p$$ $$|H_a(j\omega_s)| = \delta_s$$ \item The magnitude-squared function of a Butterworth filter is of the form $$|H_a(j\Omega)|^2 = \frac{1}{1+\left({\Omega}/{\Omega_a}\right)^{2N}}$$ So the filter design process consists of determining the parameters $N$ and $\Omega_a$ to meet the desired requirements. Equations to solve: $$1+\left(\frac{\omega_p}{\Omega_a}\right)^{2N} = \left(\frac{1}{1-\delta_p}\right)^2, \text{ and, } 1+\left(\frac{\omega_s}{\Omega_a}\right)^{2N} = \left(\frac{1}{\delta_s}\right)^2$$ The solution is given by $$N=\frac{\log\left(\frac{1/\delta_s^2-1}{1/\delta_p^2-1}\right)}{2\log(\Omega_s/\Omega_p)};~\Omega_a = \frac{\Omega_p}{\left(1/\delta_p^2-1\right)^{\frac{1}{2N}}};~~[\delta_p\leftarrow 1-\delta_p]$$ \item Find poles: $|\Omega_a|e^{\frac{j(N+1+2i)\pi}{2N}},~i=0,1,\ldots,N-1$; where the s-left points lie on $\frac{\pi}{2} < \text{arg} < \frac{3\pi}{2}$ \item Transfer function: $$H_a(s)=\frac{\Omega_c^N}{(s-s_1)\ldots(s-s_N)}$$ \item z- and s- mapping: \begin{minipage}{\linewidth} \centering \begin{tabular}{|c|c|} \hline s-domain & z-domain \\\hline &\\ $\frac{1}{s+\alpha}$ & $\frac{1}{1-e^{-\alpha}z^{-1}}$ \\[0.5cm] $\frac{s+\alpha}{(s+\alpha)^2+\beta^2}$ & $\frac{1-e^{-\alpha}\cos\beta z^{-1}}{1-2e^{-\alpha}\cos\beta z^{-1} +e^{-2\alpha}z^{-2}}$ \\[0.5cm] $\frac{\beta}{(s+\alpha)^2+\beta^2}$ & $\frac{e^{-\alpha}\sin\beta z^{-1}}{1-2e^{-\alpha}\cos\beta z^{-1} +e^{-2\alpha}z^{-2}}$ \\&\\\hline \end{tabular} \label{tab:my_label} \end{minipage} \item For a specific example: $$|H_a(j\omega_p)| = 0.89125$$ $$|H_a(j\omega_s)| = 0.17783$$ the simultaneous solution is $N=5.8858$ and $\Omega_c=0.70474$; the parameter N, however, must be an integer, so round N up to the nearest integer, $N=6$. With the new $\Omega_c=0.7032$ and with $N=6$, the 12 poles of the magnitude-squared function $H_c(s)H_c(-s)$ are uniformly distributed in angle on a circle of radius $\Omega_c$; consequently the poles of $H_a(s)$ are the three pole pairs in the left half of the s-plane with the following coordinates: $$\text{Pole pair 1: }-0.182\pm j(0.679),$$ $$\text{Pole pair 2: }-0.497\pm j(0.497),$$ $$\text{Pole pair 3: }-0.679\pm j(0.182).$$ Therefore, $$H_a(s)=\frac{0.12093}{(s^2+0.3640s+0.4945)(s^2+0.9945s+0.4945)(s^2+1.3585s+0.4945)}$$ \end{enumerate} \item Bilinear: An algebraic transformation between the variables s and z that maps the entire $j\Omega$-axis in the s-plane to one revolution of the unit circle in the z-plane; since $-\infty < \Omega < \infty$ maps onto $-\pi \le \omega \le \pi$, the transformation between the continuous-time and discrete-time frequencies is necessarily nonlinear, and therefore its use is restricted to situations in which the corresponding nonlinear warping of the frequency axis is acceptable. 
With $H_a(s)$ denoting the continuous-time system function and $H(z)$ the discrete-time system function, the bilinear transformation corresponds to replacing s by $$s=\frac{2}{T_d}\left(\frac{1-z^{-1}}{1+z^{-1}}\right)$$ that is, $$H(z) = H_a \left(\frac{2}{T_d}\left(\frac{1-z^{-1}}{1+z^{-1}}\right)\right)$$ {\bfseries Properties: } \begin{itemize} \item If $\sigma < 0$ in $s=\sigma +j \Omega$ then it follows that $|z|<1$ for any value of $\Omega$; that is, if a pole of $H_a(s)$ is in the left half of the s-plane, its image in the z-plane will be inside the unit circle. \item The $j\Omega$ axis of the s-plane maps onto the unit circle. \item The bilinear transformation avoids the problem of aliasing encountered with the use of impulse invariance, because it maps the entire imaginary axis of the s-plane onto the unit circle in the z-plane; however, the price paid is a nonlinear compression of the frequency axis. \item The relationship between the variables $\omega$ and $\Omega$ is: $$\omega = 2\arctan(\Omega T_d/2)$$ \item The distortion in the frequency axis also manifests itself as a warping of the phase response of the filter, so one cannot obtain a linear-phase discrete-time lowpass filter by applying the bilinear transformation to a continuous counterpart. \end{itemize} {\bfseries Butterworth:} \begin{enumerate} \item Since $H_a(s)$ is independent of $T_d$ and is formed from the left half-plane poles of the magnitude-squared function, choose $T_d=1$. $$1-\delta_p \le |H_a(j\Omega)| \le 1,~0\le |\Omega| \le 2\tan\left(\frac{\omega_p}{2}\right)$$ $$|H_a(j\Omega)| \le \delta_s,~2\tan\left(\frac{\omega_s}{2}\right)\le |\Omega| \le \infty$$ \item Since the magnitude of an analog Butterworth filter is a monotonic function of frequency $$|H_a\left(j 2\tan\left(\frac{\omega_p}{2}\right)\right)| = 1-\delta_p$$ $$|H_a\left(j 2\tan\left(\frac{\omega_s}{2}\right)\right)| = \delta_s$$ \item The magnitude-squared function of a Butterworth filter is of the form $$|H_a(j\Omega)|^2 = \frac{1}{1+\left({\Omega}/{\Omega_a}\right)^{2N}}$$ So the filter design process consists of determining the parameters $N$ and $\Omega_a$ to meet the desired requirements. Equations to solve: $$\left[\text{as in impulse invariance, with }\omega_p,~\omega_s\text{ replaced by }2\tan\left(\tfrac{\omega_p}{2}\right),~2\tan\left(\tfrac{\omega_s}{2}\right)\right]$$ The solution is given by $$N=\frac{\log\left(\frac{1/\delta_s^2-1}{1/\delta_p^2-1}\right)}{2\log(\Omega_s/\Omega_p)};~\Omega_a = \frac{\Omega_p}{\left(1/\delta_p^2-1\right)^{\frac{1}{2N}}};~~[\delta_p\leftarrow 1-\delta_p]$$ \item Find poles: $|\Omega_a|e^{\frac{j(N+1+2i)\pi}{2N}},~i=0,1,\ldots,N-1$; where the s-left points lie on $\frac{\pi}{2} < \text{arg} < \frac{3\pi}{2}$ \item Transfer function: $$H_a(s)=\frac{\Omega_c^N}{(s-s_1)\ldots(s-s_N)}$$ \item z- and s- mapping: \begin{minipage}{\linewidth} \centering \begin{tabular}{|c|c|} \hline s-domain & z-domain \\\hline &\\ $s$ & $2\frac{1-z^{-1}}{1+z^{-1}}$ \\&\\\hline \end{tabular} \label{tab:my_label} \end{minipage} \end{enumerate} \end{enumerate} \subsubsection{Transformation of lowpass IIR filters} Frequency-selective filters of the lowpass, highpass, bandpass, and bandstop types can be obtained from a lowpass discrete-time filter by use of transformations very similar to the bilinear transformation used to transform continuous-time system functions into discrete-time system functions. To see how this is done, assume that we are given a lowpass system function $H_{lp}(Z)$ that we wish to transform to a new system function $H(z)$, which has either lowpass, highpass, bandpass, or bandstop characteristics when evaluated on the unit circle. 
Note that we associate the complex variable $Z$ with the prototype lowpass filter and the complex variable $z$ with the transformed filter. Then, we define a mapping from the Z-plane to the z-plane of the form $$Z^{-1}=G(z^{-1})$$ If $H_{lp}(Z)$ is the rational system function of a causal and stable system, we naturally require that the transformed system function $H(z)$ be a rational function of $z^{-1}$ and that the system also be causal and stable. This places the following constraints on the transformation $Z^{-1} = G(z^{-1})$: \begin{enumerate} \item $G(z^{-1})$ must be a rational function of $z^{-1}$ \item The inside of the unit circle of the Z-plane must map to the inside of the unit circle of the z-plane. \item The unit circle of the Z-plane must map onto the unit circle of the z-plane. \end{enumerate} The simplest transformation satisfying these constraints that converts a lowpass prototype into a highpass filter is $$Z^{-1}=-\frac{z^{-1}+\alpha}{1+\alpha z^{-1}},~\alpha=-\frac{\cos\left(\frac{\theta_p+\omega_p}{2}\right)}{\cos\left(\frac{\theta_p-\omega_p}{2}\right)}$$ where, $$\theta_p=\text{Previous cut-off frequency},~\omega_p=\text{Desired cut-off frequency}$$ \subsubsection{FIR by windowing} The design techniques of FIR filters are based directly on approximating the desired frequency response or impulse response of the discrete-time system; one generally begins with an ideal desired frequency response $H_d(e^{j\omega})$ defined by piece-wise frequency responses with discontinuities at the boundaries between bands. The corresponding impulse response sequence can be expressed as: $$h_d[n]=\frac{1}{2\pi}\int_{-\pi}^{\pi} H_d(e^{j\omega})e^{j\omega n}d\omega$$ The impulse responses are noncausal and infinitely long; the most straightforward approach to obtaining an FIR approximation to such systems is to truncate the ideal impulse response through the process referred to as windowing: $$h[n]=h_d[n]w[n]$$ It follows from the modulation theorem that $$H(e^{j\omega}) = \frac{1}{2\pi}\int_{-\pi}^\pi H_d(e^{j\theta}) W(e^{j(\omega-\theta)})d\theta$$ i.e. $H(e^{j\omega})$ is the periodic convolution of the desired ideal frequency response with the FT of the window, thus the frequency response will be a smeared version of the desired response. \begin{itemize} \item If $w[n]=1$ for all n, then $H(e^{j\omega})=H_d(e^{j\omega})$, which suggests that if $w[n]$ is chosen such that $W(e^{j\omega})$ is concentrated in a narrow band of frequencies around $\omega=0$, i.e. it approximates an impulse, then $H(e^{j\omega})$ will look like $H_d(e^{j\omega})$ except where $H_d(e^{j\omega})$ changes abruptly. \item The choice of window is governed by the desire to have $w[n]$ as short as possible in duration, so as to minimize the computation in the implementation of the filter, while having $W(e^{j\omega})$ concentrated near $\omega=0$; these are conflicting requirements. \end{itemize} The approximation of an ideal filter by truncation of the ideal impulse response is identical to the issue of the convergence of Fourier series; the plots of $20\log_{10}|W(e^{j\omega})|$ consist of a main lobe at $\omega=0$ and side lobes, whereby the rectangular window has the narrowest main lobe and yields the sharpest transitions of $H(e^{j\omega})$ at a discontinuity of $H_d(e^{j\omega})$; however, its first side lobe is only about 13dB below the main peak. The Bartlett, Hamming, Hann, and Blackman windows show that by tapering the window smoothly to zero, side lobes are greatly reduced in amplitude; however, the price paid is a much wider main lobe. 
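A minimal sketch of the window method (assuming NumPy; the order $M=40$, cutoff $0.3\pi$ and the Hamming window are arbitrary illustrative choices): the ideal lowpass impulse response, symmetric about $M/2$, is truncated and multiplied by the window, and the resulting magnitude response is inspected on a frequency grid.
\begin{verbatim}
# Sketch: FIR lowpass by windowing the ideal impulse response h_d[n].
import numpy as np

M = 40                          # filter order (assumed), length M + 1
wc = 0.3 * np.pi                # cutoff frequency (assumed)
n = np.arange(M + 1)

# h_d[n] = sin(wc (n - M/2)) / (pi (n - M/2)), symmetric about M/2
hd = np.sinc(wc * (n - M / 2) / np.pi) * wc / np.pi

h = hd * np.hamming(M + 1)      # windowed (Hamming) impulse response

# magnitude response on a frequency grid
w = np.linspace(0, np.pi, 512)
H = np.array([np.sum(h * np.exp(-1j * w_k * n)) for w_k in w])
print(abs(H[0]), abs(H[-1]))    # approx. 1 at DC, approx. 0 near pi
\end{verbatim}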
{\bfseries Note: } In designing FIR filters, it is desirable to obtain a causal system with a generalized linear-phase response; consequently, all the windows are symmetric about $\frac{M}{2}$, which leads to causal filters in general, and if the desired impulse response is also symmetric about $\frac{M}{2}$ then the windowed impulse response will also have that symmetry, and the resulting frequency response will have a generalized linear phase. {\bfseries Kaiser window: } Kaiser found that a near-optimal window could be formed using the zero$^\text{th}$ order modified Bessel function of the first kind, a function that is much easier to compute: $$w[n]=\frac{I_0[\beta(1-[(n-\alpha)/\alpha]^2)^\frac{1}{2}]}{I_0(\beta)}, \text{ for } 0 \le n \le M$$ where $\alpha=M/2$, and $I_0(.)$ represents the zeroth order modified Bessel function of the first kind. $$ I_0(x) = 1+\frac{0.25x^2}{(1!)^2}+\frac{(0.25x^2)^2}{(2!)^2}+\frac{(0.25x^2)^3}{(3!)^2}+\ldots$$ The Kaiser window has two parameters: the length $(M+1)$ and a shape parameter $\beta$, by varying which one can trade side-lobe amplitude for main-lobe width; for example, if $\beta=0$ it reduces to the rectangular window, and increasing M while holding $\beta$ constant causes the main lobe to decrease in width, but does not affect the peak amplitude of the side lobes. {\bfseries Low-pass filter:} The desired ideal frequency response with cut-off frequency $\omega_c=\frac{\omega_p+\omega_s}{2}$, chosen so that the impulse response is symmetric about $\frac{M}{2}$, is defined as: $$H_{lp}(e^{j\omega}) = \left\{\begin{array}{cc} e^{-j\omega\frac{M}{2}} & |\omega|<\omega_c \\ 0 & \omega_c < |\omega| \le \pi \end{array}\right.$$ The corresponding impulse response from the windowed ideal impulse response is $$h[n] = \frac{\sin[\omega_c(n-M/2)]}{\pi(n-M/2)} w[n]$$ Kaiser obtained a pair of formulas that permit the filter designer to predict in advance the values of $M$ and $\beta$ needed to meet a given frequency-selective filter specification. $$\beta=\left\{\begin{array}{cc} 0.1102(A-8.7) & A > 50\\ 0.5842(A-21)^{0.4} + 0.07886(A-21) & 21 \le A \le 50 \\ 0.0 & A < 21 \\ \end{array}\right.$$ where $A=-20\log_{10}\delta,~\delta=\delta_p=\delta_s$ and, assuming $\Delta\omega = \omega_s-\omega_p$, $$M=\frac{A-8}{2.285\Delta \omega}$$ The design can be generalized to the case of multiple passbands and stopbands. \subsubsection{Optimal Filters} Refers to a filter that has been designed to achieve a specific objective while minimizing certain criteria, such as mean-square error, minimax error, or distortion. In the context of digital signal processing, an optimum filter is often designed to enhance or extract certain features from a signal while minimizing noise or interference. Design algorithms have been developed in which some of the parameters $M$, $\delta_1$, $\delta_2$, $\omega_p$, and $\omega_s$ are fixed and an iterative procedure is used to obtain optimum adjustments of the remaining parameters. Convenient to consider the design of a zero-phase filter, i.e. one for which $$h[n]=h[-n]$$ The corresponding frequency response is $$A(e^{j\omega}) = \sum_{n=-L}^L h[n]e^{-j\omega n}$$ where $L=\frac{1}{2}M$; $A(e^{j\omega})$ can be written as a polynomial $P(x)$ in $x=\cos\omega$, since $h[n]=h[-n]$ gives $A(e^{j\omega})=h[0]+\sum_{n=1}^{L}2h[n]\cos(\omega n)$ and each $\cos(\omega n)$ is a polynomial (Chebyshev) in $\cos\omega$. Let us define an approximation error function $$E(e^{j\omega}) = W(e^{j\omega})[H(e^{j\omega})-A(e^{j\omega})]$$ where $W(e^{j\omega})$ is a weighting function that incorporates the design parameters into the design process and $H(e^{j\omega})$ is the desired response. 
$$W(e^{j\omega})=\left\{\begin{array}{c c} K & 0 \le \omega \le \omega_p \\ 1 & \omega_s \le \omega \le \pi \end{array}\right.$$ $$H(e^{j\omega})=\left\{\begin{array}{c c} 1 & 0 \le \omega \le \omega_p \\ 0 & \omega_s \le \omega \le \pi \end{array}\right.$$ The alternation theorem states that the polynomial will correspond to the filter representing the unique best approximation of the ideal lowpass filter, with the ratio $\delta_1/\delta_2$ fixed at $K$ and with passband and stopband edges $\omega_p$ and $\omega_s$, if and only if $E(\cos\omega)$ exhibits at least $(L+2)$ alternations, i.e., if and only if $E(\cos\omega)$ alternately equals plus and minus its maximum value at least $(L + 2)$ times: $$W(\omega_i)[H(e^{j\omega_i}) - A(e^{j\omega_i})] = (-1)^{i+1}\delta,~~i = 1,2,\ldots,(L+2)$$ where $\delta$ is the optimum error. For a given set of extremal frequencies, the optimum error is given by $$\delta=\frac{\sum_{k=1}^{L+2} b_kH(e^{j\omega_k})}{\sum_{k=1}^{L+2} b_k\frac{(-1)^{k+1}}{W(\omega_k)}},~~b_k=\prod_{i=1, i\ne k}^{L+2} \frac{1}{x_k-x_i}$$ and the interpolation formula used to obtain the approximation is $$A(e^{j\omega}) = \frac{\sum_{k=1}^{L+1}\frac{d_k}{x-x_k}C_k}{\sum_{k=1}^{L+1}\frac{d_k}{x-x_k}},~~C_k=H(e^{j\omega_k})-\frac{(-1)^{k+1}\delta}{W(\omega_k)},~~d_k=\prod_{i=1, i\ne k}^{L+1} \frac{1}{x_k-x_i}$$ If $|E(\omega)| \le \delta$ for all $\omega$ in the passband and stopband, then the optimum approximation has been found. Otherwise, adopting the philosophy of the Remez exchange method, the extremal frequencies are exchanged for a completely new set defined by the $(L + 2)$ largest peaks of the error curve. \begin{figure}[H] \centering \includegraphics[width=0.6\textwidth]{Screenshot from 2023-09-01 12-49-29.png} \label{fig:enter-label} \end{figure} \subsection{Comparison of IIR and FIR filters} \begin{itemize} \item IIR filters have the advantage that a variety of frequency-selective filters can be designed using closed-form design formulas, and the coefficients of the discrete-time filter can be obtained by straightforward substitution into a set of design equations. \item IIR methods are limited to frequency-selective filters and permit only the magnitude response to be specified; if other shapes are necessary, or a prescribed phase or group delay must be approximated, an algorithmic procedure is required. \item FIR filters can have precisely generalized linear phase. \item Closed-form design equations do not exist for FIR filters. \end{itemize} \subsection{The DFT} Consider an aperiodic sequence $x[n]$ with Fourier transform $X(e^{j\omega})$ and assume that a sequence $\Tilde{X}[k]$ is obtained by sampling $X(e^{j\omega})$ at frequencies $\omega_k=\frac{2\pi k}{N}$, i.e. $$\Tilde{X}[k] = \left.X(e^{j\omega})\right|_{\omega=\frac{2\pi k}{N}} = X(e^{j\frac{2\pi k}{N}})$$ Since the Fourier transform is periodic in $\omega$ with period $2\pi$, the resulting sequence is periodic in $k$ with period $N$. It follows that, with $W_N=e^{-j\frac{2\pi}{N}}$, $$X[k]=\left\{\begin{array}{cc} \sum_{n=0}^{N-1}x[n]W_N^{kn} & 0 \le k \le N-1\\ 0 & \text{otherwise}\\ \end{array}\right.$$ $$x[n]=\left\{\begin{array}{cc} \frac{1}{N}\sum_{k=0}^{N-1}X[k]W_N^{-kn} & 0 \le n \le N-1\\ 0 & \text{otherwise}\\ \end{array}\right.$$ The fact that $X[k]=0$ for $k$ outside the interval $0\le k\le N-1$ and that $x[n]=0$ for $n$ outside the interval $0\le n \le N-1$ is implied, but not always stated explicitly.
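\par As a quick numerical check of the sampling relation above, the following Python sketch (assuming \texttt{numpy}; the test sequence is an arbitrary illustrative one) evaluates the defining sum $\sum_n x[n]W_N^{kn}$ directly and compares it with samples of the Fourier transform at $\omega_k=\frac{2\pi k}{N}$, and with the library FFT.
\begin{verbatim}
import numpy as np

# Arbitrary finite-length test sequence (illustrative values).
x = np.array([1.0, 2.0, 0.0, -1.0, 3.0, 0.5])
N = len(x)
n = np.arange(N)
k = np.arange(N)

# DFT by definition: X[k] = sum_n x[n] W_N^{kn},  W_N = e^{-j 2 pi / N}.
WN = np.exp(-2j * np.pi / N)
X_def = np.array([np.sum(x * WN ** (kk * n)) for kk in k])

# Samples of the DTFT X(e^{jw}) at w = 2 pi k / N give the same values.
X_dtft = np.array([np.sum(x * np.exp(-1j * 2 * np.pi * kk / N * n))
                   for kk in k])

print(np.allclose(X_def, X_dtft))          # True
print(np.allclose(X_def, np.fft.fft(x)))   # True: same convention as the FFT
\end{verbatim}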
The DFT $X[k]$ is equal to samples of the periodic Fourier transform $X(e^{j\omega})$, and if $x[n]$ is evaluated for values of $n$ outside the interval $0\le n\le N-1$, the result will not be zero, but rather a periodic extension of $x[n]$. An N-point DFT can be expressed as the matrix multiplication $X=Wx$, where $x$ is the original input signal (as a column vector), $W$ is the $N\times N$ square DFT matrix, and $X$ is the DFT of the signal. The transformation $W$ has entries $W_{jk}=\omega^{jk}$ with $\omega=e^{-j\frac{2\pi}{N}}$, or equivalently $$W=\left[\begin{array}{c c c c c c} 1 & 1 & 1 & 1 & \ldots & 1 \\ 1 & \omega & \omega^2 & \omega^3 & \ldots & \omega^{N-1} \\ 1 & \omega^2 & \omega^4 & \omega^6 & \ldots & \omega^{2(N-1)} \\ 1 & \omega^3 & \omega^6 & \omega^9 & \ldots & \omega^{3(N-1)} \\ \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \\ 1 & \omega^{N-1} & \omega^{2(N-1)} & \omega^{3(N-1)} & \ldots & \omega^{(N-1)(N-1)} \\ \end{array}\right]$$ (here $\omega$ is an $N$th root of unity, taken in the opposite, i.e. clockwise, direction). \subsubsection{Properties} \begin{enumerate} \item Linearity: $$ax_1[n]+bx_2[n]\leftrightarrow aX_1[k]+bX_2[k]$$ \item Circular shift: $$x[((n-m))_N]\leftrightarrow W^{km}_NX[k]$$ \item Duality: $$X[n] \leftrightarrow Nx[((-k))_N],~~((-k))_N = N-k\text{ for }1 \le k \le N-1$$ \end{enumerate} \subsubsection{Circular Convolution} Here, we consider two finite-duration sequences $x_1[n]$ and $x_2[n]$, both of length $N$, with DFTs $X_1[k]$ and $X_2[k]$, respectively, and we wish to determine the sequence $x_3[n]$ for which the DFT is $X_3[k] = X_1[k]X_2[k]$: $$x_3[n]=\sum_{m=0}^{N-1}x_1[m]x_2[((n-m))_N]$$ An $n\times n$ circulant matrix $C$ takes the form $$C=\left[\begin{array}{c c c c c} c_0 & c_{n-1} & \ldots & c_2 & c_1 \\ c_1 & c_0 & \ldots & c_3 & c_2 \\ \vdots & \vdots & \ddots & \vdots & \vdots \\ c_{n-2} & c_{n-3} & \ldots & c_0 & c_{n-1} \\ c_{n-1} & c_{n-2} & \ldots & c_1 & c_0 \\ \end{array}\right]$$ Given the matrix equation $Cx=b$, where $C$ is the circulant matrix of size $n$, we can write the equation as a circular convolution $$c*x = b$$ where $c$ is the first column of $C$, and the vectors $c$, $x$ and $b$ are cyclically extended in each direction. \subsubsection{Linear Convolution using Circular Convolution} Consider an L-point input sequence $x[n]$ and a P-point impulse response $h[n]$. The linear convolution of these two sequences, which will be denoted by $y[n]$, has finite duration with length $(L+P -1)$. For the circular convolution and linear convolution to be identical, the circular convolution must have a length of at least $(L + P - 1)$ points, i.e., both $x[n]$ and $h[n]$ must be augmented with sequence values of zero amplitude. This process is often referred to as zero-padding. Since LTI systems can be implemented by convolution, this implies that circular convolution can be used to implement these systems. \subsection{Fast Fourier Transform} Since $x[n]$ may be complex, $N$ complex multiplications and $N-1$ complex additions are required to compute each value of the DFT if the definition is used directly for the computation; this corresponds to $4N$ real multiplications and $(4N-2)$ real additions per value. To compute all $N$ values therefore requires a total of $N^2$ complex multiplications and $N(N-1)$ complex additions.
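\par Before moving on to the FFT, the relation above between circular and linear convolution can be verified numerically; the following Python sketch (assuming \texttt{numpy}; the sequences are arbitrary illustrative values) computes an $N$-point circular convolution as the inverse DFT of $X_1[k]X_2[k]$ and shows that zero-padding to length $L+P-1$ reproduces the linear convolution.
\begin{verbatim}
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])      # L = 4 (illustrative)
h = np.array([1.0, -1.0, 0.5])          # P = 3 (illustrative)
L, P = len(x), len(h)

# N-point circular convolution via the DFT: x3 = IDFT( X1[k] X2[k] ).
def circular_convolve(a, b, N):
    A = np.fft.fft(a, N)                # fft with length N zero-pads
    B = np.fft.fft(b, N)
    return np.real(np.fft.ifft(A * B))

# With N = L (too short) the result is a time-aliased linear convolution.
short = circular_convolve(x, h, L)

# With N >= L + P - 1 (zero-padding), circular and linear coincide.
N = L + P - 1
padded = circular_convolve(x, h, N)
linear = np.convolve(x, h)

print(np.allclose(padded, linear))   # True
print(short)                         # aliased; differs from the linear result
\end{verbatim}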
(It turns out the radix-2 FFT, by contrast, uses about $N\log_2 N$ complex additions and $\frac{N}{2}\log_2N$ complex multiplications.) Most approaches to improving the efficiency of computation of the DFT exploit the symmetry and periodicity properties of $W_N^{kn}$; specifically, $$W_N^{k(N-n)}=W_N^{-kn}=(W_N^{kn})^*$$ $$W_N^{kn}=W_N^{k(n+N)}=W_N^{(k+N)n}$$ The first class, called decimation in time, derives its name from the fact that in the process of arranging the computation into smaller transformations, the sequence $x[n]$ (generally thought of as a time sequence) is decomposed into successively smaller subsequences. In the second general class of algorithms, the sequence of discrete Fourier transform coefficients $X[k]$ is decomposed into smaller subsequences, hence its name, decimation in frequency. \subsubsection{Decimation in Time} The principle of decimation-in-time is conventionally illustrated by considering the special case of $N$ an integer power of $2$. Since $N$ is divisible by two, we can consider computing $X[k]$ by separating $x[n]$ into two $(N/2)$-point sequences consisting of the even-numbered points $g[n] = x[2n]$ and the odd-numbered points $h[n] = x[2n + 1]$. ... $$X[k]=G[((k))_{N/2}]+W_N^k H[((k))_{N/2}],~k=0,1,\ldots,N-1 $$ \chapter{Differential Equations} \section{Ordinary Differential Equations} [An ODE] $$\left[f(x,y,y',\ldots,y^{(n)})=0\right]$$ [is, basically, a mathematical statement relating a function to its derivatives, such that an integral] $$\left[\phi(x,y,C_1,\ldots,C_n)=0\right]$$ [explicit or implicit, which converts the ODE into an identity, is known as a solution of the ODE] \subsection{First Order and First Degree} \begin{enumerate} \item {\bfseries Exact:} $$[d(U(x,y))=0]:~ \left[U(x,y)=C\right]$$ \begin{center} \begin{tabular}{|c|c|}\hline Equation&Exact\\[0.3cm] $ydx+xdy$&$d(xy)$\\[0.3cm] $\frac{ydx-xdy}{y^2}$&$d\left(\frac{x}{y}\right)$\\[0.5cm] $2xydx+x^2dy$&$d(x^2y)$\\[0.3cm] $\frac{2xydx-x^2dy}{y^2}$&$d\left(\frac{x^2}{y}\right)$\\[0.5cm] $2xy^2dx+2x^2ydy$&$d(x^2y^2)$\\[0.3cm] $\frac{2xy^2dx-2x^2ydy}{y^4}$&$d\left(\frac{x^2}{y^2}\right)$\\[0.5cm] \hline \end{tabular}\end{center} \item {\bfseries Linear:} $$\left[y'+P(x)y=Q(x)\right]:~\left[y\cdot e^{\int P(x)dx}=\int(Q(x)\cdot e^{\int P(x)dx})dx+C\right]$$ {\bfseries The Bernoulli's Equation:} $$[y'+P(x)y=Q(x)y^n],~[v=y^{-n+1}]:~\left[\frac{dv}{dx}+P(x)(-n+1)v=(-n+1)Q(x)\right]$$ \item {\bfseries Variable Change} $$\left[y'=f(ax+by+c)\right],[v=ax+by+c]:[v'-a=bf(v)]: \left[\int\frac{dv}{bf(v)+a}=\int dx+C\right]$$ \item {\bfseries Homogeneous} $$\left[y'=\frac{p^n(x,y)}{q^n(x,y)}\right]:~\left[v+xv'=\frac{p(1,v)}{q(1,v)}\right]:~\left[\int \frac{q(1,v)dv}{p(1,v)-vq(1,v)}=\int \frac{dx}{x}+C\right], [y=vx]$$ {\bfseries The Reducible:} $$\left[y'=\frac{ax+by+c}{mx+ny+p}\right]:\left[y'=\frac{aX+bY+ah+bk+c}{mX+nY+mh+nk+p}\right]:\left[y'=\frac{aX+bY}{mX+nY}\right]: \left[\genfrac{}{}{0pt}{0}{x=X+h}{y=Y+k}\right]$$ \end{enumerate} \subsection{First Order and Higher Degree} \begin{enumerate} \item {\bfseries Solvable:} $$\left[[p-f_1(x,y)]\ldots [p-f_n(x,y)]=0\right]$$ $$\left[p-f_1(x,y)=0: F_1(x,y,C)=0 \ldots ~ p-f_n(x,y)=0: F_n(x,y,C)=0\right]$$ $$:\left[F_1(x,y,C)F_2(x,y,C)\ldots F_n(x,y,C)=0\right]$$ \item {\bfseries Parametric Solutions:} $$[y=f(x,p)]: \left[p=g(x,p,p')\right]:\left[F(x,p,C)=0\right]: [y=f(x,p),~F(x,p,C)=0]_p$$ {\bfseries The Clairaut's Equation:} $$[y=px+f(p)]:[y=Cx+f(C)],~ [x+f'(p)=0,~ y=px+f(p)]_p$$ \end{enumerate} \subsection{Linear Second Order Differential Equation} \begin{enumerate} \item {\bfseries Homogeneous:}
$$[y''+P(x)y'+Q(x)y=0]:\left[ y=C_1y_1(x)+C_2y_2(x)\right]$$ \item {\bfseries Inhomogeneous:} $$\left[y''+P(x)y'+Q(x)y=f(x)\right]:\left[ y=\{C_1y_1(x)+C_2y_2(x)\}_\text{HOMO}+Y(x)\right]$$ \end{enumerate} \subsubsection{General Relations} \begin{enumerate} \item ~[Fundamental Solutions] $$\left[\frac{dW}{W}=-P(x)dx\right]$$ [where] $$\left[W=y_1(x)y_2'(x)-y_1'(x)y_2(x)\right]$$ [or] $$\left[y_2=y_1(x)\int \frac{e^{[-\int P(x)dx]}}{y_1(x)^2}dx\right]$$ \item ~[Fundamental and Particular Solutions] $$\left[Y=c_1(x)y_1(x)+c_2(x)y_2(x)\right]$$ where, $$\left[c_1'(x)y_1(x)+c_2'(x)y_2(x)=0\right]$$ $$\left[c_1'(x)y_1'(x)+c_2'(x)y_2'(x)=f(x)\right]$$ \end{enumerate} \subsubsection{Constant Coefficients}\par [say the roots of the characteristic equation] $$\left[\varphi(m)=m^2+pm+q=0\right]$$ [are $m_1$ and $m_2$; then, for distinct real roots, a repeated root, and complex roots $\alpha\pm i\beta$ respectively,] $$[y_0=c_1e^{m_1x}+c_2e^{m_2x}]~[y_0=e^{mx}(c_1+c_2x)]~[y_0=e^{\alpha x}(c_1\cos \beta x+c_2\sin \beta x)]$$ [in a compact way, as $D$ is a linear differential operator, the particular integral is] $$\left[Y=\frac{1}{\varphi(D)}f(x)\right]$$ \begin{enumerate} \item $[f(x)=e^{ax}]$ $$\left[\frac{1}{\varphi(D)}=\frac{1}{\varphi(a)}\right],~[\varphi(a)=0]:\left[\frac{1}{\varphi(D)}=x\frac{1}{\varphi'(a)}\right],~[\varphi'(a)=0]:\left[\frac{1}{\varphi(D)}=x^2\frac{1}{\varphi''(a)}\right]$$ \item $[f(x)=\sin ax$ or $f(x)=\cos ax]$ $$\left[\frac{1}{\varphi(D^2)}=\frac{1}{\varphi(-a^2)}\right]\left[\frac{1}{\varphi(D^2)}=x\frac{1}{\varphi'(-a^2)}\right]\left[\frac{1}{\varphi(D^2)}=x^2\frac{1}{\varphi''(-a^2)}\right]$$ \item $[f(x)=x^m]$ $$[\varphi(D)^{-1}]: \left[(1\pm D)^{-1}=1\mp D+D^2\mp D^3+\ldots,~(1\pm D)^{-2}=1\mp 2D+3D^2\mp 4D^3+\ldots\right]$$ \item $[f(x)=e^{ax}V(x)]$ $$\left[\frac{1}{\varphi(D)}f(x)=e^{ax}\frac{1}{\varphi(D+a)}V(x)\right]$$ $$\left[\frac{1}{\varphi(D)}\cos ax~ V=\text{RP of } e^{iax}\frac{1}{\varphi(D+ia)}V,~\frac{1}{\varphi(D)}\sin ax~ V=\text{IP of } e^{iax}\frac{1}{\varphi(D+ia)}V\right]$$ \end{enumerate} {\bfseries Euler's Equations:} $$[(ax+b)^2y''+(ax+b)py'+qy=f(x)]$$[then, put] $$[ax+b=e^t]:~ \left[(ax+b)y'=ay'_t,~(ax+b)^2y''=a^2\left(y''_t-y'_t\right)\right]$$ {\bfseries Zero ($a=1,~b=0$):} $$\left[(x^2D^2+pxD+q)y=f(x)\right]: \left[(\delta_t ^2+(p-1)\delta_t+q)y=f(e^t)\right]$$ \subsubsection{Power Series, Polynomials} [check, consider, replace: convert, group, equate, test] $$[y''+p(x)y'+q(x)y=0]\left[y=\sum_{n=0}^{\infty}c_n(x-x_0)^n\right]\left[y'=\sum_{n=1}^{\infty}nc_n(x-x_0)^{n-1},~y''=\sum_{n=2}^{\infty}n(n-1)c_n(x-x_0)^{n-2}\right]$$ [in case the check fails (singular point), the Frobenius method] $$\left[p(x)=\frac{P(x)}{(x-x_0)},~q(x)=\frac{Q(x)}{(x-x_0)^2}\right] \left[y_r=(x-x_0)^r\sum_{n=0}^{\infty}c_n(x-x_0)^n=\sum_{n=0}^{\infty}c_n(x-x_0)^{n+r}\right]$$ $$\left[y_r'=\sum_{n=0}^{\infty}(n+r)c_n(x-x_0)^{n+r-1},~y_r''=\sum_{n=0}^{\infty}(n+r)(n+r-1)c_n(x-x_0)^{n+r-2}\right]$$ [from $a_0$, the indicial equation]: $$\left[r(r-1)+P(x_0)r+Q(x_0)=0\right], [r_1\ge r_2]$$ [then] $$\left[y_1=y_{r_1},~ [\text{roots not separated by an integer}]~y_2=y_{r_2},~ [\text{roots separated by an integer}]~y_2=y_1(x)\ln x+x^{r_2+1}\sum_{n=0}^{\infty}c_nx^n\right]$$ \subsection{Special Functions.} \begin{enumerate} \item {\bfseries Bessel's Functions:}\par [consider an ODE] $$\left[x^2y''+xy'+(x^2-k^2)y=0\right]$$ [and the solutions are sought as] $$\left[y=\sum_{n=0}^{\infty}a_nx^{r+n}\right]$$ [which on replacing to the ODE] $$\left[x^2\sum_{n=0}^{\infty}(r+n)(r+n-1)a_nx^{r+n-2}+x\sum_{n=0}^{\infty}(r+n)a_nx^{r+n-1}+(x^2-k^2)\sum_{n=0}^{\infty}a_nx^{r+n}=0\right]$$ $$\left[\text{or, }\sum_{n=0}^{\infty}[(r+n)(r+n-1)a_n+(r+n)a_n-k^2a_n]x^{r+n}+\sum_{n=0}^{\infty}a_nx^{r+n+2}=0\right]$$ $$\left[\text{or, 
}\sum_{n=0}^{\infty}[(r+n)^2-k^2]a_nx^{r+n}+\sum_{n=2}^{\infty}a_{n-2}x^{r+n}=0\right]$$ $$\left[\text{or, }(r^2-k^2)a_0x^r+[(r+1)^2-k^2]a_1x^{r+1} +\sum_{n=2}^{\infty}[[(r+n)^2-k^2]a_n+a_{n-2}]x^{r+n}=0\right]$$ [where the indicial equation is] $$\left[r^2-k^2=0\rightarrow r=\pm k\right]$$ [further, equating the identical powers of $x$] \begin{enumerate} \centering \item ~[$[(r+1)^2-k^2]a_1=0\rightarrow a_1=0$] \item $\left[\text{ $[(r+n)^2-k^2]a_n+a_{n-2}=0\rightarrow a_n=\frac{a_{n-2}}{k^2-(r+n)^2}$}\right]$ \end{enumerate} [and expanding the recursion formula] \begin{enumerate} \item ~[Odd Coefficients] $$\left[a_3=\frac{a_1}{k^2-(r+3)^2}=0,~~ a_5=\frac{a_3}{k^2-(r+5)^2}=0, ~a_7=0 \text{ and so on.}\right]$$ \item ~[Even Coefficients] $$\left[a_2=\frac{a_0}{r^2-(r+2)^2}=\frac{-a_0}{2(2r+2)}=\frac{-a_0}{1\cdot 2^2(r+1)}=\frac{-a_0}{1!\cdot 2^2(r+1)}\right]$$ $$\left[a_4=\frac{a_2}{r^2-(r+4)^2}=\frac{-a_2}{4(2r+4)}=\frac{-a_2}{2\cdot 2^2(r+2)}=\frac{a_0}{2!\cdot 2^4(r+1)(r+2)}\right]$$ $$\left[a_6=\frac{a_4}{r^2-(r+6)^2}=\frac{-a_4}{6(2r+6)}=\frac{-a_4}{3\cdot 2^2(r+3)}=\frac{-a_0}{3!\cdot 2^6(r+1)(r+2)(r+3)}\right]$$ $$\ldots$$ [so] $$\left[y_1=y_{r=k}=\sum_{n=0}^{\infty}a_nx^{k+n}=a_0x^k\left[1-\frac{x^2}{1!\cdot 2^2(k+1)}+\frac{x^4}{2!\cdot 2^4(k+1)(k+2)}-\ldots \right]\right]$$ $$\left[=a_0x^k\sum_{n=0}^{\infty}(-1)^n\frac{x^{2n}}{2^{2n}n!(k+1)(k+2)\ldots (k+n)}\right]$$ [Put] $$\left[a_0=\frac{1}{2^k\Gamma(k+1)}\right]$$ [to get] $$\left[y_1=\frac{1}{2^{k}\Gamma(k+1)}x^{k}\sum_{n=0}^{\infty}(-1)^n\frac{x^{2n}}{2^{2n}n!(k+1)(k+2)\ldots (k+n)}\right]$$ $$\left[=\sum_{n=0}^{\infty}(-1)^n\frac{x^{2n+k}}{2^{2n+k}n!\Gamma(k+1)(k+1)(k+2)\ldots (k+n)}\right]$$ $$\left[=\sum_{n=0}^{\infty}(-1)^n\left(\frac{x}{2}\right)^{2n+k}\frac{1}{n!\Gamma(n+k+1)}\right]$$ $$[=J_k(x)]$$ \end{enumerate} [then, depending on $k$] \par {\bfseries CASE 1:} [$k$ is not an integer], $$\left[y_2=y|_{r=-k}=\sum_{n=0}^{\infty}(-1)^n\left(\frac{x}{2}\right)^{2n-k}\frac{1}{n!\Gamma(n-k+1)}\right]$$$$\left[=J_{-k}(x)\right]$$ [so] :$$\left[y=C_1J_k(x)+C_2J_{-k}(x)\right]$$ {\bfseries CASE 2:} [general]\par [introducing] $$Y_k=\frac{1}{\sin k \pi}[J_k(x)\cos k\pi-J_{-k}(x)]$$ [which, for an integer order, say $k\to k(i)$, becomes] $$\left[Y_k=\lim_{k\to k(i)} Y_k=\lim_{k\to k(i)}\frac{1}{\pi}\left[\frac{d}{dk}J_k(x)-(-1)^k\frac{d}{dk}J_{-k}(x)\right]\right]$$ $$\left[=\frac{2}{\pi}J_k(x)\ln\frac{x}{2}-\frac{1}{\pi}\sum_{n=0}^{\infty}\frac{(-1)^n}{n!(n+k)!}\left(\frac{x}{2}\right)^{2n+k}\phi_\text{sum}-\frac{1}{\pi}\sum_{n=0}^{k-1}\frac{(k-n-1)!}{n!}\left(\frac{x}{2}\right)^{2n-k}\right]$$ [where] $$\left[\phi_\text{sum}=\phi_D(n+k+1)+\phi_D(n+1),~\phi_D(m)=\frac{\Gamma'(m)}{\Gamma(m)} \text{ is the Digamma}\right]$$ [or] $$\left[=\frac{2}{\pi}J_k(x)\left(\ln \frac{x}{2}+\gamma\right)-\frac{1}{\pi}\sum_{n=0}^{\infty}\frac{(-1)^n}{n!(n+k)!}\left(\frac{x}{2}\right)^{2n+k}h_\text{sum}-\frac{1}{\pi}\sum_{n=0}^{k-1}\frac{(k-n-1)!}{n!}\left(\frac{x}{2}\right)^{2n-k}\right]$$ [where] $$\left[\gamma=\text{Euler's constant},~h_m=1+\frac{1}{2}+\ldots+\frac{1}{m},~h_0=0\right]$$ $$\left[h_\text{sum}=h_n+h_{n+k},~h_m=\phi_D(m+1)+\gamma\right]$$ [then] $$\left[y=C_1J_k(x)+C_2Y_k(x)\right]$$ [moreover, the modified equation] $$\left[x^2y''+xy'-(x^2+k^2)y=0\right]$$ [solves as] $$\left[y=A_1J_k(-ix)+B_2Y_k(-ix)=C_1I_k(x)+C_2K_k(x)\right]$$ [some of the Bessel's are as follows] $$\left[J_{\frac{1}{2}}(x)=\sqrt{\frac{2}{\pi x}}\sin x,~J_{-\frac{1}{2}}(x)=\sqrt{\frac{2}{\pi x}}\cos x\right]$$$$\left[Y_0(x)=\frac{2}{\pi}\left[J_0(x)\left(\ln 
\frac{x}{2}+\gamma\right)-\sum_{n=1}^{\infty}\frac{(-1)^nh_n}{(n!)^2}\left(\frac{x}{2}\right)^{2n}\right]\right]$$ $$\left[I_0(x)=\sum_{k=0}^{\infty}\frac{x^{2k}}{4^k(k!)^2},~K_0(x)=\int_0^{\infty}\frac{\cos(xt)}{\sqrt{1+t^2}}dt\right]$$ {\bfseries Properties:} \begin{enumerate} \item ~[Recurrence] $$\left[J_{k+1}(x)=\frac{2k}{x}J_k(x)-J_{k-1}(x)\right]$$ $$[\text{Tell me about your two friends, I will tell about you}]$$ $$\left[J_k'(x)=-\frac{k}{x}J_k(x)+J_{k-1}(x)\right]$$ $$[\text{Tell me about yourself and your friend, I will tell your future}]$$ $$\left[[x^{\pm k}J_k(x)]'=\pm x^{\pm k}J_{k\mp 1}(x)\right]$$ $$[\text{Your aims decide your destiny}]$$ \item ~[Generator] $$\left[e^{\frac{x}{2}\left(t-\frac{1}{t}\right)}=\sum_{k=-\infty}^{\infty}J_k(x)t^k\right]$$ \end{enumerate} \item {\bfseries Legendre's Functions:}\par [consider an ODE] $$\left[(1-x^2)y''-2xy'+k(k+1)y=0\right]$$ [and the solutions are sought as] $$\left[y=\sum_{n=0}^{\infty}a_nx^{n}\right]$$ [which on replacing to the ODE] $$\left[(1-x^2)\sum_{n=2}^{\infty}n(n-1)a_nx^{n-2}-2x\sum_{n=1}^{\infty}na_nx^{n-1}+k(k+1)\sum_{n=0}^{\infty}a_nx^{n}=0\right]$$ $$\left[\text{or, }\sum_{n=2}^{\infty}n(n-1)a_nx^{n-2}-\sum_{n=2}^{\infty}n(n-1)a_nx^{n}-\sum_{n=1}^{\infty}2na_nx^{n}+\sum_{n=0}^{\infty}k(k+1)a_nx^{n}=0\right]$$ $$\left[\text{or, }\sum_{n=2}^{\infty}[n(n-1)a_{n}+k(k+1)a_{n-2}]x^{n-2}-\sum_{n=4}^{\infty}(n-2)(n-3)a_{n-2}x^{n-2}-\sum_{n=3}^{\infty}2(n-2)a_{n-2}x^{n-2}=0\right]$$ $$\left[\text{or, }\sum_{n=4}^{\infty}[n(n-1)a_{n}+k(k+1)a_{n-2}-(n-2)(n-3)a_{n-2}-2(n-2)a_{n-2}]x^{n-2}=0\right]$$ $$\left[\&~ 2a_2+k(k+1)a_0+(6a_3+k(k+1)a_1)x-2a_1x=0\right]$$ [further, equating the identical powers of $x$] \begin{enumerate} \centering \item $\left[2a_2+k(k+1)a_0=0\rightarrow a_2=\frac{-k(k+1)}{2}a_0\right]$ \item $\left[6a_3+k(k+1)a_1-2a_1=0\rightarrow a_3=\frac{2-k(k+1)}{6}a_1=-\frac{(k-1)(k+2)}{3!}a_1\right]$ \item $\left[n(n-1)a_{n}+k(k+1)a_{n-2}-(n-1)(n-2)a_{n-2}=0\right]$ $$\left[\rightarrow a_n=\frac{(n-1)(n-2)-k(k+1)}{n(n-1)}a_{n-2}\right]$$ \end{enumerate} [and expanding the recursion formula] $$\left[a_4=\frac{(k-2)k(k+1)(k+3)}{4!}a_0\right]$$ $$\left[a_5=\frac{(k-3)(k-1)(k+2)(k+4)}{5!}a_1 ~~\text{and so on}\right]$$ [so] $$\left[y=a_0y_1(x)+a_1y_2(x)\right]$$ [where] $$\left[y_1=1-\frac{k(k+1)}{2!}x^2+\frac{(k-2)k(k+1)(k+3)}{4!}x^4-+\ldots\right]$$ $$\left[=1+\sum_{n=1}^{\infty}(-1)^n\frac{[(k-2n+2)\ldots(k-2)k][(k+1)(k+3)\ldots(k+2n-1)]}{(2n)!}x^{2n}\right]$$ $$\left[y_2=x-\frac{(k-1)(k+2)}{3!}x^3+\frac{(k-3)(k-1)(k+2)(k+4)}{5!}x^5-+\ldots\right]$$ $$\left[=x+\sum_{n=1}^{\infty}(-1)^n\frac{[(k-2n+1)\ldots(k-3)(k-1)][(k+2)(k+4)\ldots(k+2n)]}{(2n+1)!}x^{2n+1}\right]$$ {\bfseries NOTE:}\par [when $k$ is even, $y_1(x)$, and when $k$ is odd, $y_2(x)$, is a polynomial] \par [for which the coefficients are chosen as] $$\left[a_k=\frac{(2k)!}{2^k(k!)^2}=\frac{1\cdot 3\cdot 5\cdots(2k-1)}{k!}\right]$$ [then by the recursion formula] $$\left[a_{k-2n}=(-1)^n\frac{(2k-2n)!}{2^kn!(k-n)!(k-2n)!}\right]$$ [so] $$\left[P_k(x)=\sum_{n=0}^{\frac{k}{2}\text{ or }\frac{k-1}{2}}(-1)^n\frac{(2k-2n)!}{2^kn!(k-n)!(k-2n)!}x^{k-2n}\right]$$ [moreover, by the Leibniz rule] $$\left[(1-x^2)P_k''-2xP_k'+k(k+1)P_k=0 \right]$$ $$\left[\text{or, }(1-x^2)P_k^{(m+2)}-2(m+1)xP_k^{(m+1)}+[k(k+1)-m^2-m]P^{(m)}_k=0\right]$$ [and substituting] $$\left[P^{(m)}_k=u=P^m_k(1-x^2)^{-\frac{m}{2}}\right]$$ [gives] $$\left[(1-x^2)(P^m_k)''-2x(P^m_k)'+\left[k(k+1)-\frac{m^2}{1-x^2}\right]P^m_k=0\right]$$ [where] $$\left[P^m_k=(1-x^2)^{\frac{m}{2}}\frac{d^mP_k}{dx^m}\right]$$ [further by 
Rodrigues] $$\left[P^m_k=\frac{(-1)^m}{2^kk!}(1-x^2)^\frac{m}{2}\frac{d^{k+m}}{dx^{k+m}}[x^2-1]^k\right]$$ [so] $$\left[P_k^{-m}=(-1)^m\frac{(k-m)!}{(k+m)!}P_k^m\right]$$ [some of the Legendre's are as follows] $$[P_0(x)=1]$$ $$[P_1(x)=x]$$ $$\left[P_2(x)=\frac{1}{2}(3x^2-1)\right]$$ $$\left[P_3(x)=\frac{1}{2}(5x^3-3x)\right]$$ $$\left[P_4(x)=\frac{1}{8}(35x^4-30x^2+3)\right]$$ {\bfseries Properties:} \begin{enumerate} \item ~[Recurrence] $$\left[P_{k+1}(x)=\frac{2k+1}{k+1}xP_k(x)-\frac{k}{k+1}P_{k-1}(x)\right]$$ $$[\text{Tell me about your two friends, I will tell about you}]$$ $$\left[P'_k(x)=-\frac{k}{1-x^2}xP_{k}(x)+\frac{k}{1-x^2}P_{k-1}(x)\right]$$ $$[\text{Tell me about yourself and your friend, I will tell your future}]$$ $$\left[P_{k}'(x)=\frac{k}{x}P_k(x)+\frac{1}{x}P'_{k-1}(x)\right]$$ $$[\text{Tell me about yourself and your friend's future, I will tell your future}]$$ \item ~[Generator] $$\left[\frac{1}{\sqrt{1-2xt+t^2}}=\sum_{k=0}^{\infty}P_k(x)t^k\right]$$ $$\left[P_k(x)=\frac{1}{2^kk!}\frac{d^k}{dx^k}(x^2-1)^k\right]$$ \end{enumerate} \end{enumerate} \section{The Laplace Transform} [The Laplace Transform] $$\left[F(s)=F(\sigma+i\omega)=L(f(t))=\int_{0}^{\infty}f(t)e^{-st}dt=\lim_{b\to \infty}\int_{0}^{b}f(t)e^{-st}dt\right]$$ [of the function $f(t)$ is an exponentially weighted improper integral which transforms the object function $f(t)$ to the image function $F(s)$; the integral is said to converge for the set of $s$ for which the limit exists. To recover the object function,] \par $$\left[f(t)=L^{-1}[F(s)]=\frac{1}{2\pi i}\int_{c-i\infty}^{c+i\infty}e^{st}F(s)ds\right]$$ [is the contour integral of the function $F(s)$, taken within the region of convergence of $s$, to the right of the set of singularities, which brings back the object function $f(t)$ from the image function $F(s)$, such that] $$\left[f(t)=L^{-1}L[f(t)] \text{ on } [0,\infty)\right]$$ [The sufficient conditions for existence are] $$[\text{piecewise continuity on $[0,\infty)$,~$O(e^{\alpha t})$ growth:~ convergence on $\sigma: (\alpha,\infty)$}]$$ \subsection{Properties} \begin{enumerate} \item {[Linearity]} $$\left[L[af(t)+bg(t)]=aL[f(t)]+bL[g(t)]\right]$$ \item {[Scaling]} $$\left[L[f(at)]=\frac{1}{a}F\left(\frac{s}{a}\right),~~\frac{1}{a}L\left[f\left(\frac{t}{a}\right)\right]=F(as)\right]$$ \item {[Shifting, Exponentiation]} $$\left[L[f(t-a)u(t-a)]=e^{-as}F(s),~~L[e^{at}f(t)]=F(s-a)\right]$$ \item {[Derivatives, Multiplication by t]} $$\left[L[f'(t)]=sL[f(t)]-f(0^+),~ -L[tf(t)]=F'(s)\right]$$ $$\left[\lim_{t\to 0^+}f(t)=\lim_{s\to \infty}sF(s),~\lim_{t\to \infty}f(t)=\lim_{s\to 0}sF(s)\right]$$ $$\left[L[f^{(n)}(t)]=s^nF(s)-s^{n-1}f(0^+)-s^{n-2}f'(0^+)-\ldots-f^{(n-1)}(0^+),~L[(-t)^nf(t)]=F^{(n)}(s)\right]$$ \item {[Integrals, Division by t]} $$\left[L\left[\int_{0}^{t}f(t)dt\right]=\frac{1}{s}F(s),~L\left[\frac{f(t)}{t}\right]=\int_s^{\infty}F(s)ds\right]$$ \item {[Convolution]} $$\left[L(f*g)(t)=L\left(\int_{0}^{t}f(\tau)g(t-\tau)d\tau\right)=F(s)G(s)\right]$$ \end{enumerate} \subsection{Find Transforms} \begin{enumerate} \item {[Table Transforms]} \begin{center} \begin{tabular}{|c|c|c|} \hline Object Functions $f(t)$ & Image Functions $F(s)$&Region of Convergence ($\sigma$) \\[0.2cm] $t^a~[a>-1]$ & $\frac{\Gamma(a+1)}{s^{a+1}}$&$(0,\infty]$ \\[0.5cm] $\sin at$ & $\frac{a}{s^2+a^2}$&$(0,\infty]$ \\[0.5cm] $\cos at$ & $\frac{s}{s^2+a^2}$&$(0,\infty]$ \\[0.5cm] $\sinh at$ & $\frac{a}{s^2-a^2}$&$(|a|,\infty]$ \\[0.5cm] $\cosh at$ & $\frac{s}{s^2-a^2}$&$(|a|,\infty]$ \\[0.5cm] $\delta(t)$ & $1$&$\text{all } s$ \\[0.5cm] \hline \end{tabular} \end{center} \item {[Heaviside Expansion]} 
$$\left[\frac{N_{<(n+1)}(s)}{(s-a)^{n+1}}=\frac{A_0}{s-a}+\frac{A_1}{(s-a)^2}+\ldots+\frac{A_n}{(s-a)^{n+1}};~A_k=\frac{N^{(n-k)}(a)}{(n-k)!}\right]$$ \item {[Periodic Functions]} $$\left[L[f(t)]=\frac{1}{1-e^{-sT}}{\int_{0}^{T}e^{-st}f(t)dt}\right]$$ \item {[Infinite Series]} $$\left[L^{-1}[F(s)]=L^{-1}\left[\sum_{n=0}^{\infty}\frac{a_n}{s^{n+1}}\right]=\sum_{n=0}^{\infty}\frac{a_n}{n!}t^n=f(t)\right]$$ \end{enumerate} \section{Partial Differential Equations} \subsection{Heat Equation} [PDE] $$\left[u_t=c^2u_{xx};~D_c([0,L]\times[0,\infty])\right]$$ [Boundary Conditions] $$\left[u(0,t)=u(L,t)=0\right]$$ [Initial Conditions] $$\left[u(x,0)=f(x)\right]$$ [Consider the solution given by] $$\left[u(x,t)=F(x)G(t)\right]$$ [Then] $$\left[u_t=F\dot{G},~u_{xx}=F''G\right]$$ [which on replacing to the PDE] $$\left[\frac{\dot{G}}{c^2G}=\frac{F''}{F}=-k^2\right]$$ [gives two ODEs] $$F''+k^2F=0,~\dot{G}+c^2k^2G=0$$ [Hence] $$\left[u(x,t)=(C_1\cos kx+C_2\sin kx)(C_3e^{-c^2k^2t})\right]$$ [from BCs] $$\left[u(0,t)=0:~ C_1=0,~u(L,t)=0: k=\frac{n\pi}{L}\right]$$ [Finally] $$\left[u(x,t)=\sum_{n=1}^{\infty}A_n e^{-\frac{c^2n^2\pi^2}{L^2}t} \sin\frac{n \pi}{L}x\right]$$ [where from ICs] $$\left[u(x,0)=\sum_{n=1}^{\infty}A_n\sin\frac{n \pi}{L}x=f(x):~ A_n=\frac{2}{L}\int_{0}^{L}f(x)\sin\frac{n\pi x}{L}dx\right]$$ \subsection{Wave Equation:} \subsubsection{One Dimensional} [PDE]: $$\left[u_{tt}=c^2u_{xx};~D_c([0,L]\times[0,\infty])\right]$$ [Boundary Conditions]: $$\left[u(0,t)=u(L,t)=0\right]$$ [Initial Conditions]: $$\left[u(x,0)=f(x),~ u_t(x,0)=g(x)\right]$$ [Consider the solution given by] $$\left[u(x,t)=F(x)G(t)\right]$$ [Then] $$\left[u_{tt}=F\ddot{G} \text{ and } u_{xx}=F''G\right]$$ [which on replacing to the PDE] $$\left[\frac{\ddot{G}}{c^2G}=\frac{F''}{F}=-k^2\right]$$ [gives two ODEs] $$\left[F''+k^2F=0,~\ddot{G}+c^2k^2G=0\right]$$ [Hence] $$\left[u(x,t)=(C_1\cos kx+C_2\sin kx)(C_3\cos ckt+C_4\sin ckt)\right]$$ [from BCs] $$\left[u(0,t)=0: C_1=0,~u(L,t)=0: k=\frac{n\pi}{L}\right]$$ [finally] $$\left[u(x,t)=\sum_{n=1}^{\infty}\left[A_n\cos \frac{cn\pi}{L} t+B_n\sin \frac{cn\pi}{L} t\right]\sin\frac{n \pi}{L}x\right]$$ [Now, from ICs] $$\left[u(x,0)=\sum_{n=1}^{\infty}A_n\sin\frac{n \pi}{L}x=f(x):~ A_n=\frac{2}{L}\int_{0}^{L}f(x)\sin\frac{n\pi x}{L}dx\right]$$ [and] $$\left[u_t(x,0)=\sum_{n=1}^{\infty}B_n\frac{cn\pi}{L}\sin\frac{n \pi}{L}x=g(x):~ B_n=\frac{2}{cn\pi}\int_{0}^{L}g(x)\sin\frac{n\pi x}{L}dx\right]$$ \subsubsection{Two Dimensional} [PDE]: $$\left[u_{tt}=c^2\left(u_{xx}+u_{yy}\right);~ D_c([0,a]\times[0,b]\times[0,\infty])\right]$$ [Boundary Conditions]: $$\left[u(\partial D_c)=0\right]$$ [Initial Conditions]: $$\left[u(x,y,0)=f(x,y),~ u_t(x,y,0)=g(x,y)\right]$$ [Consider the solution given by] $$\left[u(x,y,t)=F(x,y)G(t)\right]$$ [then] $$\left[u_{xx}=F_{xx}G,~ u_{yy}=F_{yy}G,~ u_{tt}=F\ddot{G}\right]$$ [which on replacing to the PDE] $$\left[\frac{\ddot{G}}{c^2G}=\frac{F_{xx}+F_{yy}}{F}=-k^2\right]$$ [gives two DEs] $$\left[F_{xx}+F_{yy}+k^2F=0,~\ddot{G}+c^2k^2G=0\right]$$ [Then solving, let] $$[F(x,y)=P(x)Q(y)]$$ [which gives the ODEs] $$\left[P_{xx}+\alpha^2P=0,~Q_{yy}+\beta^2Q=0;~\alpha^2+\beta^2=k^2\right]$$ [Hence] $$\left[F(x,y)=(C_1\cos \alpha x+C_2\sin \alpha x)(C_3\cos\beta y+C_4\sin\beta y)\right]$$ [from BCs] $$\left[u(\partial D_c)=0:~ P(x)Q(y)=0:~ P(0)=0, P(a)=0, Q(0)=0, Q(b)=0\right]$$ [respectively implying] $$\left[C_1=0,~\alpha=\frac{n\pi}{a},~~ C_3=0,~ \beta=\frac{m\pi}{b}\right]$$ [finally] $$\left[F(x,y)=C_2C_4\sum_{n=1}^{\infty}\sum_{m=1}^{\infty}\sin\left(\frac{n\pi 
x}{a}\right)\sin\left(\frac{m\pi y}{b}\right)\right]$$ [and] $$\left[G(t)=\sum_{n=1}^{\infty}\sum_{m=1}^{\infty}C_5\cos\lambda_{nm} t+C_6\sin\lambda_{nm}t,~\lambda_{nm}=c\pi\sqrt{\frac{n^2}{a^2}+\frac{m^2}{b^2}}\right]$$ [hence] $$\left[u(x,y,t)=\sum_{n=1}^{\infty}\sum_{m=1}^{\infty}\left[A_{nm}\cos\lambda_{nm} t+B_{nm}\sin\lambda_{nm}t\right]\sin\left(\frac{n\pi x}{a}\right)\sin\left(\frac{m\pi y}{b}\right)\right]$$ [Now, from ICs] $$\left[u(x,y,0)=\sum_{n=1}^{\infty}\sum_{m=1}^{\infty}A_{nm}\sin\left(\frac{n\pi x}{a}\right)\sin\left(\frac{m\pi y}{b}\right)=f(x,y)\right]$$ [so] $$\left[A_{nm}=\frac{4}{ab}\int_{0}^{b}dy\int_{0}^{a}f(x,y)\sin\left(\frac{n\pi x}{a}\right)\sin\left(\frac{m\pi y}{b}\right)dx\right]$$ [and] $$\left[u_t(x,y,0)=\sum_{n=1}^{\infty}\sum_{m=1}^{\infty}B_{nm}\lambda_{nm}\sin\left(\frac{n\pi x}{a}\right)\sin\left(\frac{m\pi y}{b}\right)=g(x,y)\right]$$[so] $$\left[B_{nm}=\frac{4}{ab\lambda_{nm}}\int_{0}^{b}dy\int_{0}^{a}g(x,y)\sin\left(\frac{n\pi x}{a}\right)\sin\left(\frac{m\pi y}{b}\right)dx\right]$$ \subsection{Laplace Equations:} \subsubsection{Three Dimensional in Cartesian} [PDE] $$\left[u_{xx}+u_{yy}+u_{zz}=0;~D_c([0,a]\times[0,b]\times[0,c])\right]$$ [Boundary Conditions] $$\left[u(x,y,c)=f(x,y),~0; \text{ elsewhere}\right]$$ [Consider the solution is given by] $$\left[u(x,y,z)=F(x)G(y)H(z)\right]$$ [then] $$\left[u_{xx}=F_{xx}GH,~ u_{yy}=FG_{yy}H,~ u_{zz}=FGH_{zz}\right]$$ [which on replacing to the PDE] $$\left[\frac{1}{F}F_{xx}+\frac{1}{G}G_{yy}+\frac{1}{H}H_{zz}=0\right]$$ [gives three ODEs] $$\left[F_{xx}+{\alpha}^2F=0,~G_{yy}+{\beta}^2G=0,~H_{zz}-({\alpha^2+\beta^2})H=0\right]$$ [Hence] $$\left[u(x,y,z)=(C_1\cos \alpha x+C_2\sin \alpha x)(C_3\cos \beta y+C_4\sin \beta y)(C_5e^{\sqrt{\alpha^2+\beta^2}z}+C_6e^{-\sqrt{\alpha^2+\beta^2}z})\right]$$ [Constants] $$\left[C_1=0,~\alpha=\frac{n_1\pi}{a},~C_3=0,~\beta=\frac{n_2\pi}{b}, C_6=-C_5\right]$$ [Finally] $$\left[u(x,y,z)=\sum_{n_1=1}^{\infty}\sum_{n_2=1}^{\infty}A_{n_1n_2}\sin\frac{n_1 \pi}{a}x \sin\frac{n_2 \pi}{b}y\sinh \lambda_{n_1n_2}z,~\lambda_{n_1n_2}=\pi\sqrt{\frac{n_1^2}{a^2}+\frac{n_2^2}{b^2}}\right]$$ [from BCs] $$\left[u(x,y,c)=\sum_{n_1=1}^{\infty}\sum_{n_2=1}^{\infty}A_{n_1n_2}\sin\frac{n_1 \pi}{a}x \sin\frac{n_2 \pi}{b}y\sinh \lambda_{n_1n_2}c=f(x,y)\right]$$ $$\left[: A_{n_1n_2}=\frac{4}{ab\sinh \lambda_{n_1n_2}c}\int_{0}^{b}dy\int_{0}^{a}f(x,y)\sin\left(\frac{n_1\pi x}{a}\right)\sin\left(\frac{n_2\pi y}{b}\right)dx\right]$$ \subsubsection{Three Dimensional in Spherical} [PDE]$$\left[\nabla^2 u=\frac{1}{r^2}\frac{\partial}{\partial r}\left[r^2\frac{\partial u}{\partial r}\right]+\frac{1}{r^2\sin\theta}\frac{\partial}{\partial \theta}\left[\sin\theta\frac{\partial u}{\partial \theta}\right]+\frac{1}{r^2\sin^2\theta}\frac{\partial^2u}{\partial \phi^2}=0\right]$$ [on] $$\left[D_c([0,R]\times[0,\pi]\times[0,2\pi))\right]$$ [Boundary Conditions] $$\left[u(R,\theta,\phi)=f(\theta,\phi)\right]$$ [Consider the solution is given by] $$\left[u(r,\theta,\phi)=U(r)P(\theta)Q(\phi)\right]$$ [then] $$\left[\frac{\partial^2u}{\partial r^2}=\frac{d^2U}{dr^2}PQ,~ \frac{\partial^2u}{\partial \theta^2}=U\frac{d^2P}{d\theta^2}Q \text{ and } \frac{\partial^2u}{\partial \phi^2}=UP\frac{d^2Q}{d\phi^2}\right]$$ [which on replacing to the PDE gives three ODEs] \begin{enumerate} \centering \item $\left[\frac{d^2Q}{d \phi^2}+{m}^2Q=0\right]$ \item $\left[r^2\frac{d^2U}{d r^2}+2r\frac{dU}{dr}-l(l+1)U=0\right]$ \item 
$\left[\frac{d^2P}{d\theta^2}+\frac{\cos\theta}{\sin\theta}\frac{dP}{d\theta}+\left[l(l+1)-\frac{m^2}{\sin^2\theta}\right]P=0\right]$ \end{enumerate} [Hence] $$\left[u(r,\theta, \phi)=(C_1\cos m \phi+C_2\sin m \phi)(C_3r^{l}+C_4r^{-l-1})C_5P^m_l(\cos\theta)\right]$$ [Constants] $$\left[l: 0,1,2,\ldots,~m: -l,-(l-1),\ldots,-1,0,1,\ldots,l-1,l\right]$$ $$\left[C_3=0 [\text{for the exterior region}]\text{ or } C_4=0 [\text{for the interior region}]\right]$$ [Finally] $$\left[u(r,\theta,\phi)=\sum_{l=0}^{\infty}\sum_{m=-l}^{l}A_{lm}[r^l ~|~ r^{-l-1}]Y_l^m(\theta,\phi)\right]$$ [where] $$\left[Y_l^m(\theta,\phi)=\sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}}P^m_l(\cos\theta)e^{im\phi}\right]$$ [from the BCs] $$\left[u(R,\theta,\phi)=\sum_{l=0}^{\infty}\sum_{m=-l}^{l}A_{lm}R^lY_l^m(\theta,\phi)=f(\theta,\phi)\right]$$ $$\left[: A_{lm}=\frac{1}{R^l}\int_{0}^{2\pi}d\phi\int_{0}^{\pi}f(\theta,\phi)~[\text{conjugate of }Y^m_l(\theta,\phi)]~\sin\theta d\theta\right]$$ [Properties of Spherical Harmonics] \begin{itemize} \centering \item $\left[Y_l^{-m}(\theta,\phi)=\text{conjugate of }(-1)^mY_l^m(\theta,\phi)\right]$ \item $\left[Y_{00}=\frac{1}{\sqrt{4\pi}}\right]$ \item $\left[Y_{11}=-\sqrt{\frac{3}{8\pi}}\sin\theta e^{i\phi}, Y_{10}=\sqrt{\frac{3}{4\pi}}\cos \theta\right]$ \item $\left[Y_{22}=\frac{1}{4}\sqrt{\frac{15}{2\pi}}\sin^2\theta e^{2i\phi},Y_{21}=-\sqrt{\frac{15}{8\pi}}\sin\theta\cos\theta e^{i\phi},Y_{20}=\sqrt{\frac{5}{4\pi}}\left(\frac{3}{2}\cos^2\theta-\frac{1}{2}\right)\right]$ \item $\left[Y_{l0}=\sqrt{\frac{2l+1}{4\pi}}P_l(\cos\theta)\right]$ \item $\left[P_l(\cos\gamma)=\frac{4\pi}{2l+1}\sum_{m=-l}^{m=l}Y_l^m(\theta,\phi) ~[\text{conjugate of }Y^m_l(\theta',\phi')]\right]$\par [where $\gamma$ is the angle between the positions $(r,\theta,\phi)$ and $(r',\theta',\phi')$] \end{itemize} \subsubsection{Three Dimensional in Cylindrical} [PDE]$$\left[\nabla^2 u=\frac{1}{s}\frac{\partial}{\partial s}\left[s\frac{\partial u}{\partial s}\right]+\frac{1}{s^2}\frac{\partial^2u}{\partial \phi^2}+\frac{\partial^2 u}{\partial z^2}=0\right]$$ [on] $$\left[D_c([0,a]\times[0,2\pi]\times[0,L])\right]$$ [Boundary Conditions] $$\left[u(s,\phi,L)=f(s,\phi),~0;\text{ elsewhere}\right]$$ [Consider the solution is given by] $$\left[u(s,\phi,z)=U(s)Q(\phi)Z(z)\right]$$ [then] $$\left[\frac{\partial^2u}{\partial s^2}=\frac{d^2U}{ds^2}QZ,~ \frac{\partial^2u}{\partial \phi^2}=U\frac{d^2Q}{d\phi^2}Z,~ \frac{\partial^2u}{\partial z^2}=UQ\frac{d^2Z}{dz^2}\right]$$ [which on replacing to the PDE gives three ODEs] \begin{enumerate} \centering \item $\left[\frac{d^2Q}{d \phi^2}+{m}^2Q=0\right]$ \item $\left[\frac{d^2Z}{d z^2}-k^2Z=0\right]$ \item $\left[s^2\frac{d^2U}{ds^2}+s\frac{dU}{ds}+(k^2s^2-m^2)U=0\right]$ \end{enumerate} [Hence] $$\left[u(s,\phi, z)=(C_1e^{-kz}+C_2e^{kz})(C_3\cos m\phi+C_4\sin m\phi)[C_5J_m(ks)+C_6Y_m(ks)]\right]$$ [Constants] $$\left[m=0,1,2,\ldots,~k_{mn}=\frac{1}{a}\times[\text{$n$th positive zero of } J_m]\right]$$ $$\left[C_5=0 ~[\text{for the exterior region}],~C_6=0~[\text{for the interior region}],~C_2=-C_1\right]$$ [Finally] $$\left[u(s,\phi,z)=\sum_{m=0}^{\infty}\sum_{n=1}^{\infty}(A_{mn}\cos m\phi+B_{mn}\sin m\phi)J_m(k_{mn}s)\sinh (k_{mn}z)\right]$$ [from BCs] $$\left[u(s,\phi,L)=\sum_{m=0}^{\infty}\sum_{n=1}^{\infty}(A_{mn}\cos m\phi+B_{mn}\sin m\phi)J_m(k_{mn}s)\sinh (k_{mn}L)=f(s,\phi)\right]$$ $$\left[:A_{mn}=\frac{2}{\sinh (k_{mn}L)}\frac{1}{\pi a^2J_{m+1}^2(k_{mn}a)}\int_{0}^{2\pi}\cos m\phi d\phi\int_{0}^{a}sf(s,\phi)J_m(k_{mn}s) ds\right]$$ $$\left[:B_{mn}=\frac{2}{\sinh (k_{mn}L)}\frac{1}{\pi 
a^2J_{m+1}^2(k_{mn}a)}\int_{0}^{2\pi}\sin m\phi d\phi\int_{0}^{a}sf(s,\phi)J_m(k_{mn}s) ds\right]$$ \chapter{Complex Calculus} \section{Elementary Functions} The elementary functions are defined in such a way that they reduce to the familiar functions of precalculus when the imaginary part is zero, and so they share their analytical properties and correspond to the elements of calculus: $$f(z)=u(x,y)+iv(x,y)=u(r,\theta)+iv(r,\theta)$$ \begin{enumerate} \item {\bfseries Polynomials and Rationals:}\par [just by basic addition and multiplication of complex numbers, the polynomials], $$w=a_0+a_1z+a_2z^2+\ldots+a_nz^n$$$$[C\xrightarrow[]{}C]$$ [also, including division, the rationals,] $$w=\frac{a_0+a_1z+a_2z^2+\ldots+a_nz^n}{b_0+b_1z+b_2z^2+\ldots+b_mz^m}$$ $$[C-\{\text{zeros of the denominator}\}\xrightarrow{}C]$$ \item {\bfseries Exponentials and Logarithms:}\par [based on Euler's formula, the exponentials,] $$w=e^z=e^{\text{re}(z)}e^{i~\text{im}(z)}=e^{\text{re}(z)}[\cos(\text{im}(z))+i\sin(\text{im}(z))]$$ $$[C\xrightarrow{} C-{\{0\}}]$$ [since the exponential is periodic with period $2\pi i$, its multi-valued inverse, the logarithm, is] $$w=\ln z=\ln |z|+i\text{arg}(z)=\ln |z|+i(\Theta+2n\pi)~~[n=0,\pm 1,\pm 2,\ldots]$$ $$[C-\{0\}\xrightarrow{}C]$$ {\bfseries NOTE:} If $\text{arg}(z)$ is restricted to $a<\text{arg}(z)<a+2\pi$, known as a branch, then the logarithm becomes single-valued; the choice $a=-\pi$ gives the principal branch, $$w=\Ln z=\ln|z|+i\Theta~~[-\pi<\Theta<\pi]$$ \item {\bfseries Complex Exponents}\par [using the multi-valued logarithms, the exponents] $$w=z^c=e^{c\ln z}$$ $$[C-\{0\}\xrightarrow{}C-\{0\}]$$ also, $$w=c^z=e^{z\ln c}~~[c\ne 0]$$ $$[C\xrightarrow{}C-\{0\}]$$ \item {\bfseries Trigonometrics} \par [from linear combinations of the exponentials, the trigonometric functions,] $$\sin z=\frac{e^{iz}-e^{-iz}}{2i},~~\cos z=\frac{e^{iz}+e^{-iz}}{2}$$ $$[C\xrightarrow[]{}C]$$ $$\tan z=\frac{\sin z}{\cos z}$$ \item {\bfseries Hyperbolic} \par [similarly, the hyperbolic functions] $$\sinh z=\frac{e^z-e^{-z}}{2},~~\cosh z=\frac{e^z+e^{-z}}{2}$$ $$[C\xrightarrow[]{}C]$$ $$\tanh z=\frac{\sinh z}{\cosh z}$$ which are related to the trigonometric functions by $$\sinh (iz)=i\sin z,~~\sin (iz)=i\sinh z$$ $$\cosh (iz)=\cos z,~~\cos (iz)=\cosh z$$ \item {\bfseries Inverse} \par [from the definitions of the trigonometric functions, the inverses] $$\sin^{-1}z=-i\ln\left[iz+(1-z^2)^\frac{1}{2}\right],~~\cos^{-1}z=-i\ln\left[z+i(1-z^2)^\frac{1}{2}\right]$$ $$[C\xrightarrow{}C]$$ $$\tan^{-1}z=\frac{i}{2}\ln\frac{i+z}{i-z}$$ similarly, $$\sinh^{-1}z=\ln\left[z+(z^2+1)^\frac{1}{2}\right],~~\cosh^{-1}z=\ln\left[z+(z^2-1)^\frac{1}{2}\right]$$ $$[C\xrightarrow[]{}C]$$ $$\tanh^{-1}z=\frac{1}{2}\ln\frac{1+z}{1-z}$$ \end{enumerate} \section{The Limit} {\bfseries Definition:}\par The Limit, $$\lim_{z\to z_0}f(z)=w_0$$ of the function $f(z)$ at the point $z=z_0$ equals $w_0$ implies that for each positive constant $\epsilon$ there is a positive constant $\delta$ such that $$|f(z)-w_0|<\epsilon$$whenever$$0<|z-z_0|<\delta$$ and it is said that the limit of the function $f(z)$ as $z$ approaches the point $z=z_0$ is $w_0$.\par {\bfseries Rectangular:} (if and only if) $$\lim_{(x,y)\to (x_0,y_0)}u(x,y)=u_0 \text{ and }\lim_{(x,y)\to (x_0,y_0)}v(x,y)=v_0$$ \subsection{Limits Involving Infinity} It is convenient to include the point at infinity in the complex plane and consider limits involving it; to visualize this, the complex plane can be thought of as passing through the equator of a unit sphere centered at the point $z=0$, such that each point of the plane is determined by the intersection of the 
line through it and the north pole with the surface of the sphere; thus, \begin{itemize} \centering \item $\lim_{z\to z_0}f(z)=\infty \text{ if and only if } \lim_{z\to z_0}\frac{1}{f({z})}=0$ \item $\lim_{z\to \infty}f(z)=w_0 \text{ if and only if } \lim_{{z}\to 0}f\left(\frac{1}{z}\right)=w_0$ \item $\lim_{z\to \infty}f(z)=\infty \text{ if and only if } \lim_{z\to 0}\frac{1}{f(\frac{1}{z})}=0$ \end{itemize} \section{The Continuity} {\bfseries Definition:}\par The Continuity, $$\lim_{z\to z_0}f(z)=f(z_0)$$ of the function $f(z)$ at the point $z=z_0$ implies that the functional value and the limiting value of the function $f(z)$ at the point $z=z_0$ are equal, and the function $f(z)$ is said to be continuous at the point $z=z_0$.\par {\bfseries Rectangular:} (if and only if) $$\text{if }u(x,y) \text{ and }v(x,y) \text{ are continuous at }(x_0,y_0)$$ \section{The Derivative} {\bfseries Definition:} The derivative, $$\left.\frac{d}{dz}f(z)\right|_{z=z_0}=\lim_{h\to 0}\frac{f(z_0+h)-f(z_0)}{h}$$ or, $$\left.\frac{d}{dz}f(z)\right|_{z=z_0}=\lim_{z\to z_0}\frac{f(z)-f(z_0)}{z-z_0}$$ of the function $f(z)$ at the point $z=z_0$, if the limit exists, implies the rate of change of the function $f(z)$ with respect to $z$, and the function $f(z)$ is said to be differentiable at the point $z=z_0$ \par {\bfseries Rectangular: [Cauchy Riemann]} $$=u_x(x_0,y_0)+iv_x(x_0,y_0)=-i[u_y(x_0,y_0)+iv_y(x_0,y_0)]$$ $$[\text{if } z_0\ne 0]=e^{-i\theta}[u_r(r_0,\theta_0)+iv_r(r_0,\theta_0)]=\frac{-ie^{-i\theta}}{r}[u_\theta(r_0,\theta_0)+iv_\theta(r_0,\theta_0)]$$ {\bfseries THEOREM:}\par If the functions $u(x,y)$ and $v(x,y)$ are continuously differentiable and satisfy the Cauchy-Riemann equations at $(x_0,y_0)\epsilon D_o$ then the function $f(z)$ is differentiable at the point $z=z_0$ \subsection{Analytic Functions} {\bfseries Definition:}\par A function $f(z)$ is said to be \begin{itemize} \item analytic at a point $z=z_0$ if the function $f(z)$ is differentiable on a neighbourhood $D_o$ of $z=z_0$ \item analytic on $D_o$ if the function $f(z)$ is analytic at every point in $D_o$ \item analytic on $D_c(R)$ if the function $f(z)$ is analytic on $D_o$ of $D_c(R)$ and at the boundary points of $D_c(R)$ \item an analytic function if the function $f(z)$ is analytic at every point in the domain of the function $f(z)$ \item entire if the function $f(z)$ is analytic at every point of $C$ (the whole complex plane). 
\end{itemize} {\bfseries NOTE:} If $f'(z)=0$ on $D_o$ then $f(z)$ is a constant on $D_o$.\par The same differentiability condition above (which was sufficient there, not necessarily necessary) also implies analyticity. \subsection{Some Derivatives} The derivative functions of the earlier defined analytic functions, and of their branches in the multi-valued case, follow as: \begin{itemize} \item Polynomials: $$\frac{d}{dz}z^n=nz^{n-1}$$ \item Exponentials and Logarithmics: $$\frac{d}{dz}e^z=e^z,~~\frac{d}{dz}\ln z=\frac{1}{z}$$ \item Trigonometrics and their Inverses: $$\frac{d}{dz}\sin z=\cos z,~~\frac{d}{dz}\cos z=-\sin z,$$ $$\frac{d}{dz}\tan z=\sec^2 z,~~\frac{d}{dz}\cot z=-\cosec^2 z $$ $$\frac{d}{dz}\cosec z=-\cosec z\cot z,~~\frac{d}{dz}\sec z=\sec z\tan z$$ $$\frac{d}{dz}\sin^{-1}z=\frac{1}{(1-z^2)^\frac{1}{2}},~~\frac{d}{dz}\cos^{-1}z=\frac{-1}{(1-z^2)^\frac{1}{2}},~~\frac{d}{dz}\tan^{-1}z=\frac{1}{1+z^2}$$ \item Hyperbolic: $$\frac{d}{dz}\sinh z=\cosh z,~~\frac{d}{dz}\cosh z=\sinh z,$$ $$\frac{d}{dz}\tanh z=\operatorname{sech}^2 z,~~\frac{d}{dz}\coth z=-\cosech^2 z $$ $$\frac{d}{dz}\cosech z=-\cosech z\coth z,~~\frac{d}{dz}\operatorname{sech} z=-\operatorname{sech} z\tanh z$$ \end{itemize} \section{The Antiderivative} {\bfseries Definition:}\par The Anti Derivative, $$\int f(z)dz=F(z)+C$$ of the function $f(z)$ is the differentiable (or, analytic) function $F(z)$ whose derivative function is the original function $f(z)$. \section{Integrals} The Contour Integral, $$\int_Cf(z)dz=\int_a^{b}f[z(t)]z'(t)dt$$ of the function $f(z)$ on $C$ is, in general, defined as a limit of sums, just like the line integrals; but if the function $f(z)$ is piecewise continuous on $C\epsilon D_o$, the integral can be evaluated from a parametrization $z(t),~a\le t\le b$, of $C$.\par {\bfseries THEOREM: }\par If a continuous function $f(z)$ has an antiderivative function $F(z)$ then, for every $C$, $$\int_C f(z)dz=F(\text{END})-F(\text{START})$$ where START and END are, respectively, the initial and final points of $C$. \subsection{Cauchy-Goursat Theorem} {\bfseries THEOREM:}\par If a function $f(z)$ is analytic on $D_c(R)$ enclosed by O+vely $C_o$ then, $$\int_{C_o} f(z)dz=0$$ {\bfseries Extensions:} \begin{itemize} \item If a function $f(z)$ is analytic on $D_c(R)$ enclosed by O+vely $C_o$ and excluding the disjoint O-vely $[C_k]_o$'s then, $$\int_{C_o} f(z)dz+\sum_{k=1}^{n}\int_{[C_k]_o}f(z)dz=0$$ This is also known as the principle of deformation of paths, since it tells that contours can always be deformed onto one another, by passing only through points at which the function is analytic, without altering the integrals. 
\end{itemize} \subsection{Cauchy Integral Formula} {\bfseries THEOREM:}\par If a function $f(z)$ is analytic on $D_c(R)$ enclosed by O+vely $C_o$, then for a point $z=z_0$ on $D_o$ of $D_c(R)$, $$f(z_0)=\frac{1}{2\pi i}\int_{C_o}\frac{f(z)}{z-z_0}dz$$ and, $$f^{(n)}(z_0)=\frac{n!}{2\pi i}\int_{C_o} \frac{f(s)}{(s-z_0)^{n+1}}ds$$ {\bfseries Extensions:} \begin{itemize} \item Morera: If a function $f(z)$ is continuous on $D_o$ and $$\int_C f(z)dz=0$$ for all closed $C$s in $D_o$ then the function $f(z)$ is analytic on $D_o$.\par \item If a function $f(z)$ is analytic on $D_o$ enclosed by O+vely $[C_R]_o$ centered at the point $z=z_0$, and the absolute value of the function $f(z)$ $[|f(z)|]$ attains the maximum value $M_R$ on O+vely $[C_R]_o$, then $$|f^{(n)}(z_0)|\le \frac{n!M_R}{R^n}$$ \item Liouville's Theorem: If a function $f(z)$ is an entire bounded function then the function $f(z)$ is a constant function.\end{itemize} \section{Series} \subsection{Taylor} {\bfseries THEOREM:}\par If a function $f(z)$ is analytic on $(D_o)_R$ centered at the point $z=z_0$ then the function $f(z)$ has a Taylor series on $(D_o)_R$ $$f(z)=\sum_{n=0}^{\infty}a_n(z-z_0)^n~~[|z-z_0|<R]$$ where, $$a_n=\frac{f^{(n)}(z_0)}{n!}$$ which, if we put $z_0=0$, gives the Maclaurin series $$f(z)=\sum_{n=0}^{\infty}\frac{f^{(n)}(0)}{n!}z^n~~[|z|<R]$$ \subsection{Laurent Series} {\bfseries THEOREM:}\par If a function $f(z)$ fails to be analytic at a point $z=z_0$ but is analytic on the annulus $(D_o)_{R_2-R_1}$ centered at the point $z=z_0$ then the function $f(z)$ has a Laurent series, $$f(z)=\sum_{n=0}^{\infty}a_n(z-z_0)^n+\sum_{n=1}^{\infty}\frac{b_n}{(z-z_0)^n}~~[R_1<|z-z_0|<R_2]$$ where, for a $C_o$ around the point $z=z_0$ on $(D_o)_{R_2-R_1}$, $$a_n=\frac{1}{2\pi i}\int_{C_o}\frac{f(z)}{(z-z_0)^{n+1}}dz,~~b_n=\frac{1}{2\pi i}\int_{C_o}\frac{f(z)}{(z-z_0)^{-n+1}}dz$$ Or more compactly, $$f(z)=\sum_{n=-\infty}^{\infty}c_n(z-z_0)^n~~[R_1<|z-z_0|<R_2];~~c_n=\frac{1}{2\pi i}\int_{C_o}\frac{f(z)}{(z-z_0)^{n+1}}dz$$ {\bfseries NOTE}:\par {\bfseries Definition:}\par The Residue, $$\text{res}|_{z=z_0}f(z)=b_1=\frac{1}{2\pi i}\int_C f(z)dz$$ of the function $f(z)$ at the singular point $z=z_0$ is the coefficient of the reciprocal term $(z-z_0)^{-1}$, i.e. $b_1$, in the Laurent series of the function $f(z)$ about the singular point $z=z_0$. \subsubsection{Nature of Singularities} \begin{enumerate} \item Removable Singularity:\par [each term of the fractional portion of the Laurent series of the function $f(z)$ is zero]\par then, $$\text{res}|_{z=z_0}f(z)=0$$ also, the function $f(z)$ is analytic and bounded on $z_0\epsilon D_o$. \item Poles [Order m]:\par [the fractional portion of the Laurent series of the function $f(z)$ contains $m$ non-zero terms]\par then, the function $f(z)$ can be written as $$f(z)=\frac{\phi(z)}{(z-z_0)^m}$$ where the function $\phi(z)$ is analytic and non-zero at the point $z=z_0$, and $$\text{res}|_{z=z_0}f(z)=\frac{\phi^{(m-1)}(z_0)}{(m-1)!}$$ also, $$\lim_{z\to z_0}f(z)=\infty$$ \item Essential Singularity:\par [an infinite number of terms in the fractional portion of the Laurent series of the function $f(z)$ are non-zero] \par then, the function $f(z)$ assumes every possible finite value on $z_0\epsilon D_o$, with one possible exception, an infinite number of times. \end{enumerate} \subsubsection{Zeros of an Analytic Function} {\bfseries Definition:}\par The Zero, $$z=z_0$$ of a function $f(z)$ analytic at the point $z=z_0$ is of order $m$ if the derivatives of the function $f(z)$ of order lower than $m$ (including the function itself) vanish at the point $z=z_0$ while $f^{(m)}(z_0)\ne 0$. 
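\par As a small numerical illustration of the residue formula for a pole of order $m$ given above, the following Python sketch (assuming \texttt{numpy}; the example function $f(z)=e^z/z^2$ is an arbitrary choice with a pole of order $2$ at $z=0$) evaluates $\frac{1}{2\pi i}\int_C f(z)dz$ over the unit circle and recovers $\text{res}|_{z=0}f(z)=\phi'(0)/1!=1$ with $\phi(z)=e^z$.
\begin{verbatim}
import numpy as np

# f(z) = e^z / z^2 has a pole of order m = 2 at z0 = 0 with phi(z) = e^z,
# so res|_{z=0} f(z) = phi^{(m-1)}(0) / (m-1)! = phi'(0) / 1! = 1.
def f(z):
    return np.exp(z) / z**2

# Positively oriented unit circle C: z = e^{i t}, dz = i e^{i t} dt.
M = 4096
t = np.linspace(0.0, 2.0 * np.pi, M, endpoint=False)
z = np.exp(1j * t)
dz = 1j * z
dt = 2.0 * np.pi / M

# (1 / 2 pi i) times the closed contour integral of f over C.
residue_numeric = np.sum(f(z) * dz) * dt / (2j * np.pi)
print(residue_numeric)   # approximately (1+0j)
\end{verbatim}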
\par {\bfseries THEOREM:}\par A function $f(z)$ analytic at the point $z=z_0$ has a zero at the point $z=z_0$ of order $m$ if and only if there exists a function $g(z)$ which is analytic but non-zero at the point $z=z_0$ such that $$f(z)=(z-z_0)^mg(z)$$ {\bfseries THEOREM:}\par If the functions $f(z)$ and $g(z)$ are analytic at a point $z=z_0$ such that $$f(z_0)\ne 0,~~ g(z_0)=0$$ then the function $\frac{f(z)}{g(z)}$ has a pole at the point $z=z_0$, \begin{enumerate} \item of order $m$ if the function $g(z)$ has a zero at the point $z=z_0$ of order $m$. \item of order $1$ if $g'(z_0)\ne 0$; then, $$\text{res}|_{z=z_0}\frac{f(z)}{g(z)}=\frac{f(z_0)}{g'(z_0)}$$ \end{enumerate} \section{Cauchy Residues Theorem} {\bfseries THEOREM:} If a function $f(z)$ is analytic on $D_c(R)$ enclosed by O+vely $C_o$ except at a finite number of singularities, the points $z=z_k~[k=1,2,\ldots,n]$, on $D_c(R)$ then, $$\int_{C_o} f(z)dz=2\pi i\sum_{k=1}^{n}\text{res}|_{z=z_k}f(z)$$ Further, if the function $f(z)$ is analytic on the $D_o$ exterior to O+vely $C_o$ then, $$\int_{C_o} f(z)dz=2 \pi i~ \text{res}|_{z=0}\left[\frac{1}{z^2}f\left(\frac{1}{z}\right)\right]$$ \section{Applications} \subsection{Definite} [of the form] $$\int_{0}^{2\pi}F(\sin \theta, \cos \theta)d\theta$$ then, considering the $$ \text{O+vely $[C_1]_o: z=e^{i\theta} ~(0\le\theta<2\pi)$}$$ so, $$\int_{0}^{2\pi}F(\sin n\theta, \cos m\theta)d\theta=\int_{C}F\left(\frac{z^n-z^{-n}}{2i},\frac{z^m+z^{-m}}{2}\right)\frac{dz}{iz}$$ \subsection{Improper} The improper integral of $f(x)$ over the real line, when $f(z)$ has singularities $z_k~[k=1,2,\ldots,n]$ above (but not on) the real axis, is evaluated by considering the contour integral over the up-origined O+vely semi $[C_R]_o$, large enough to contain all of them, i.e. $$\int_{-R}^{R}f(x)dx=2\pi i\sum_{k=1}^{n}\text{res}|_{z=z_k}f(z)-\int_{[C_R]_o}f(z)dz$$ so, $$PV\int_{-\infty}^{\infty}f(x)dx=2\pi i\sum_{k=1}^{n}\text{res}|_{z=z_k}f(z)-\lim_{R\to \infty}\int_{[C_R]_o}f(z)dz$$ Further, if $f(z)$ has a singularity $x_0$ on $(-\infty,\infty)$, the contour is indented there by an upper O-vely semi $[C_r]_o$ small enough to exclude it; then,\par $$PV\int_{-\infty}^{\infty}f(x)dx=2\pi i\sum_{k=1}^{n}\text{res}|_{z=z_k}f(z)-\lim_{R\to \infty}\int_{[C_R]_o}f(z)dz-\lim_{r\to 0}\int_{[C_r]_o}f(z)dz$$ {\bfseries THEOREM: [Jordan's]}\par If a function $f(z)$ is analytic on the $D_o$ exterior to the up-origined O+vely semi $[C_{R_0}]_o$ such that on every up-origined O+vely semi $[C_R]_o~ [R>R_0]$, $$|f(z)|\le M_R \text{ with } \lim_{R\to \infty}M_R=0$$ then for $a>0$, $$\lim_{R\to \infty}\int_{[C_R]_o}f(z)e^{iaz}dz=0$$ {\bfseries THEOREM: [Indented]}\par If a function $f(z)$ has a simple pole at the point $z=x_0$ on $(-\infty,\infty)$ then for an upper O-vely semi $[C_r]_o$ centered at the point $z=x_0$, $$\lim_{r\to 0}\int_{[C_r]_o}f(z)dz=-\pi i~ \text{res}|_{z=x_0}f(z)$$ \section{Conformal Mappings} \subsection{Linear} \begin{enumerate} \item Translations $[w=z+B]$ translate by the vector $(\text{re}(B),\text{im}(B))$ \begin{itemize} \item $(u,v)=(x+\text{re}(B),~y+\text{im}(B))$ \end{itemize} \item Rotations and Scaling $[w=Az]$ rotates by $\text{arg}(A)$ and scales by $|A|$. 
\begin{itemize} \item $(\rho,\phi)=(|A|r,~\theta+\text{arg}(A))$ \end{itemize} \item InOut $\left[w=z^{-1}=\text{conjugate of } |z|^{-2}z\right]$ is inversion with respect to the unit circle followed by reflection \begin{itemize} \item $(u,v)=\left(\frac{x}{x^2+y^2},~\frac{-y}{x^2+y^2}\right),~(\rho,\phi)=(r^{-1},-\theta)$\par [the image is a line if the original curve passes through the origin, and a circle if it does not] \end{itemize} {\bfseries NOTE:}\par The inout transformation maps lines and circles onto lines and circles, i.e. $$A(x^2+y^2)+Bx+Cy+D=0 ~[B^2+C^2>4AD]$$ $$\rightarrow D(u^2+v^2)+Bu-Cv+A=0$$ \item Mobius $\left[w=\frac{az+b}{cz+d}\right] (ad-bc\ne0)=\frac{a}{c}+\frac{bc-ad}{c}\frac{1}{cz+d}$\par {\bfseries NOTE:}\par The linear fractional transformation that maps the three distinct points $z_1,z_2,z_3$ onto the three distinct points $w_1,w_2,w_3$ respectively is unique and given by the implicit formula, $$\frac{(w-w_1)(w_2-w_3)}{(w-w_3)(w_2-w_1)}=\frac{(z-z_1)(z_2-z_3)}{(z-z_3)(z_2-z_1)}$$ {\bfseries Tokens:} \begin{enumerate} \item Unit Circle on Unit Circle [~$|z|\le 1$ to $|w|\le 1$~] $$w=e^{i\alpha}\frac{z-z_0}{1-\overline {z_0}z},~\text{im}(\alpha)=0,~ [|z_0|<1]$$ \item Half-Plane on Half-Plane [~$\text{im}(z)\ge 0$ to $\text{im}(w)\ge 0$~] $$w=\frac{az+b}{cz+d},~\text{im}(a,b,c,d)=0,~ [ad-bc>0]$$ \item Half-Plane on Unit Circle [~$\text{im}(z)\ge 0$ to $|w|\le 1$~] $$w=e^{i\alpha}\frac{z-z_0}{z-\overline {z_0}},~\text{im}(\alpha)=0,~ [\text{im}(z_0)>0]$$ \end{enumerate} \end{enumerate} \subsection{Others} \begin{enumerate} \item Sinusoidal $[w=\sin z=(\sin x\cosh y,\cos x\sinh y)]$ \par [transforms vertical lines onto hyperbolas and horizontal lines onto ellipses] \item Squaring $[w=z^2=(x^2-y^2,2xy)=(r^2,2\theta)]$ \par [transforms vertical lines onto left-opening parabolas and horizontal lines onto right-opening parabolas] \end{enumerate} \subsection{Conformality} {\bfseries Definition:}\par The Analytic Mapping $$w=f(z)$$ at a point $z=z_0$ with $f'(z_0)\ne 0$ preserves the angle, in both magnitude and sense, between intersecting smooth curves $C_1$ and $C_2$ at the point $z=z_0$, and is said to be conformal at the point $z=z_0$.\par {\bfseries Properties:}\par \begin{enumerate} \item rotates the tangent direction at $z_0$ by $\text{arg}(f'(z_0))$ to give the tangent direction at $w_0$ \item magnifies small line elements by $|f'(z_0)|$, hence maps small regions onto images of approximately the same shape. \end{enumerate} \section{Complex Potential} {\bfseries Definition:}\par The Complex Potential $$F(z)=\phi(x,y)+i\psi(x,y)$$ is an analytic function such that the function $\phi(x,y)$ is harmonic on $D_o$, accompanied by its conjugate, the function $\psi(x,y)$, on $D_o$, such that the level curves $\phi(x,y)=C_1$, physically the equipotentials, and $\psi(x,y)=C_2$, physically the flux lines, are orthogonal on $D_o$ except where $F'(z)=0$.\par {\bfseries THEOREM:}\par If $H(u,v)$ is harmonic and the map $w=u+iv$ [$(D_z)_o \rightarrow (D_w)_o$] is analytic, then $H[u(x,y),v(x,y)]$ is harmonic as well.\par {\bfseries Use of Mapping in Harmonic Modeling} \begin{enumerate} \item The problem at hand is transformed by a conformal mapping to an easier version of itself, which is then solved and brought back to the original domain by using the theorem above and the conservation of boundary conditions. 
\item Tools For Harmonic Modeling: \begin{enumerate} \item Between Parallel Plates: [spreading along the y axis]$$F(z)=az+b$$ \item Between Coaxial Cylinders: [onto the plane] $$F(z)=a\Ln z+b$$ \item Between Annular Regions: [opening through the origin] $$F(z)=a-ib\Ln z$$ \end{enumerate} \end{enumerate} \subsection{Poisson's Formula} The complex potential $F(z)$ on the centered $[D_R]_o$ relates, by the Cauchy Integral formula, as $$F(z)=\frac{1}{2\pi}\int_{0}^{2\pi}F(z^*)\frac{z^*}{z^*-z}d\alpha$$ where $$z^*=Re^{i\alpha}$$ but, since the inverse point $R^2/\text{cj}[z]$ lies outside the circle, $$\int_{0}^{2\pi}F(z^*)\frac{\text{cj}[z]}{\text{cj}[z]-\text{cj}[z^*]}d\alpha=0$$ Hence, subtracting, $$F(z)=\frac{1}{2\pi}\int_{0}^{2\pi}F(z^*)\frac{z^*\text{cj}[z^*]-z\text{cj}[z]}{(z^*-z)(\text{cj}[z^*]-\text{cj}[z])}d\alpha$$ Finally, in polar form, $$\phi(r,\theta)=\frac{1}{2\pi}\int_{0}^{2\pi}\phi(R,\alpha)\frac{R^2-r^2}{R^2-2Rr\cos(\theta-\alpha)+r^2}d\alpha$$ Further, $$\frac{z^*\text{cj}[z^*]-z\text{cj}[z]}{(z^*-z)(\text{cj}[z^*]-\text{cj}[z])}=\text{re}\left[\frac{z^*+z}{z^*-z}\right]=\text{re}\left[1+2\sum_{n=1}^{\infty}\left(\frac{z}{z^*}\right)^n\right]$$ so, $$\phi(r,\theta)=c_0+\sum_{n=1}^{\infty}\left(\frac{r}{R}\right)^n[a_n\sin n\theta+b_n\cos n\theta]$$ where, $$c_0=\frac{1}{2\pi}\int_{0}^{2\pi}\phi(R,\alpha)d\alpha$$ $$a_n=\frac{1}{\pi}\int_{0}^{2\pi}\phi(R,\alpha)\sin n\alpha d\alpha,~~ b_n=\frac{1}{\pi}\int_{0}^{2\pi}\phi(R,\alpha)\cos n\alpha d\alpha$$ \subsection{Properties of Harmonic Functions} \begin{enumerate} \item the value of a harmonic function at a point equals its average over any circle centered at that point \item a harmonic function on a region attains its maximum value on the boundary of that region \end{enumerate} \chapter{Statistics and Probability} \section{Representation of Data} The observational data elements of a variable $$x_1,x_2,\ldots,x_n$$ consisting of $n$ measurements possess parameters that augment visual displays \begin{enumerate} \item Mean: [arithmetic middle of the data] $$\overline{x}=\frac{\sum_{i=1}^{n}x_i}{n}$$ \item Median: [central location of the data] $$x_M=\text{[if n is odd] } \frac{n+1}{2}\text{ th observation}$$ $$\text{or [if n is even] average of } \frac{n}{2}\text{ th and }\frac{n+2}{2}\text{ th observations } $$ \item Variance: [square of the average deviation from the mean] $$s^2=\frac{\sum_{i=1}^{n}(x_i-\overline{x})^2}{n-1}$$ alternatively, $$s^2=\frac{n\sum_{i=1}^{n}x_i^2-\left[\sum_{i=1}^{n}x_i\right]^2}{n(n-1)}$$ \item Standard Deviation: [root of the variance] $$s=\sqrt{\frac{\sum_{i=1}^{n}(x_i-\overline{x})^2}{n-1}}$$ \item Coefficient of Variation: [relative deviation for comparison] $$V=\frac{s}{\overline{x}}\times 100\%$$ \item pth Percentile [sample value dividing the data so that p\% lie below it]\par How to find percentiles? 
\begin{itemize} \item find $\frac{np}{100}$, which if it is \item not an integer: round up to the next integer and take that ordered sample \item an integer, say $k$: take the average of the $k$th and $(k+1)$th ordered samples (consistent with the median rule above) \end{itemize} whose special cases are the \item Quartiles: [divisors of the set into quarters] $$Q_1=25\text{th}, Q_2 (\text{ or } x_M)=50\text{th} \text{ and }Q_3=75\text{th}$$ which can graphically be highlighted in boxplots [quartiles shown as bars along the data] \item Range: [length of the interval of the data] $$=x_{\text{MAX}}-x_{\text{MIN}}$$ \item Interquartile Range: [variation in the middle half of the data] $$=Q_3-Q_1$$ \end{enumerate} \section{Probability} {\bfseries Terminologies in Probability} \begin{enumerate} \item {\bfseries Experiment:} process of measurement or observation involving randomness, so the result cannot be predicted exactly \item {\bfseries Trial:} a performance of the experiment \item {\bfseries Sample Space:} collection of every possible outcome, discrete or continuous, denoted by $S$ \item {\bfseries Events:} subsets of the sample space, denoted by capitals A$\ldots$ Z, single outcomes being called simple events or favourable cases \item {\bfseries Random Variable:} variable that assumes values corresponding to the outcomes of an experiment [maps the sample space to values]; it might represent the possible outcomes of a yet-to-be-performed experiment, or the possible outcomes of a past experiment whose already-existing value is uncertain \item {\bfseries Mass Function:} relates a discrete random variable's values to their probabilities, denoted by $f(x)$ \item {\bfseries Density Function:} relates a continuous random variable's values to their probabilities, denoted by $f(x)$ \end{enumerate} {\bfseries Definitions:}\par The Probability, \begin{itemize} \item The Classical: $$P(A)=\frac{\text{total favourable cases in A}}{\text{total possible outcomes in equally probable S}}$$ \item The Empirical: $$P(A)=\lim_{N\to\infty}\frac{\text{total occurrences of A}}{\text{total trials (say N)}}$$ \end{itemize} of the event A in an experiment is the quantitative measure of the degree of confidence or uncertainty in predicting the occurrence of outcomes favourable to the event A during a trial of the experiment. 
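\par The empirical definition above can be illustrated numerically; the following Python sketch (assuming \texttt{numpy}; the die-rolling experiment and the trial counts are illustrative) estimates $P(A)$ for the event A = ``an even number shows'' on a fair die by relative frequency, which settles near the classical value $\frac{3}{6}=0.5$ as the number of trials $N$ grows.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

# Experiment: roll a fair six-sided die; event A = {2, 4, 6}.
# Classical probability: favourable / total = 3 / 6 = 0.5.
for N in (100, 10_000, 1_000_000):
    rolls = rng.integers(1, 7, size=N)    # N independent trials
    freq = np.mean(rolls % 2 == 0)        # relative frequency of A
    print(N, freq)
# The printed relative frequencies approach 0.5 as N increases.
\end{verbatim}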
\subsection{Properties of Probabilities (same space)} \begin{enumerate} \item Complement: [probability of complement of A] $$P(A^c)=1-P(A)$$ \item Union: [probability of A or B] $$P(A\cup B)=P(A)+P(B)-P(A\cap B)$$ {\bfseries NOTE:} for exclusive events $$P(A\cap B)=0$$ \item Intersection: [probability of A and B] $$P(A\cap B) = P(B|A)P(A)$$ {\bfseries NOTE:} for independent events $$P(B|A)=P(B)$$ \item Bayesian Theorem: If $B_1, B_2, \ldots B_n$ are mutually exclusive and exhaustive events, then for any arbitrary event A contained in their union, $$P(B_i|A)=\frac{P(B_i)P(A|B_i)}{\sum P(B_i)P(A|B_i)}$$ \end{enumerate} \subsection{Probability Distributions} {\bfseries Definition:}\par The Probability Distribution $$F(x)=P(X\le x)$$ defines, for a trial, the probability that the random variable of the experiment assumes a value not exceeding the input $x$, which corresponds to the fundamental result, $$P(a<X\le b)=F(b)-F(a)$$ and which in terms of the mass/density function is given by, $$F(x)=\sum_{x_i\le x}f(x_i)~~[i=1,2,\ldots] \text{ [discrete]}=\int_{-\infty}^{x}f(x)dx \text{ [continuous]}$$ \subsubsection{Mean and Variance} {\bfseries Definitions:}\par The Mean $$\mu=\sum_{i}x_if(x_i) ~[\text{discrete}]=\int_{-\infty}^{\infty}xf(x)dx ~[\text{continuous}]$$ and The Variance $$\sigma^2=\sum_{i}(x_i-\mu)^2f(x_i) ~[\text{discrete}]=\int_{-\infty}^{\infty}(x-\mu)^2f(x)dx ~[\text{continuous}]$$ are theoretically the counterparts of respectively the statistical mean and variance that measure certain properties of the distribution of the random variable in an experiment such that \begin{center} \begin{tabular}{|c|c|c|} \hline Random Variable & Mean & Variance \\ \hline &&\\ $X$ & $\mu$ & $\sigma^2$ \\[0.5cm] $aX+b$ & $a\mu+b$ & $a^2\sigma^2$ \\[0.5cm] $\frac{X-\mu}{\sigma}$ & $0$ & $1$ \\[0.5cm] \hline \end{tabular} \end{center} \subsection{Discrete Distributions} \subsubsection{Binomial Distributions} [only two possible outcomes of a trial, the probability of success is the same in each trial, the trials are independent] [occurs in games of chance (rolling dice), quality inspection (counting number of defectives), opinion polls, etc.] whose parameters are: \begin{enumerate} \item Random Variable: \begin{center} X = number of times a success occurs in $n$ independent trials, say p=P(the success event) \end{center} \item Mass Function: $$b(x;n,p)=C(n,x)p^x(1-p)^{n-x}$$ \item Mean: $$\mu=np$$ \item Variance: $$\sigma^2=np(1-p)$$ \end{enumerate} \subsubsection{Hypergeometric Distributions} [sampling without replacement, binomial but the trials are dependent] whose parameters are: \begin{enumerate} \item Random Variable: \begin{center} X = number of times a success occurs in $n$ draws without replacement from a population of size N containing M cases favourable to the event of success.
\end{center} \item Mass Function: $$h(x;N,M,n)=\frac{C(M,x)C(N-M,n-x)}{C(N,n)}$$ \item Mean: $$\mu=n\frac{M}{N}$$ \item Variance: $$\sigma^2=\frac{N-n}{N-1}npq~~\left[p=\frac{M}{N},~q=1-p\right]$$ \end{enumerate} {\bfseries NOTE:} Hypergeometric is calculation heavy, so it is approximated using the binomial: if $N\to \infty$ and $\frac{M}{N}\to p$, then $h(x;N,M,n)\to b(x;n,p)$; practically, if $n/N < 0.1$ the approximation is adequate \subsubsection{Poisson Distributions} [describes the number of times an event occurs in one unit of time or space, where a unit can be considered as a large number of instantaneous binomial trials with ever smaller probability] whose parameters are: \begin{enumerate} \item Random Variable: \begin{center} X = number of times an event occurs in a specified region or a period of time where its unit probability is $\mu$ \end{center} \item Mass Function: $$f(x)=\frac{\mu^x}{x!}e^{-\mu}$$ \item Mean: $$\mu = \mu$$ \item Variance: $$\sigma^2 = \mu$$ \item Approximation: $$n\to\infty, p\to 0,np\to\mu$$ Practically $n\ge 20,~ p <0.05$ \end{enumerate} \subsubsection{Negative Binomial Distributions} [working the binomial backwards to find the number of trials needed to obtain a specified number of successes] whose parameters are: \begin{enumerate} \item Random Variable: \begin{center} X = number of trials required to get $r$ successes, say p=P(the success event) \end{center} \item Mass Function: $$f(x)=C(x-1,r-1)p^r(1-p)^{x-r}$$ \item Mean: $$\mu=\frac{r}{p}$$ \item Variance: $$\sigma^2=\frac{(1-p)r}{p^2}$$ Further if $Y$ makes a random variable for the number of failures before $r$ successes then, $$Y=X-r$$ \end{enumerate} \subsubsection{Multinomial Distributions} [multiple possible outcomes of a trial, with the same probabilities in each trial, trials are independent] whose parameters are: \begin{enumerate} \item Random Variable: \begin{center} X$_i$ = number of times the $i$th outcome occurs in $n$ independent trials, \par say p$_i$[i=1,2,..,k]=P(the $i$th outcome) \end{center} \item Mass Function: $$f(x_1,x_2,\ldots,x_k)=\frac{n!}{x_1!x_2!\ldots x_k!}p_1^{x_1}p_2^{x_2}\ldots p_k^{x_k}$$ \end{enumerate} \subsection{Continuous Distributions} \subsubsection{Normal Distributions (="N")} [the most frequently occurring continuous distribution in applications; normal random variables arise, by the laws of chance, as errors in measurement; defined by the density function] $$f(x)=\frac{1}{\sigma\sqrt{2\pi}}\text{exp}\left[-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2\right]$$ which for $\mu=0$ and $\sigma=1$ is the Standard Normal Distribution (=SND), $$\phi(x)=\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{x}e^{-\frac{u^2}{2}}du$$ and relates by, $$F(x)=\phi\left(\frac{x-\mu}{\sigma}\right)$$ The probability ranges as, $$P(a< X\le b)=\phi\left(\frac{b-\mu}{\sigma}\right)-\phi\left(\frac{a-\mu}{\sigma}\right)$$ whose specific values are $$P(\mu-\sigma<X\le \mu+\sigma)=68\%$$ $$P(\mu-2\sigma<X\le \mu+2\sigma)=95.5\%$$ $$P(\mu-3\sigma<X\le \mu+3\sigma)=99.7\%$$ \par {\bfseries THEOREM: De-Moivre and Laplace}\par The normal distribution is a limiting case of the binomial, good for $np\ge 5,~nq\ge 5$, best when $>15$; if p and q are nearly equal it is good for small values of n too\par The Poisson is likewise well approximated for $\mu\ge 5$ \par Continuity correction: addition or subtraction of 0.5, when necessary, to account for the nature of the (in)equality when a discrete distribution is approximated by the normal \subsubsection{Uniform Distributions} [used in hypothesis testing, finance etc] whose parameters are as follows: \begin{enumerate} \item Density Function: $$f(x)= \begin{cases} \frac{1}{b-a} & a<x<b \\[0.5cm] 0 & \text{elsewhere} \\ \end{cases}$$ \item Mean: $$\mu=\frac{a+b}{2}$$ \item Variance:
$$\sigma^2=\frac{1}{12}(b-a)^2$$ \end{enumerate} \subsubsection{Exponential Distributions} [time to failure of electrical components] whose parameters are as follows: \begin{enumerate} \item Density Function: [$\lambda$ is the rate parameter] $$f(x)=\lambda e^{-\lambda x}\text{ for } x\ge 0,~ 0\text{ otherwise}$$ \item Mean: $$\mu=\frac{1}{\lambda}$$ \item Variance: $$\sigma^2=\frac{1}{\lambda^2}$$ \end{enumerate} \subsubsection{Gamma Distributions} [used in rainfalls, insurances etc] whose parameters are as follows: \begin{enumerate} \item Density Function: [a is the shape parameter, b is the scale parameter] $$f(x)= \begin{cases} \frac{1}{b^a\Gamma(a)}x^{a-1}e^{-\frac{x}{b}} & x>0 \\[0.5cm] 0 & \text{elsewhere} \\ \end{cases}$$ \item Mean: $$\mu=ab$$ \item Variance: $$\sigma^2=ab^2$$ \end{enumerate} \subsubsection{Log-Normal Distributions} [for a random variable whose log is normal] whose parameters are as follows: \begin{enumerate} \item Density Function: $$f(x)= \begin{cases} \frac{1}{x}\frac{1}{b\sqrt{2\pi}}\text{exp}\left[-\frac{1}{2}\left(\frac{\ln x-a}{b}\right)^2\right] & x>0 \\[0.5cm] 0 & \text{elsewhere} \\ \end{cases}$$ \item Mean: $$\mu=e^{a+\frac{b^2}{2}}$$ \item Variance: $$\sigma^2=e^{2a+b^2}(e^{b^2}-1)$$ \end{enumerate} \subsubsection{Beta Distributions} [used in order statistics and management] whose parameters are as follows: \begin{enumerate} \item Density Function: $$f(x)= \begin{cases} \frac{1}{\beta(a,b)}x^{a-1}(1-x)^{b-1} & 0<x<1 \\[0.5cm] 0 & \text{elsewhere} \\ \end{cases}$$ \item Mean: $$\mu=\frac{a}{a+b}$$ \item Variance: $$\sigma^2=\frac{ab}{(a+b)^2(a+b+1)}$$ \end{enumerate} \subsubsection{Weibull Distributions} [used in reliability analysis and engineering] whose parameters are as follows: \begin{enumerate} \item Density Function: $$f(x)= \begin{cases} abx^{b-1}e^{-ax^b} & x>0, a>0, b>0 \\[0.5cm] 0 & \text{elsewhere} \\ \end{cases}$$ \item Mean: $$\mu=a^{-\frac{1}{b}}\Gamma\left(1+\frac{1}{b}\right)$$ \item Variance: $$\sigma^2=a^{-\frac{2}{b}}\left[\Gamma\left(1+\frac{2}{b}\right)-\left[\Gamma\left(1+\frac{1}{b}\right)\right]^2\right]$$ \end{enumerate} \subsubsection{Normal Theory Distributions} \begin{enumerate} \item Chi-Square Distribution (=CHI)\par Random Variable: [of degree of freedom $\nu$] $$\chi^2_{\nu}=\sum_{i=1}^{\nu}Z_i^2=Z_1^2+Z_2^2+\ldots+Z_{\nu}^2$$ where, $Z_i[i=1,2,\ldots]$ are independent SND variables. \begin{enumerate} \item PDF: $\text{Gamma}\left(\alpha=\frac{\nu}{2}, \beta = 2\right)$ \item Mean: $\mu=\nu$ \item Variance: $\sigma^2=2\nu$ \end{enumerate} \item Student t Distribution (StD)\par Random Variable: [of degree of freedom $\nu$] $$t=Z\sqrt{\frac{\nu}{\chi^2_{\nu}}}$$ where, $Z$ and $\chi^2_{\nu}$ are independent SND and CHI variables. \begin{enumerate} \item Mean: $\mu=0 ~[\nu>1], \text{ else undefined}$ \item Variance: $\sigma^2=\frac{\nu}{\nu-2} ~[\nu>2], =\infty ~[1<\nu\le 2] $ \end{enumerate} \item F Distribution (FD) \par Random Variable: \par [of degree of freedom ($\nu_1$,$\nu_2)$] $$F_{\nu_1,\nu_2}=\frac{\nu_2\chi_{\nu_1}^2}{\nu_1\chi_{\nu_2}^2}$$ where, $\chi^2_{\nu_1}$ and $\chi^2_{\nu_2}$ are independent CHI variables.
\end{enumerate} \subsection{Joint Distributions} $\ldots$ $$F(x,y)=P(X\le x, Y\le y)=\sum_{x_i\le x}\sum_{y_j \le y}f(x,y)=\int_{-\infty}^{y}\int_{-\infty}^{x}f(x,y)dxdy$$ such that, $$P(a_1<X\le b_1, a_2<Y\le b_2)=F(b_1,b_2)+F(a_1,a_2)-F(a_1,b_2)-F(b_1,a_2)$$ $\ldots$\par individual marginal distribution of X in (X,Y) i.e (X=x, Y arbitrary) $$f_1(x)=\sum_{y}f(x,y)=\int_{-\infty}^{\infty}f(x,y)dy, ~~~F_1(x)=\sum_{x_i\le x}f_1(x_i)=\int_{-\infty}^{x}f_1(x)dx$$ individual marginal distribution of Y in (X,Y) i.e (X arbitrary, Y=y) $$f_2(y)=\sum_{x}f(x,y)=\int_{-\infty}^{\infty}f(x,y)dx, ~~~F_2(y)=\sum_{y_j\le y}f_2(y_j)=\int_{-\infty}^{y}f_2(y)dy$$ {\bfseries NOTE: for independent X and Y} $$F(x,y)=F_1(x)F_2(y)$$ following the necessary and sufficient condition: $$f(x,y)=f_1(x)f_2(y)$$ $\ldots$\par \subsection{Expectations and Moments} {\bfseries Definition:}\par The Expectation, $$E(g(X))=\sum_{i}g(x_i)f(x_i)~ [i=1,2,\ldots] ~\text{[discrete]}=\int_{-\infty}^{\infty}g(x)f(x)dx ~\text{[continuous]}$$ of the function $g(x)$ such that $g(X)$ is a random variable, implies the value of $g(X)$ expected on an average during the trial of the experiment to the random variable $X$ whose special cases are \par The Moments, $$E(X^k)=\sum_{i}x^k_if(x_i)~ [i=1,2,\ldots] ~\text{[discrete]}=\int_{-\infty}^{\infty}x^kf(x)dx ~\text{[continuous]}$$ called the $k$th moments, whose $k$th central moments are given by, $$E((X-\mu)^k)=\sum_{i}(x_i-\mu)^kf(x_i)~ [i=1,2,\ldots] ~\text{[discrete]}=\int_{-\infty}^{\infty}(x-\mu)^kf(x)dx ~\text{[continuous]}$$ which leads to, $$\mu=E(X) \text{ and } \sigma^2=E((X-\mu)^2)=E(X^2)-\mu^2$$ and further for joint expectations: $$E[g(X,Y)]=\sum_x\sum_yg(x,y)f(x,y)=\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}g(x,y)f(x,y)dxdy$$ with the theorems: $$E(aX+bY)=aE(X)+bE(Y)$$ and further if X and Y are independent, $$E(XY)=E(X)E(Y),~~\sigma_{aX+bY}^2=a^2\sigma_X^2+b^2\sigma_Y^2$$ {\bfseries Definition:}\par The Moment Generating Function $$M(t)=E(e^{tX})$$ which is the expectation of the random variable $e^{tX}$ given by, $$E(e^{tX})=\sum_{i}e^{tx_i}f(x_i)~ [i=1,2,\ldots] ~\text{[discrete]}=\int_{-\infty}^{\infty}e^{tx}f(x)dx ~\text{[continuous]}$$ such that, $$M^{(k)}(0)=E(X^k),~~M_{aX+b}(t)=e^{bt}M(at)$$ and further if $X$ and $Y$ are independent, $$M_{X+Y}(t)=M_X(t)M_Y(t)$$ {\bfseries THEOREM: Chebyshev's}\par The probability of the random variable $X$ taking a value deviated from the mean $\mu$ by at least the scaled standard deviation $k\sigma$ is bounded by, $$P(|X-\mu|\ge k\sigma)\le \frac{1}{k^2}$$ \subsection{Methods to Obtain Distributions} $\ldots$ \begin{enumerate} \item Moment Generating Method: [for independent $X_i$] $$M_{X_1+X_2+\ldots+X_n}(t)=M_{X_1}(t).M_{X_2}(t)\ldots M_{X_n}(t)$$ \item Distribution Function Method: $$F_{H(X)}(x)=P(H(X)\le x)$$ \item Transformation Method: $$f_{H(X)}(x)=\begin{cases} f_X(H^{-1}(x))\left|\frac{dH^{-1}(x)}{dx}\right| & \text{ where } \frac{dH^{-1}(x)}{dx}\ne 0 \\[0.5cm] 0 & \text{ elsewhere} \end{cases} $$ \item Convolution\par $$f_{X+Y}(x)=\int_{-\infty}^{\infty}f_X(u)f_Y(x-u)du,~~f_{\frac{Y}{X}}(x)=\int_{-\infty}^{\infty}|u|f_X(u)f_Y(ux)du$$ \end{enumerate} \section{Statistics} {\bfseries Terminologies in Statistics:} \begin{enumerate} \item {\bfseries Population of Units:} entities of the experimental units whose characteristics are of interest \item {\bfseries Statistical Population:} set of measurement or observation data corresponding to each unit, identified by its probability distribution and denoted by N [its size] (mean, variance).
\item {\bfseries Random Sample:} representative subset of the statistical population from which it is obtained, providing a framework for the theory of probability; denoted by a set of independent random variables with the same probability distribution, hence the same mean and variance $$\text{ RS }:X_i[i=1,2,\ldots,n]$$ \item {\bfseries Statistic:} numerical descriptive measure of the sample, which is itself a random variable, i.e. $$f(X_i[i=1,2,\ldots,n])$$ \item {\bfseries Parameter:} numerical feature of the probability distribution of the population, denoted by $\theta$ \item {\bfseries Test Statistic:} statistic formed from an estimator and the parameters such that its distribution is exactly known \item {\bfseries Tail Area:} (?)$_{x}$ input values for which the probability area on the right side equals the subscript $$\text{SND}_{0.025}=1.96 \text{ and }\text{SND}_{0.005}=2.575$$ \par \end{enumerate} \subsection{Sampling Distribution} The probability distribution of a statistic based on a random sample from a statistical population provides major simplification en route to analysing data as \par {\bfseries Mean and Variance (as statistics)}\par The mean and variance of an RS $X_i$ from $N(\mu,\sigma^2)$ are such, $$\mu_{\overline{x}}=\mu,~~ \mu_{S^2}=\sigma^2$$ and further, $$\sigma_{\overline{x}}^2=\frac{\sigma^2}{n}\left(\frac{N-n}{N-1}\right)\text{ generally approximated as }=\frac{\sigma^2}{n}$$ which implies for any $\epsilon>0$, (Law of Large Numbers) $$ \text{ as } n\to \infty ,~~ P(|\overline{X}-\mu|>\epsilon)\to 0$$ \par {\bfseries THEOREM: (Central Limit Theorem)}\par The mean of an ever larger RS $X_i$ from $N(\mu,\sigma^2)$, $$Y_n=\frac{X_1+X_2+\ldots+X_n}{n}$$ is asymptotically normal such that the test statistic, $$[\text{in practice n}\ge 30]~\lim_{n\to \infty}\sqrt{n}\left(\frac{Y_n-\mu}{\sigma}\right)=\text{SND}$$\par In particular, for a normal population $$N="N"(\mu,\sigma^2)$$ the test statistics, $$Z=\sqrt{n}\left(\frac{\overline{X}-\mu}{\sigma}\right)=\text{SND},~~ t=\sqrt{n}\left(\frac{\overline{X}-\mu}{S}\right)= \text{StD}_{\nu=n-1}$$ {\bfseries Comparisons:} \par The RSs $X_i$s and $Y_i$s [independent comparison] from large samples (the population does not matter) $$Z=\frac{[(\overline{X}-\mu_x)-(\overline{Y}-\mu_y)]}{\sqrt{\frac{\sigma_x^2}{n_x}+\frac{\sigma_y^2}{n_y}}}=\text{SND}$$ Now for normal populations: the above works regardless of the size of the samples (analogous to the single-mean case)\par and if $\sigma_x=\sigma_y$ (from normal populations regardless of size but with unknown variances) $$t=[(\overline{X}-\mu_x)-(\overline{Y}-\mu_y)]\frac{1}{S_p}\sqrt\frac{n_xn_y}{n_x+n_y}=\text{StD}_{\nu=(n_x-1)+(n_y-1)},~S_p^2=\frac{(n_x-1)S_x^2+(n_y-1)S_y^2}{(n_x-1)+(n_y-1)}$$ \par Another way:\par The earlier methods do not work for ``before and after'' data or other naturally paired ones.
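A minimal sketch of how the two kinds of comparison are typically run in practice (assuming SciPy is available; the sample arrays are purely illustrative). The paired call anticipates the matched-pairs treatment described next:

\begin{verbatim}
from scipy import stats

x = [12.1, 11.8, 12.4, 12.0, 11.9]   # illustrative sample 1
y = [11.7, 11.9, 12.2, 11.6, 11.8]   # illustrative sample 2

# independent comparison with pooled variance (sigma_x = sigma_y assumed)
t_ind, p_ind = stats.ttest_ind(x, y, equal_var=True)

# matched-pairs comparison, i.e. a one-sample t on the differences D_i = X_i - Y_i
t_rel, p_rel = stats.ttest_rel(x, y)

print(t_ind, p_ind, t_rel, p_rel)
\end{verbatim}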
So we need matched pairs:\par In such cases, if the RSs $X_i$ and $Y_i$ are compared in matched pairs, the pair differences are treated as a single RS, $$D_i=X_i-Y_i=X_1-Y_1, X_2-Y_2, \ldots, X_n-Y_n~ [n_x=n_y=n]$$ then, $$t=\sqrt{n}\left(\frac{\overline{D}-\mu_D}{S_D}\right)=\text{StD}_{\nu=n-1}$$\par {\bfseries One Way ANOVA:}\par To generalize independent comparisons, the RSs are $$[Y]_1,[Y]_2,\ldots,[Y]_k$$ from $$"N"_1(\mu_1,\sigma^2),"N"_2(\mu_2,\sigma^2),\ldots,"N"_k(\mu_k,\sigma^2)$$ such that if, $$\text{SS(Tr) [treatment sum of squares]}=\sum_{i=1}^{k}n_i(\overline{Y_i}-\overline{Y})^2$$ and, $$\text{SSE [error sum of squares]}=\sum_{i=1}^{k}\sum_{j=1}^{n_i}([Y_j]_i-\overline{Y_i})^2$$ then, $$F=\frac{\frac{\text{SS(Tr)}}{k-1}}{\frac{\text{SSE}}{N-k}}=\text{FD}_{\nu_1=k-1,\nu_2=N-k},~~N=\sum_{i=1}^{k}n_i$$ {\bfseries For proportion:}\par If the population is binomial and infinite with parameters p and q then, $$\mu_p = p,~\sigma_p^2= \frac{pq}{n}$$ For large samples the sample proportion approximately follows the normal distribution\par The difference of proportions is handled differently again:\par \begin{enumerate} \item to test $p_1=p_2$ (difference $=0$), a single p must be formed, so use the pooled p \item to test $p_1 - p_2 \ne 0$, use the separate p's and q's \item to construct a confidence interval in general, use the second form \end{enumerate} \par {\bfseries Goodness of Fit:}\par We speak of goodness of fit when we try to compare an observed frequency distribution with the corresponding values of an expected, or theoretical, distribution\par For the particular observational data of an RS, $$X_1=x_1, X_2=x_2, \ldots, X_n=x_n$$ the measure of resemblance of the data to a particular distribution $F(x)$ follows if the input x-axis is divided into $k$ intervals $I_1,\ldots, I_k$ such that each $I_i [i=1,2,\ldots,k]$ contains at least 5 of the given observations [meaning the observed values are grouped so that each group has at least 5 frequencies]; then if, $$o_i= \text{ no of sample observations data in } I_i$$ $$e_i= \text{ no of theoretical samples as $F(x)$ in } I_i$$ so, $$\chi^2=\sum_{i=1}^{k}\frac{(o_i-e_i)^2}{e_i}\sim \text{CHI}_{\nu=k-1}$$ [k-1 is the degree of freedom, but we subtract m from it, where m is the number of degrees lost in calculating the parameters of the theoretical distribution. For example, if you calculate p of a binomial by averaging from the data, you lose one.]\par {\bfseries For contingency tables}\par For a table the expected value of a cell is (row total)(column total)/(grand total), with degrees of freedom $(r-1)(c-1)$ \subsection{Statistical Inferences} The process of using data analysis to deduce underlying properties of a population by studying sample statistics comes in the following forms \subsubsection{Estimation} The approximation of the value of a population parameter on the basis of information from the sample is in the form of an estimator's statistics, which possess the characteristics: \begin{enumerate} \item Unbiased: the mean of the distribution of the estimator is equal to the true value of the parameter.
\item The Spread: the spread, measured by the variance of the distribution of the estimator, is as small as possible \end{enumerate} {\bfseries NOTE: Maximum Likelihood Estimator}\par If the RS from the probability distribution $f(x,\theta)$ corresponds to the particular observational data, $$X_1=x_1, X_2=x_2, \ldots, X_n=x_n$$ then the joint, $$L(\theta,x_1,x_2,\ldots,x_n)=f(x_1)f(x_2)\ldots f(x_n)$$ gives the probability that the RS consists precisely of those particular values; then the value $$\theta=\hat{\theta}$$ such that, $$\frac{d}{d\theta}L(\theta)=0 \text{ or for convenience }\frac{d}{d\theta}\ln L(\theta)=0$$ is the maximum likelihood estimator of $\theta$, and further for functions of $\hat{\theta}$, $$\widehat{f(\theta)}=f(\hat{\theta})$$ The estimator's statistics estimate the parameters in two possible ways: \begin{enumerate} \item Point Estimation: $$[\text{parameter}]=\text{[estimator]}$$ such that if feasible the errors, $$[\text{standard error}]=[\text{std deviation of the estimator}]$$ \item Interval Estimation: $$P\left(\text{[lower confidence limit]}<\text{[parameter]}< \text{[upper confidence limit]}\right)=\text{[confidence]}$$ to obtain which solve, $$P((?)_{1-\frac{\alpha}{2}}<\text{[test statistics (parameter)]}<(?)_{\frac{\alpha}{2}})=1-\alpha$$ \end{enumerate} \subsubsection{Hypothesis Testing} The evaluation of an assumption regarding a population parameter is based on two competing models as: \begin{enumerate} \item Null Hypothesis: the claim to be rejected ($=$), $$H_0 \rightarrow \text{[parameter]}=\text{[naught P]}$$ \item Alternative Hypothesis: the claim to establish, one sided ($>$/$<$) or two sided ($\ne$), $$H_1 \rightarrow \text{[parameter]}[> \text{ or } \ne \text{ or }<]\text{[naught P]}$$ then the possible conclusions are: \begin{center} \begin{tabular}{|c|c|c|} \hline & $H_0$ is True & $H_0$ is False \\[0.3cm] \hline Accept $H_0$ & Right Decision & Type $II$ Error \\[0.3cm] Reject $H_0$ & Type $I$ Error & Right Decision \\[0.3cm] \hline \end{tabular} \end{center} such that, $$\alpha(\text{Level of Significance})=P(\text{[rejection of $H_0 $ when $ H_0$ is true]})=P(\text{Type $I$ Error})$$ implying, over repeated sets of sample data, $$\alpha \approx\frac{\text{[no of times null hypothesis is rejected]}}{\text{[no of observation sets of sample data]}}$$ $$\beta=P(\text{[non rejection of $H_0 $ when $ H_1$ is true]})=P(\text{Type $II$ Error})$$ The distribution of the test statistic constructs a criterion to reject the null hypothesis based on the value of the statistic from the observational data of the RS as, $$[\text{value of TS from RS data as [parameter]=[P naught]}]> \text{or} <[\text{value of TS based on $\alpha$}]$$ which according to the alternative hypothesis are, \begin{center}\begin{tabular}{|c|c|} \hline Hypothesis & Rejections \\[0.5cm]\hline [parameter]$>$[naught P] & $\text{TS} > (TS)_{\alpha}$ \\[0.5cm] [parameter]$<$[naught P] & $\text{TS} < (TS)_{1-\alpha}$ \\[0.5cm] [parameter]$\ne$[naught P] & $\text{TS} > (TS)_\frac{\alpha}{2} \text{ or }\text{TS} < (TS)_{1-\frac{\alpha}{2}}$ \\[0.5cm] \hline \end{tabular}\end{center} In case of a dubious result, the $$\text{p-Value}=P(\text{[obtaining a value as or more extreme than the values actually observed]})$$ signifies the testing as, \begin{center}\begin{tabular}{|c|c|c|c|} \hline Highly Significant& Statistically Significant& Tends to Significance& Not Statistically Significant\\[0.3cm] p$<$0.01&0.01$<$p$<$0.05&0.05$<$p$<$0.1&p$>$0.1 \\[0.3cm] \hline \end{tabular}\end{center} The OC curve, $$L([\text{parameter}])=P(\text{accepting } H_0 \text{ when [parameter] prevails})$$ is such,
$$\alpha=1-L([\text{parameter}]),~ [\text{parameter}]\to H_0 \text{ and }\beta=L(\text{[parameter]}),~ \text{[parameter]}\to H_1$$ whose complement, the power curve, expresses the performance of the test $$\gamma(\text{[parameter]})=1-L(\text{[parameter]})=P(\text{rejecting } H_0 \text{ when [parameter] prevails})$$ then, $$\gamma([\text{parameter}_0])=P(\text{Rejections: seek } [\text{P naught}]\to [\text{parameter}_0],~ \text{adjust } [TS]_{xx} \text{ accordingly})$$ \end{enumerate} \subsection{Regression and Correlation Analysis} The set of statistical processes for estimating the relationship between a dependent variable (say y, the outcome, a random variable) and one or more independent variables (say x, the predictors) is based on a probabilistic model corresponding to the scatter plot [graph plot of individual x and y], out of which the simplest probabilistic model assumes that the expected value of the outcome is linear in the predictors, $$Y=\alpha+\beta x+\epsilon,~~E[Y|x]=\alpha+\beta x$$ where the deviation term $\epsilon$ is a random variable such that $$\epsilon="N"(0,\sigma^2)$$ whose regression coefficient parameters $\sigma^2$, $\alpha$ and $\beta$ are to be estimated as: \par [The general regression coefficients are found assuming nothing about the actual population distribution. While on the assumption that y depends on x as a normal distribution, i.e. for each $x_i$, $Y_i$ follows a normal distribution, we want to estimate the actual alpha and beta (note that the estimators of both are random variables)]\par (Now in this model Y will be a random variable)\par {\bfseries Least Square Method:}\par The sample data of bivariate observations $(x_i,y_i) [i=1,2,\ldots,n]$ estimates the parameters forming the best fit line that minimizes the square of deviations of the observed values from the predicted ones, say, $$\hat{y}=a+b x$$ which if $$\text{SSE}=\sum_{i=1}^{n}e_i^2=\sum_{i=1}^{n}[y_i-\hat{y_i}]^2$$ and further, $$S_{xx}=\sum_{i}^{n}(x_i-\overline{x})^2=\sum_{i}^{n}x_i^2-\frac{1}{n}\left(\sum_{i=1}^n x_i\right)^2$$ $$S_{yy}=\sum_{i}^{n}(y_i-\overline{y})^2=\sum_{i}^{n}y_i^2-\frac{1}{n}\left(\sum_{i=1}^n y_i\right)^2$$ $$S_{xy}=\sum_{i}^{n}(x_i-\overline{x})(y_i-\overline{y})=\sum_{i}^{n}x_iy_i-\frac{1}{n}\left(\sum_{i=1}^n x_i\right)\left(\sum_{i=1}^n y_i\right)$$ such that to minimize SSE, the coefficients $$b=\frac{S_{xy}}{S_{xx}} \text{ and }a=\overline{y}-b\overline{x} \text{ as a result, } \text{SSE}=S_{yy}-\frac{S_{xy}^2}{S_{xx}}$$ and the standard error of estimates, $$s^2_{e}=\frac{SSE}{n-2}=\frac{1}{n-2}\left[S_{yy}-\frac{S_{xy}^2}{S_{xx}}\right]$$ or alternatively by optimization, the normal equations, $$\sum_{i=1}^{n}y_i=an+b\sum_{i=1}^{n}x_i$$ $$\sum_{i=1}^{n}x_iy_i=a\sum_{i=1}^{n}x_i+b\sum_{i=1}^{n}x_i^2$$ Hence, the least squares estimators [unbiased and of low spread] of the parameters are: $$\hat{\alpha}=a, \hat{\beta}=b \text{ and } \hat{\sigma^2}=s_e^2$$ The least squares model, $$Y_i=\alpha+\beta x_i+\epsilon_i [i=1,2,\ldots,n]$$ if it is assumed that the $\epsilon_i$ are independent of one another such that $$Y_i="N"(\alpha +\beta x_i,\sigma^2) \text{ from } \epsilon_i="N"(0,\sigma^2)$$ then, $$t=\frac{\hat{\alpha}-\alpha}{s_e}\sqrt{\frac{nS_{xx}}{S_{xx}+n\overline{x}^2}}=\text{StD}_{\nu=n-2},~~t=\frac{\hat{\beta}-\beta}{s_e}\sqrt{S_{xx}}=\text{StD}_{\nu=n-2}$$ further the standard errors in estimating expected values and exact values for a particular $x=x_0$ are: $$SE[E(y)-\hat{y}]=s_e\sqrt{\frac{1}{n}+\frac{(x_0-\overline{x})^2}{S_{xx}}}$$ $$SE[y-\hat{y}]=s_e\sqrt{1+\frac{1}{n}+\frac{(x_0-\overline{x})^2}{S_{xx}}}$$ such that,
$$t=\frac{E(y)-\hat{y}}{SE[E(y)-\hat{y}]}\sim \frac{y-\hat{y}}{SE[y-\hat{y}]}=\text{StD}_{\nu=n-2}$$ \par The correlation coefficient for the sample observations of bivariate data $(x_i,y_i) [i=1,2,\ldots,n]$ measures the degree of strength of the linear relationship between the pairs, $$r=\frac{S_{xy}}{\sqrt{S_{xx}S_{yy}}}$$ whose values imply, \begin{enumerate} \item $[r=+1]$, the scatter plot lies exactly on a straight line with positive slope \item $[r>0]$, the scatter runs from lower left to upper right \item $[r<0]$, the scatter runs from upper left to lower right \item $[r=-1]$, the scatter plot lies exactly on a straight line with negative slope \item $[r\sim 0]$, weak linear association \end{enumerate} The sample correlation coefficient estimates [although not unbiasedly] the correlation coefficient of the population, which measures the association between the bivariate normally distributed random variables given by, $$\rho=E\left[\left(\frac{X-\mu_x}{\sigma_X}\right)\left(\frac{Y-\mu_Y}{\sigma_Y}\right)\right]$$ where positive values imply that both $X$ and $Y$ are simultaneously large or simultaneously small with high probability, such that the density function, $$f(x,y)=\frac{1}{2\pi\sigma_x\sigma_y\sqrt{1-\rho^2}}~\text{exp}\left[-\frac{1}{2(1-\rho^2)}\left(\frac{(x-\mu_x)^2}{\sigma_x^2}+\frac{(y-\mu_y)^2}{\sigma_y^2}-\frac{2\rho(x-\mu_x)(y-\mu_y)}{\sigma_x\sigma_y}\right)\right]$$ implying, $$Z=\frac{\sqrt{n-3}}{2}\ln\left[\frac{(1+r)(1-\rho)}{(1-r)(1+\rho)}\right]=\text{SND}$$ {\bfseries Multiple Correlation}\par $$r_{12,3}=\text{partial correlation coefficient of 1 and 2 keeping 3 constant}=\frac{r_{12}-r_{13}r_{23}}{\sqrt{1-r_{13}^2}\sqrt{1-r_{23}^2}}$$ For the joint effect of X2 and X3 on X1, $$R_{1,23}=\sqrt\frac{r^2_{12}+r_{13}^2-2r_{12}r_{13}r_{23}}{1-r_{23}^2}$$ \chapter{Numerical Analysis} \section{Prerequisites} \begin{enumerate} \item Errors $$\left[|X-X'|<10^{d-k},~|X-X'|<\frac{1}{2}10^{d-k}\right]$$ \item Error Propagation $$\left[y=f(x_1,x_2,\ldots,x_n):~\delta y\sim \frac{\partial y}{\partial x_1}\delta x_1+\frac{\partial y}{\partial x_2}\delta x_2+\ldots+\frac{\partial y}{\partial x_n}\delta x_n\right]$$ \end{enumerate} \section{Non-Linear Equations} \begin{enumerate} \item Bisection\par [if a function $f(x)$ is] \begin{enumerate} \centering \item {[continuous on $(a,b)$]} \item {[as IVT on $(a,b)$]} \end{enumerate} [then the approximations to the root follow as:] $$\left[x=\frac{a+b}{2},~a=x|a,~b=x|b\right]\begin{tabular}{|c|c|c|c|c|} \hline $n$ & $a$ & $b$ & $x$ & $f(x)$\\\hline $1$ & $0$ & $1$ & $=$ & $\pm ve$\\ \hline \end{tabular}$$ [with the propagation of] $$\left[n\ge \frac{\log(|a-b|)-\log\epsilon}{\log2}:~\frac{\epsilon_{n+1}}{\epsilon_n}=\frac{1}{2}\right]$$ \item Chord\par [if a function $f(x)$ is] \begin{enumerate} \centering \item {[continuous on ($x_0$,$x_1$)]} \item {[as IVT on ($x_0$,$x_1$)]} \end{enumerate} [then the approximations to the root follow as:] $$\left[x =x_0-\frac{x_1-x_0}{f(x_1)-f(x_0)}f(x_0):~x_0=x|x_0,~x_1=x|x_1\right]\begin{tabular}{|c|c|c|c|c|c|c|} \hline $n$ & $x_0$ & $x_1$ & $f(x_0)$ & $f(x_1)$ & $x$ & f(x)\\\hline $1$ & $0$ & $1$ & $-$ & $-$ & $=$ & $\pm ve$\\ \hline \end{tabular}$$ \item Secant\par [if a function $f(x)$ is] \begin{enumerate} \centering \item {[continuous on ($x_0$,$x_1$)]} \item {[sufficiently close to the root]} \end{enumerate} [then the approximations to the root follow as:] $$\left[x =x_1-\frac{x_1-x_0}{f(x_1)-f(x_0)}f(x_1):~x_0=x_1,~x_1=x\right]\begin{tabular}{|c|c|c|c|c|c|} \hline $n$ & $x_0$ & $x_1$ & $f(x_0)$ & $f(x_1)$ & $x$\\\hline $1$ & $0$ & $1$ & $=$ & $-$ & $-$\\
\hline \end{tabular}$$ \item Fixed Point\par [if a function $f(x)$ is] \begin{enumerate} \centering \item {[corresponds to the fixed-point form $x=\Phi(x)$]} \item {[$|\Phi'(x)|<1$ for all $x_0,\ldots$ in $I$]} \end{enumerate} [then the approximations to the root follow as:] $$\left[x=\Phi(x_0):~x_0=x\right]\begin{tabular}{|c|c|c|c|} \hline $n$ & $x_0$ & $\Phi(x_0)$ & $x$ \\\hline $1$ & $0$ & $-$ & $=$\\ \hline \end{tabular}$$ \item Newton Raphson's\par [if a function $f(x)$ is] \begin{enumerate} \centering \item {[sufficiently differentiable]} \item {[$|f(x)f''(x)|<|f'(x)|^2$ for all $x_0,\ldots$ in $I$]} \end{enumerate} [then the approximations to the root follow as:] $$\left[x=x_0-\frac{f(x_0)}{f'(x_0)}:~x_0=x\right]\begin{tabular}{|c|c|c|c|c|} \hline $n$ & $x_0$ & $f(x_0)$ & $f'(x_0)$ & $x$\\\hline $1$ & $0$ & $-$ & $-$ & $=$\\ \hline \end{tabular}$$ \end{enumerate} \section{Linear Systems} \begin{enumerate} \item Jacobi and Gauss-Seidel\par [if a linear system is] \begin{enumerate} \centering \item {[uniquely consistent]} \item {[strictly diagonally dominant]} \end{enumerate} [then the approximations to the solutions follow, respectively, as:] $$\left[[x_i]=\frac{1}{a_{ii}} \left[b_i-\sum_{j=1:j\ne i}^{n}a_{ij}[x_j]_0\right]:~[x_i]_0=[x_i]\right]\begin{tabular}{|c|c|c|} \hline $n$ & $[x_i]_0$ & $[x_i]$\\\hline $1$ & $0$ & $=$\\ \hline \end{tabular}$$ $$\left[[x_i]=\frac{1}{a_{ii}} \left[b_i-\sum_{j=1}^{i-1}a_{ij}[x_j]-\sum_{j=i+1}^{n}a_{ij}[x_j]_0\right]:~[x_i]_0=[x_i]\right]\begin{tabular}{|c|c|c|} \hline $n$ & $[x_i]_0$ & $[x_i]$\\\hline $1$ & $0$ & $=$\\ \hline \end{tabular}$$ \item Power Method\par [then the approximations to the dominant eigenpair follow as:] $$\left[[Z]=AX:~[X]= \frac{1}{\text{zmax}}[Z]\right]\begin{tabular}{|c|c|c|} \hline $n$ & $[X]$ & $[Z]$\\\hline $1$ & $0$ & $=$\\ \hline \end{tabular}$$ \end{enumerate} \section{ODE Solutions} \begin{enumerate} \item Euler's\par [then the approximations to the solutions follow as] $$\left[x=x_0+h,~y=y_0+hf(x_0,y_0):~x_0=x,~ y_0=y\right]\begin{tabular}{|c|c|c|c|c|c|} \hline $n$ & $x_0$ & $y_0$ & $f(x_0,y_0)$ & $x$ & $y$ \\\hline $1$ & $0$ & $0$ & $-$ & $-$ & $-$\\ \hline \end{tabular}$$ $$\left[y_\text{old}=y::~y_\text{new}=y_0+0.5h[f(x_0,y_0)+f(x_0+h,y_\text{old})]:~y_\text{old}=y_\text{new}::~y=y_\text{new}\right]$$ \item Runge-Kutta's\par [then the approximations to the solutions follow as] $$\left[k_1=hf(x_0,y_0),~k_2=hf\left(x_0+\frac{h}{2},y_0+\frac{k_1}{2}\right),~k_3=hf\left(x_0+\frac{h}{2},y_0+\frac{k_2}{2}\right)\right]$$ $$\left[k_4=hf(x_0+h,y_0+k_3),~x=x_0+h,~y=y_0+\frac{1}{6}[k_1+2k_2+2k_3+k_4]:~x_0=x,~ y_0=y\right]$$ $$\begin{tabular}{|c|c|c|c|c|c|c|c|c|} \hline $n$ & $x_0$ & $y_0$ & $k_1$ &$k_2$ &$ k_3$ & $ k_4$& $x$ & $y$ \\\hline $1$ & $0$ & $0$ & $-$& $-$ &$-$ & $-$ & $-$ & $-$\\ \hline \end{tabular}$$ \end{enumerate} \chapter{Logics} \begin{enumerate} \item Propositions $$\text{[a proposition is a sentence that declares a fact that is either true or false, but not both; on propositions act the connectives]}$$ $$\left[\neg p,~p\land q,~p\lor q,~p\oplus q, p\rightarrow q\right]$$ [of which the conditional generates the contrapositive, converse and inverse] $$\left[\neg q\rightarrow\neg p;~~q\rightarrow p;~\neg p\rightarrow \neg q\right]$$ \item Equivalency $$\left[\text{compound propositions p and q are logically equivalent if p}\Longleftrightarrow\text{q is a tautology}\right]$$ \item Equivalencies Goes Here \item Functions $$\left[\text{a predicate refers to the property that the subject variable can have}\right]$$ \end{enumerate} \par~\par~\par~\par $$\langle T_{f'},\phi\rangle=\langle T_f',\phi\rangle$$ \chapter{Neural Math} [Can
visualize them as little circles] [the derivative of the loss function with respect to anything tells how sensitive the loss is to it] [the loss function is for a single example while the error is the expected value of the loss function over all the examples] \section{Linear Model} Linear model (say the input vectors arranged row-wise in a matrix X and the output labels as y; hence a way of thinking about the model is that it takes the many example vectors in X and throws out the vector y) Model: $$h(X) = X\vec{w}$$ Loss: $$L(\vec{w}, X, \vec{y}) = \frac{1}{2\text{row}(X\text{ or }y)}\sum[\text{sq.}(X\vec{w}-\vec{y})]$$ Gradient: $$\grad_wL(\vec{w}, X, \vec{y}) = \frac{1}{\text{row}(X\text{ or }y)} X^T(X\vec{w}-\vec{y})$$ Analytically: $$X^T(X\vec{w}-\vec{y})=0\rightarrow \vec{w}=(X^TX)^{-1}X^T\vec{y} = X^+\vec{y}$$ Gradient Descent: $$\vec{w}^\text{new} = \vec{w}^\text{old} - \eta X^{T}(X\vec{w}^\text{old}-\vec{y})$$ Do you see what's happening here? The weight $w_i$ is updated by the difference between predicted and actual, dotted with the inputs that the weight receives. The weight is updated in such a way that the neurons it connects are lit up \section{Logistic Model} Model: $$h(X) = \sigma(X\vec{w}) = \sigma(\vec{z}) = \text{probability we are in class 1}$$ Loss: $$L(\vec{w}, X, \vec{y}) = -\frac{1}{\text{row}(X\text{ or }y)}\sum \left[\vec{y}\circ\lg\sigma(\vec{z})+(1-\vec{y})\circ\lg(1-\sigma(\vec{z}))\right]$$ Gradient: $$\grad_wL(\vec{w}, X, \vec{y}) = \frac{1}{\text{row}(X\text{ or }y)} X^T(\sigma(\vec{z})-\vec{y})$$ Analytically: $$?$$ Gradient Descent: $$\vec{w}^\text{new} = \vec{w}^\text{old} - \eta X^{T}(\sigma(X\vec{w}^\text{old})-\vec{y})$$ \section{Softmax} Model: outputs the probability of being in each class, so the outputs sum to 1; there will be one linear model per class with its own weights, and they combine to output the class $$h_{c_i}(X) = \frac{\exp(X\vec{w}_{c_i})}{\sum_{j}\exp(X\vec{w}_{c_j})}$$ ...
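A minimal numpy sketch of the batch gradient-descent updates written above (the learning rate, the step count, and the assumption that $X$ already carries a bias column and $y$ is a column vector are illustrative choices, not prescriptions from these notes):

\begin{verbatim}
import numpy as np

def fit_linear(X, y, lr=0.01, steps=1000):
    # squared loss L = (1/2n)||Xw - y||^2, gradient = (1/n) X^T (Xw - y)
    n, d = X.shape                 # y is assumed to have shape (n, 1)
    w = np.zeros((d, 1))
    for _ in range(steps):
        w -= lr * X.T @ (X @ w - y) / n
    return w

def fit_logistic(X, y, lr=0.1, steps=1000):
    # cross-entropy loss; gradient = (1/n) X^T (sigma(Xw) - y)
    n, d = X.shape
    w = np.zeros((d, 1))
    for _ in range(steps):
        p = 1.0 / (1.0 + np.exp(-(X @ w)))   # sigma(z)
        w -= lr * X.T @ (p - y) / n
    return w
\end{verbatim}

Note how the two updates differ only in whether the prediction is $X\vec{w}$ or $\sigma(X\vec{w})$, exactly as in the gradients above.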
\section{Deep} Up to now the principle has been that neurons that fire together wire together. Model: now the weights of each layer are arranged row-wise while the input vector features are column-wise, and the result will also be column-wise, $$\vec{a}^{[L]} = f^LW^Lf^{L-1}W^{L-1}\ldots f^1W^1\vec{a}^{[0]}$$ Loss: $$L(\vec{a}^{[L]}, \vec{y})$$ Gradient (read it off the computation graph) $$\frac{dL(\vec{a}^{[L]}, \vec{y})}{d\vec{w}_i^{[l]}}=\frac{dL(\vec{a}^{[L]}, \vec{y})}{d{z}_i^{[l]}}\cdot \frac{d{z}_i^{[l]}}{d\vec{w}_i^{[l]}}$$ $${\partial_{\vec{w}_i^{[l]}}L}={\partial_{{z}_i^{[l]}}L}\cdot \vec{a}_T^{[l-1]}$$ $${\partial_{\vec{w}_i^{[l]}}L}={\partial_{{z}^{[l]}}L}\cdot \vec{a}_T^{[l-1]}$$ [it looks like the first factor is a row vector while all the others are matrices, which satisfies dimensionality] [it is a matrix multiplication, so go from right to left] [Note here the derivative of z with respect to the earlier a is just the weight matrix itself, while the derivative of a with respect to z is related to the activation function, with its elementwise derivatives on the diagonal]
% $$d_{\vec{a}^0}L=d_{\vec{a}^{L}}L\cdot\sum(f^L)' \cdot \frac{d\vec{a}^{L}\cdot\frac{d\vec{z}^L}{d\vec{a}^{L-1}}\cdot\frac{d\vec{a}^{L-1}}{d\vec{z}^{L-1}}\cdot \ldots \cdot\frac{d\vec{z}^0}{d\vec{a}^{0}}$$
% Gradient Descent:
% For each layer calculate its error term like we did before:
% $$\delta^{[l]}=\grad_{\vec{z}^{[l]}}L(\vec{a}^{[L]}, \vec{y})$$
% is probably a matrix$$
% for last layer:
% $$\delta^{[L]}=\grad_{\vec{a}^{[L]}}L(a^{[L]}, \vec{y})\circ f'^{[L]}(z^{[L]})$$
\newpage \begin{enumerate} \item {Power Functions}$$\int x^ndx=\frac{x^{n+1}}{n+1};~\text{For } n =-1, \int \frac{1}{x}dx=\ln|x|$$ \item {Exponential and Logarithms} $$\int a^xdx=a^x\log_{a}e;~\int \log_{a}x\,dx=x\log_{a}\left(\frac{x}{e}\right)$$ \item {Trigonometric Functions} $$\int \sin xdx=-\cos x,~\int \cos xdx=\sin x,~\int \tan xdx=\ln|\sec x|$$ $$\int \cot xdx=\ln|\sin x|$$ $$\int \cosec xdx=\ln|\cosec x-\cot x|=\ln\left|\tan\left(\frac{x}{2}\right)\right|$$ $$\int \sec xdx=\ln|\sec x+\tan x|=\ln\left|\tan \left(\frac{x}{2}+\frac{\pi}{4}\right)\right|$$ \item {Hyperbolic Functions} $$\int \sinh xdx=\cosh x,~\int \cosh xdx=\sinh x,~\int \tanh xdx=\ln|\cosh x|$$ $\int \coth xdx=\ln|\sinh x|,~\int \cosech xdx=\ln\left|\tanh \left(\frac{x}{2}\right)\right|,~\int \sech xdx=\arctan(\sinh x)$ \item {Some Standard Functions} \par $$\int \frac{1}{x^2+a^2}dx=\frac{1}{a}\arctan\frac{x}{a},~\int \frac{1}{a^2-x^2}dx=\frac{1}{a}\arccoth\frac{x}{a},~\frac{1}{a}\arctanh\frac{x}{a}$$ $$\int \frac{1}{\sqrt{a^2-x^2}}dx=\arcsin\frac{x}{a},~\int \frac{1}{\sqrt{x^2\pm a^2}}dx=\arcsinh\frac{x}{a},~\arccosh\frac{x}{a}$$ $$\int \sqrt{a^2-x^2}dx=\frac{x}{2}\sqrt{a^2-x^2}+\frac{a^2}{2}\arcsin\frac{x}{a}$$ $$\int \sqrt{x^2\pm a^2}dx=\frac{x}{2}\sqrt{x^2\pm a^2}\pm\frac{a^2}{2}\left[\arcsinh\frac{x}{a},~\arccosh\frac{x}{a}\right]$$ $$\int e^{ax}\sin bxdx=\frac{1}{a^2+b^2}\left[\sin bx\frac{d}{dx}e^{ax}-e^{ax}\frac{d}{dx}\sin bx\right]$$ $$\int e^{ax}\cos bxdx=\frac{1}{a^2+b^2}\left[\cos bx\frac{d}{dx}e^{ax}-e^{ax}\frac{d}{dx}\cos bx\right]$$ \item {Inverse of Continuous Functions}$$\int f^{-1}(x)dx =xf^{-1}(x)-\left.\int f(u)du\right|_{u=f^{-1}(x)}$$ \end{enumerate} \newpage \begin{enumerate} \item {Mirror Properties}\par \begin{enumerate} \item {King Rule} $$: \left[\int_{a}^{b}f(x)dx=\int_{a}^{b}f(a+b-x)dx\right]$$ \item {Jack Rule}$$:\int_{-a}^{a}f(x)dx=\int_{0}^{a}(f(x)+f(-x))dx=\begin{cases}2\int_{0}^{a}f(x)dx & \text{if } f(-x)=f(x)\\0& \text{if } f(-x)=-f(x)\end{cases}$$ \end{enumerate} \item {Beta and Gamma Functions}\par
$$\beta(m,n)=\int_{0}^{1}x^{m-1}(1-x)^{n-1}dx;~~m,n>0$$ $$\Gamma (n)=\int_{0}^{\infty}e^{-x}x^{n-1}dx;~~n>0$$ \begin{enumerate} \item ~$\beta (m,n)=\beta (n,m),~~~~\beta (m,n)=\frac{\Gamma (m)\Gamma (n)}{\Gamma (m+n)}$ \item $\Gamma (n+1)=n\Gamma (n)$ [$=n!$ for positive integer $n$],~~~~$\Gamma (1)=1,~~\Gamma \left(\frac{1}{2}\right)=\sqrt\pi$ \item Euler's Reflection: $\Gamma (m)\Gamma (1-m)=\frac{\pi}{\sin m\pi},~~~0<m<1$ \item Gamma Sinusoidal: $\int_0^{\frac{\pi}{2}}\sin ^px\cos ^qxdx=\frac{\Gamma \left(\frac{p+1}{2}\right)\Gamma \left(\frac{q+1}{2}\right)}{2\Gamma \left(\frac{p+q+2}{2}\right)}~~p,q>-1$ \end{enumerate} \item{Leibniz Rule (Feynman's rule)*}\par $$\left[\frac{d}{dt}\int_{a}^{b}f(x,t)dx=\int_{a}^{b}\frac{\partial}{\partial t}f(x,t)dx\right]$$ [*: You might want to look into the underlying assumptions about the nature of $f(x,t)$] \end{enumerate} \chapter{Optimal filters} Refers to a filter that has been designed to achieve a specific objective while minimizing certain criteria, such as mean square error, minimax error, or distortion. In the context of digital signal processing, an optimum filter is often designed to enhance or extract certain features from a signal while minimizing noise or interference. Design algorithms have been developed in which some of the parameters $M$, $\delta_1$, $\delta_2$, $\omega_p$, and $\omega_s$ are fixed and an iterative procedure is used to obtain optimum adjustments of the remaining parameters. It is convenient to consider the design of a zero-phase filter, i.e. one for which $$h[n]=h[-n]$$ The corresponding frequency response is $$A(e^{j\omega}) = \sum_{n=-L}^L h[n]e^{-j\omega n}$$ where $L=\frac{1}{2}M$; it can be written as a polynomial $P(x)$ in $x=\cos\omega$ (why?). Let us define an approximation error function $$E(e^{j\omega}) = W(e^{j\omega})[H(e^{j\omega})-A(e^{j\omega})]$$ where $W(e^{j\omega})$ is a weighting function that incorporates design parameters into the design process and $H(e^{j\omega})$ is the desired response. $$W(e^{j\omega})=\left\{\begin{array}{c c} K & 0 \le \omega \le \omega_p \\ 1 & \omega_s \le \omega \le \pi \end{array}\right.$$ $$H(e^{j\omega})=\left\{\begin{array}{c c} 1 & 0 \le \omega \le \omega_p \\ 0 & \omega_s \le \omega \le \pi \end{array}\right.$$ The alternation theorem states that our polynomial will correspond to the filter representing the unique best approximation of the ideal lowpass filter, with the ratio $\delta_1/\delta_2$ fixed at $K$ and with passband and stopband edges $\omega_p$ and $\omega_s$, if and only if $E(\cos\omega)$ exhibits at least $(L+2)$ alternations, i.e., if and only if $E(\cos\omega)$ alternately equals plus and minus its maximum value at least $(L + 2)$ times. $$W(\omega_i)[H(e^{j\omega_i}) - A(e^{j\omega_i})] = (-1)^{i+1}\delta,~~i = 1,2,\ldots,(L+2)$$ where $\delta$ is the optimum error.
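In practice such an equiripple design is rarely iterated by hand; here is a minimal sketch using SciPy's remez, which implements the Parks-McClellan exchange procedure described below (the filter length, band edges and weights are illustrative, not taken from this derivation):

\begin{verbatim}
from scipy import signal

taps = signal.remez(
    numtaps=31,                  # M + 1 filter coefficients
    bands=[0.0, 0.2, 0.3, 0.5],  # passband [0, 0.2], stopband [0.3, 0.5] (fs = 1)
    desired=[1, 0],              # ideal response in each band
    weight=[2, 1],               # relative band weighting, i.e. the ratio K
    fs=1.0,
)
\end{verbatim}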
% In matrix form,
% $$\begin{array}{c c c c c}
% 1 & x_1 & x_1^2 & \ldots & x_1^L & \frac{1}{W(\omega_i)} \\
% &
% \end{array}$$
For a given set of extremal frequencies, the optimum error is given by $$\delta=\frac{\sum_{k=1}^{L+2} b_kH(e^{j\omega_k})}{\sum_{k=1}^{L+2} b_k\frac{(-1)^{k+1}}{W(\omega_k)}},~~b_k=\prod_{i=1, i\ne k}^{L+2} \frac{1}{x_k-x_i}$$ The interpolation formula to obtain our approximation is, $$A(e^{j\omega}) = \frac{\sum_{k=1}^{L+1}\frac{d_k}{x-x_k}C_k}{\sum_{k=1}^{L+1}\frac{d_k}{x-x_k}},~~C_k=H(e^{j\omega_k})-\frac{(-1)^{k+1}\delta}{W(\omega_k)},~~d_k=\prod_{i=1, i\ne k}^{L+1} \frac{1}{x_k-x_i}$$ If $|E(\omega)| \le \delta$ for all $\omega$ in the passband and stopband, then the optimum approximation has been found. Otherwise, adopting the philosophy of the Remez exchange method, the extremal frequencies are exchanged for a completely new set defined by the $(L + 2)$ largest peaks of the error curve. \begin{figure}[H] \centering \includegraphics[width=0.6\textwidth]{Screenshot from 2023-09-01 12-49-29.png} \label{fig:enter-label} \end{figure} \subsection{Comparison of IIR and FIR filters} \begin{itemize} \item IIR filters have the advantage that a variety of frequency-selective filters can be designed using closed-form design formulas, and the coefficients of the discrete-time filter can be obtained by straightforward substitution into a set of design equations. \item IIR methods are limited to frequency-selective filters and permit only the magnitude response to be specified; if other shapes are necessary, or a prescribed phase or group delay is to be approximated, an algorithmic procedure is required. \item FIR filters can have a precisely generalized linear phase. \item Closed-form design equations do not exist for FIR filters. \end{itemize} \end{document}