\babel@toc {english}{}
\contentsline {chapter}{\numberline {1}Lecture 1 - 09-03-2020}{8}%
\contentsline {section}{\numberline {1.1}Introduction to the course}{8}%
\contentsline {section}{\numberline {1.2}Examples}{8}%
\contentsline {subsection}{\numberline {1.2.1}Spam filtering}{11}%
\contentsline {chapter}{\numberline {2}Lecture 2 - 10-03-2020}{12}%
\contentsline {section}{\numberline {2.1}Topic}{12}%
\contentsline {section}{\numberline {2.2}Loss}{12}%
\contentsline {subsection}{\numberline {2.2.1}Absolute Loss}{12}%
\contentsline {subsection}{\numberline {2.2.2}Square Loss}{13}%
\contentsline {subsection}{\numberline {2.2.3}Example of information conveyed by the square loss}{14}%
\contentsline {subsection}{\numberline {2.2.4}Labels and losses}{15}%
\contentsline {subsection}{\numberline {2.2.5}Example: TF-IDF document encoding}{17}%
\contentsline {chapter}{\numberline {3}Lecture 3 - 16-03-2020}{19}%
\contentsline {section}{\numberline {3.1}Overfitting}{21}%
\contentsline {subsection}{\numberline {3.1.1}Noise in the data}{21}%
\contentsline {section}{\numberline {3.2}Underfitting}{23}%
\contentsline {section}{\numberline {3.3}Nearest neighbour}{23}%
\contentsline {chapter}{\numberline {4}Lecture 4 - 17-03-2020}{26}%
\contentsline {section}{\numberline {4.1}Computing $h_{NN}$}{26}%
\contentsline {section}{\numberline {4.2}Tree Predictor}{28}%
\contentsline {chapter}{\numberline {5}Lecture 5 - 23-03-2020}{32}%
\contentsline {section}{\numberline {5.1}Tree Classifier}{32}%
\contentsline {section}{\numberline {5.2}Jensen’s inequality}{34}%
\contentsline {section}{\numberline {5.3}Tree Predictor}{38}%
\contentsline {section}{\numberline {5.4}Statistical model for Machine Learning}{39}%
\contentsline {chapter}{\numberline {6}Lecture 6 - 24-03-2020}{41}%
\contentsline {section}{\numberline {6.1}Bayes Optimal Predictor}{41}%
\contentsline {subsection}{\numberline {6.1.1}Square Loss}{42}%
\contentsline {subsection}{\numberline {6.1.2}Zero-one loss for binary classification}{43}%
\contentsline {section}{\numberline {6.2}Bayes Risk}{46}%
\contentsline {chapter}{\numberline {7}Lecture 7 - 30-03-2020}{48}%
\contentsline {section}{\numberline {7.1}Chernoff-Hoeffding bound}{48}%
\contentsline {section}{\numberline {7.2}Union Bound}{49}%
\contentsline {section}{\numberline {7.3}Studying overfitting of an ERM}{53}%
\contentsline {chapter}{\numberline {8}Lecture 8 - 31-03-2020}{55}%
\contentsline {section}{\numberline {8.1}The problem of estimating risk in practice}{56}%
\contentsline {section}{\numberline {8.2}Cross-validation}{58}%
\contentsline {section}{\numberline {8.3}Nested cross-validation}{60}%
\contentsline {chapter}{\numberline {9}Lecture 9 - 06-04-2020}{61}%
\contentsline {section}{\numberline {9.1}Tree predictors}{61}%
\contentsline {subsection}{\numberline {9.1.1}Catalan Number}{63}%
\contentsline {chapter}{\numberline {10}Lecture 10 - 07-04-2020}{67}%
\contentsline {section}{\numberline {10.1}TO BE DEFINED}{67}%
\contentsline {section}{\numberline {10.2}20 MINUTES OF THE LECTURE ARE MISSING}{67}%
\contentsline {section}{\numberline {10.3}Compare risk for zero-one loss}{69}%
\contentsline {chapter}{\numberline {11}Lecture 11 - 20-04-2020}{71}%
\contentsline {section}{\numberline {11.1}Analysis of $K_{NN}$}{71}%
\contentsline {subsection}{\numberline {11.1.1}Study of $K_{NN}$}{74}%
\contentsline {subsection}{\numberline {11.1.2}Study of trees}{75}%
\contentsline {section}{\numberline {11.2}Non-parametric Algorithms}{76}%
\contentsline {subsection}{\numberline {11.2.1}Example of parametric algorithms}{77}%
\contentsline {chapter}{\numberline {12}Lecture 12 - 21-04-2020}{78}%
\contentsline {section}{\numberline {12.1}Non-parametric algorithms}{78}%
\contentsline {subsection}{\numberline {12.1.1}Theorem: No free lunch}{78}%
\contentsline {section}{\numberline {12.2}Highly Parametric Learning Algorithm}{80}%
\contentsline {subsection}{\numberline {12.2.1}Linear Predictors}{80}%
\contentsline {subsection}{\numberline {12.2.2}MinDisagreement}{84}%
\contentsline {chapter}{\numberline {13}Lecture 13 - 27-04-2020}{85}%
\contentsline {section}{\numberline {13.1}Linear prediction}{85}%
\contentsline {subsection}{\numberline {13.1.1}MinDisOpt}{85}%
\contentsline {section}{\numberline {13.2}The Perceptron Algorithm}{88}%
\contentsline {subsection}{\numberline {13.2.1}Perceptron Convergence Theorem}{89}%
\contentsline {chapter}{\numberline {14}Lecture 14 - 28-04-2020}{92}%
\contentsline {section}{\numberline {14.1}Linear Regression}{92}%
\contentsline {subsection}{\numberline {14.1.1}The problem of linear regression}{92}%
\contentsline {subsection}{\numberline {14.1.2}Ridge regression}{93}%
\contentsline {section}{\numberline {14.2}Perceptron}{94}%
\contentsline {subsection}{\numberline {14.2.1}Online Learning}{95}%
\contentsline {subsection}{\numberline {14.2.2}Online Gradient Descent (OGD)}{97}%
\contentsline {chapter}{\numberline {15}Lecture 15 - 04-05-2020}{98}%
\contentsline {section}{\numberline {15.1}Regret analysis of OGD}{98}%
\contentsline {subsection}{\numberline {15.1.1}Projected OGD}{99}%
\contentsline {chapter}{\numberline {16}Lecture 16 - 05-05-2020}{103}%
\contentsline {section}{\numberline {16.1}Analysis of the Perceptron in the non-separable case using the OGD framework}{103}%
\contentsline {subsection}{\numberline {16.1.1}Strongly convex loss functions}{107}%
\contentsline {chapter}{\numberline {17}Lecture 17 - 11-05-2020}{109}%
\contentsline {section}{\numberline {17.1}Strongly convex loss functions}{109}%
\contentsline {subsection}{\numberline {17.1.1}OGD for Strongly Convex losses}{109}%
\contentsline {subsection}{\numberline {17.1.2}Relate sequential risk and statistical risk}{110}%
\contentsline {chapter}{\numberline {18}Lecture 18 - 12-05-2020}{113}%
\contentsline {section}{\numberline {18.1}Kernel functions}{113}%
\contentsline {subsection}{\numberline {18.1.1}Feature expansion}{113}%
\contentsline {subsection}{\numberline {18.1.2}Kernels implement feature expansion (efficiently)}{114}%
\contentsline {section}{\numberline {18.2}Gaussian Kernel}{115}%
\contentsline {chapter}{\numberline {19}Lecture 19 - 18-05-2020}{118}%
\contentsline {section}{\numberline {19.1}Support Vector Machine (SVM)}{121}%
\contentsline {chapter}{\numberline {20}Lecture 20 - 19-05-2020}{122}%
\contentsline {section}{\numberline {20.1}Support Vector Machine Analysis}{122}%
\contentsline {subsection}{\numberline {20.1.1}Fritz John Optimality Conditions}{122}%
\contentsline {subsection}{\numberline {20.1.2}Non-separable case}{123}%
\contentsline {section}{\numberline {20.2}Pegasos: OGD to solve SVM}{125}%
\contentsline {chapter}{\numberline {21}Lecture 21 - 25-05-2020}{127}%
\contentsline {section}{\numberline {21.1}Pegasos in Kernel space}{127}%
\contentsline {section}{\numberline {21.2}Stability}{127}%
\contentsline {chapter}{\numberline {22}Lecture 22 - 26-05-2020}{132}%
\contentsline {section}{\numberline {22.1}Continuation of Pegasos}{132}%
\contentsline {section}{\numberline {22.2}Boosting and ensemble predictors}{133}%
\contentsline {subsection}{\numberline {22.2.1}Bagging}{135}%
\contentsline {subsection}{\numberline {22.2.2}Random Forest}{135}%
\contentsline {chapter}{\numberline {23}Lecture 23 - 08-06-2020}{137}%
\contentsline {section}{\numberline {23.1}Boosting}{137}%
\contentsline {section}{\numberline {23.2}AdaBoost}{140}%
\contentsline {chapter}{\numberline {24}Lecture 24 - 09-06-2020}{142}%