\relax
\@nameuse{bbl@beforestart}
\babel@aux{english}{}
\@writefile{toc}{\contentsline {chapter}{\numberline {1}Lecture 1 - 09-03-2020}{4}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {1.1}Introduction of the course}{4}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {1.2}Examples}{4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.1}Spam filtering}{7}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {2}Lecture 2 - 07-04-2020}{8}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {2.1}Topic}{8}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {2.2}Loss}{8}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2.1}Absolute Loss}{8}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2.2}Square Loss}{9}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2.3}Example of information of square loss}{9}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2.4}Labels and losses}{10}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2.5}Example of TF-IDF document encoding}{12}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {3}Lecture 3 - 07-04-2020}{14}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {3.1}Overfitting}{16}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1.1}Noise in the data}{16}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {3.2}Underfitting}{17}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {3.3}Nearest neighbour}{18}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {4}Lecture 4 - 07-04-2020}{20}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {4.1}Computing $h_{NN}$}{20}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {4.2}Tree Predictor}{21}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {5}Lecture 5 - 07-04-2020}{24}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {5.1}Tree Classifier}{24}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {5.2}Jensen’s inequality}{25}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {5.3}Tree Predictor}{27}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {5.4}Statistical model for Machine Learning}{28}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {6}Lecture 6 - 07-04-2020}{30}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {6.1}Bayes Optimal Predictor}{30}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {6.1.1}Square Loss}{31}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {6.1.2}Zero-one loss for binary classification}{32}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {6.2}Bayes Risk}{34}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {6.1}{\ignorespaces Example of Bayes Risk}}{34}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {7}Lecture 7 - 07-04-2020}{35}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {7.1}Chernoff-Hoeffding bound}{35}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {7.2}Union Bound}{36}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {7.1}{\ignorespaces Example}}{36}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {7.2}{\ignorespaces Example}}{37}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {7.3}{\ignorespaces Example}}{37}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {7.4}{\ignorespaces Example}}{38}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {7.5}{\ignorespaces Example}}{38}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {7.6}{\ignorespaces Sketch of how $\hat {h}$, $h^*$ and $f^*$ are represented}}{39}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {7.3}Studying overfitting of an ERM}{40}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {8}Lecture 8 - 07-04-2020}{42}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{lof}{\contentsline {figure}{\numberline {8.1}{\ignorespaces Representation of $\hat {h}$, $h^*$ and $f^*$ }}{42}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {8.2}{\ignorespaces Example}}{43}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {8.1}The problem of estimating risk in practice}{43}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {8.2}Cross-validation}{45}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {8.3}{\ignorespaces Splitting into training and test sets}}{45}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {8.4}{\ignorespaces K-folds}}{46}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {8.3}Nested cross-validation}{47}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {8.5}{\ignorespaces Nested cross-validation}}{47}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {9}Lecture 9 - 07-04-2020}{48}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {9.1}Tree predictors}{48}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {9.1}{\ignorespaces Tree building}}{48}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {9.2}{\ignorespaces Tree with at most N nodes}}{49}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {9.1.1}Catalan Number}{50}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {9.3}{\ignorespaces Algorithm for tree predictors}}{52}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {10}Lecture 10 - 07-04-2020}{54}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {10.1}TO BE DEFINED}{54}\protected@file@percent }
\bibstyle{abbrv}
\bibdata{main}