hw1: initial commit
Commit 33ac82ba3e
11 changed files with 880 additions and 0 deletions
.gitattributes (vendored): new file, 1 line
@@ -0,0 +1 @@
*.h5 filter=lfs diff=lfs merge=lfs -text
.gitignore (vendored): new file, 422 lines
@@ -0,0 +1,422 @@
.idea
*.DS_Store*

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/


# LaTeX

## Core latex/pdflatex auxiliary files:
*.aux
*.lof
*.log
*.lot
*.fls
*.out
*.toc
*.fmt
*.fot
*.cb
*.cb2
.*.lb

## Intermediate documents:
*.dvi
*.xdv
*-converted-to.*

## Bibliography auxiliary files (bibtex/biblatex/biber):
*.bbl
*.bcf
*.blg
*-blx.aux
*-blx.bib
*.run.xml

## Build tool auxiliary files:
*.fdb_latexmk
*.synctex
*.synctex(busy)
*.synctex.gz
*.synctex.gz(busy)
*.pdfsync

## Build tool directories for auxiliary files
# latexrun
latex.out/

## Auxiliary and intermediate files from other packages:
# algorithms
*.alg
*.loa

# achemso
acs-*.bib

# amsthm
*.thm

# beamer
*.nav
*.pre
*.snm
*.vrb

# changes
*.soc

# comment
*.cut

# cprotect
*.cpt

# elsarticle (documentclass of Elsevier journals)
*.spl

# endnotes
*.ent

# fixme
*.lox

# feynmf/feynmp
*.mf
*.mp
*.t[1-9]
*.t[1-9][0-9]
*.tfm

#(r)(e)ledmac/(r)(e)ledpar
*.end
*.?end
*.[1-9]
*.[1-9][0-9]
*.[1-9][0-9][0-9]
*.[1-9]R
*.[1-9][0-9]R
*.[1-9][0-9][0-9]R
*.eledsec[1-9]
*.eledsec[1-9]R
*.eledsec[1-9][0-9]
*.eledsec[1-9][0-9]R
*.eledsec[1-9][0-9][0-9]
*.eledsec[1-9][0-9][0-9]R

# glossaries
*.acn
*.acr
*.glg
*.glo
*.gls
*.glsdefs
*.lzo
*.lzs

# uncomment this for glossaries-extra (will ignore makeindex's style files!)
# *.ist

# gnuplottex
*-gnuplottex-*

# gregoriotex
*.gaux
*.gtex

# htlatex
*.4ct
*.4tc
*.idv
*.lg
*.trc
*.xref

# hyperref
*.brf

# knitr
*-concordance.tex
# TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files
# *.tikz
*-tikzDictionary

# listings
*.lol

# luatexja-ruby
*.ltjruby

# makeidx
*.idx
*.ilg
*.ind

# minitoc
*.maf
*.mlf
*.mlt
*.mtc[0-9]*
*.slf[0-9]*
*.slt[0-9]*
*.stc[0-9]*

# minted
_minted*
*.pyg

# morewrites
*.mw

# nomencl
*.nlg
*.nlo
*.nls

# pax
*.pax

# pdfpcnotes
*.pdfpc

# sagetex
*.sagetex.sage
*.sagetex.py
*.sagetex.scmd

# scrwfile
*.wrt

# sympy
*.sout
*.sympy
sympy-plots-for-*.tex/

# pdfcomment
*.upa
*.upb

# pythontex
*.pytxcode
pythontex-files-*/

# tcolorbox
*.listing

# thmtools
*.loe

# TikZ & PGF
*.dpth
*.md5
*.auxlock

# todonotes
*.tdo

# vhistory
*.hst
*.ver

# easy-todo
*.lod

# xcolor
*.xcp

# xmpincl
*.xmpi

# xindy
*.xdy

# xypic precompiled matrices and outlines
*.xyc
*.xyd

# endfloat
*.ttt
*.fff

# Latexian
TSWLatexianTemp*

## Editors:
# WinEdt
*.bak
*.sav

# Texpad
.texpadtmp

# LyX
*.lyx~

# Kile
*.backup

# gummi
.*.swp

# KBibTeX
*~[0-9]*

# TeXnicCenter
*.tps

# auto folder when using emacs and auctex
./auto/*
*.el

# expex forward references with \gathertags
*-tags.tex

# standalone packages
*.sta

# Makeindex log files
*.lpz

# xwatermark package
*.xwm

# REVTeX puts footnotes in the bibliography by default, unless the nofootinbib
# option is specified. Footnotes are then stored in a file with suffix Notes.bib.
# Uncomment the next line to have this generated file ignored.
#*Notes.bib
assignment_1/README.md: new file, 167 lines
@@ -0,0 +1,167 @@
# Assignment 1

The assignment is split into two parts: you are asked to solve a regression problem and to answer some questions.

You can use all the books, material, and help you need.
Bear in mind that the questions you are asked are similar to those you may find in the final exam, and they concern very important and fundamental machine learning concepts. As such, sooner or later you will need to learn them to pass the course.
We will give you some feedback afterwards.

## Tasks
You have to solve a regression problem. You are given a set of data consisting of input-output pairs `(x, y)`, and you have to build a model to fit this data. We will then evaluate the performance of your model on a **different test set**.

In order to complete the assignment, you have to address the tasks listed below and submit your solution as a zip file on the iCorsi platform. More details are in the Instructions section below.

### T1.
Use the family of models `f(x, theta) = theta_0 + theta_1 * x_1 + theta_2 * x_2 + theta_3 * x_1 * x_2 + theta_4 * sin(x_1)` to fit the data:
- write in the report the formula of the model, substituting the parameters `theta_0, ..., theta_4` with the estimates you've found;
- evaluate the test performance of your model using the mean squared error as the performance measure.
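For illustration only, here is a minimal sketch of one way T1 could be approached. It is an assumption about the workflow, not the reference solution: it assumes the provided `data/data.npz` with arrays `x` of shape `(n_samples, 2)` and `y`, builds the design matrix of the model family by hand, and fits it with Scikit-learn's `LinearRegression`, using a held-out split to estimate the MSE.

```python
# Hedged sketch for T1 (one possible approach, not the reference solution).
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Load the provided dataset (the path is an assumption; adjust to your layout).
data = np.load('data/data.npz')
x, y = data['x'], data['y']

# Hold out part of the data to estimate the test MSE internally.
x_tr, x_te, y_tr, y_te = train_test_split(x, y, test_size=0.2, random_state=0)

def design_matrix(x):
    """Features of f(x, theta) = theta_0 + theta_1*x1 + theta_2*x2
       + theta_3*x1*x2 + theta_4*sin(x1); the intercept supplies theta_0."""
    x1, x2 = x[:, 0], x[:, 1]
    return np.column_stack([x1, x2, x1 * x2, np.sin(x1)])

reg = LinearRegression().fit(design_matrix(x_tr), y_tr)
theta = np.concatenate([[reg.intercept_], reg.coef_])
print('estimated theta_0..theta_4:', theta)

y_pred = reg.predict(design_matrix(x_te))
print('held-out MSE:', ((y_te - y_pred) ** 2).mean())
```

Note that the held-out split above only gives an internal estimate; the official test evaluation is still done by running `run_model.py` on our unseen test set.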

### T2.
Consider any family of non-linear models of your choice to address the above regression problem:
- evaluate the test performance of your model using the mean squared error as the performance measure;
- compare your model with the linear regression of Task 1. Which one is **statistically** better?
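One common way to make the comparison in the last bullet concrete is a paired test on the per-sample errors that the two models make on the same held-out data. The sketch below is only an illustration of the idea: it uses SciPy (which is not among the tools listed later) and synthetic predictions in place of real model outputs, and the course may expect a different procedure.

```python
# Paired t-test on per-sample squared errors of two sets of predictions.
import numpy as np
from scipy import stats

def paired_error_test(y_true, pred_a, pred_b):
    """Compare two models on the same test points.

    Returns the t statistic and two-sided p-value of a paired t-test on the
    per-sample squared errors; a small p-value suggests the difference in MSE
    is unlikely to be due to chance alone.
    """
    err_a = (y_true - pred_a) ** 2
    err_b = (y_true - pred_b) ** 2
    return stats.ttest_rel(err_a, err_b)

# Tiny synthetic demo with made-up predictions, just to show the call:
rng = np.random.default_rng(0)
y_true = rng.normal(size=200)
pred_a = y_true + rng.normal(scale=0.5, size=200)   # "worse" model
pred_b = y_true + rng.normal(scale=0.2, size=200)   # "better" model
t_stat, p_value = paired_error_test(y_true, pred_a, pred_b)
print(f't = {t_stat:.2f}, p = {p_value:.4f}')
```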

### T3. (Bonus)

In the [Github repository of the course](https://github.com/marshka/ml-20-21), you will find a trained Scikit-learn model that we built using the same dataset you are given. This _baseline_ model is able to achieve an MSE of **0.0194** when evaluated on the test set.
You will get extra points if the test performance of your model is **better** (i.e., the MSE is lower) than ours. Of course, you also have to tell us **why** you think that your model is better.

In order to complete the assignment, you must submit a zip file on the iCorsi platform containing:

1. a PDF file describing how you solved the assignment, covering all the points described above (at most 2500 words, no code!);
2. a working example of how to load your **trained model** from file and evaluate it;
3. the source code you used to build, train, and evaluate your model.

See below for more details.

## Questions

Motivate all answers, but keep them short; support your answers with formulas where appropriate.

### Q1. Training versus Validation

A neural network, trained by gradient descent, is designed to solve a regression problem where the target value is affected by noise. For different complexities of the model family (e.g., varying the number of hidden neurons), the training is performed until convergence (null gradient) and the achieved performance (e.g., mean squared error) is reported in the following plot.

1. Explain the curves' behavior in each of the three highlighted sections of the figures, namely (a), (b), and (c);
2. Is any of the three sections associated with the concepts of overfitting and underfitting? If yes, explain how.
3. Is there any evidence of high approximation risk? Why? If yes, in which of the subfigures below?
4. Do you think that by further increasing the model complexity you will be able to bring the training error to zero?
5. Do you think that by further increasing the model complexity you will be able to bring the structural risk to zero?

<img src="./ex_train_val_test.png" style="width:500px"/>

### Q2. Linear Regression

Consider the following regression problem, in which the task is to estimate the target variable `y = g(x) + eta`, where `g(.)` is unknown, `eta ~ N(0, 1)`, and the input variable `x` is a bidimensional vector `x = [x_1, x_2]`.
Suppose you have `n` training samples and you fit the data using the linear model family `f(x, theta) = theta_0 + theta_1 * x_1 + theta_2 * x_2`.

Now, we add another regressor (feature) `x_3` (to obtain `f(x, theta) = theta_0 + theta_1 * x_1 + theta_2 * x_2 + theta_3 * x_3`) and we fit a linear model on the same data again. Comment and compare how (a.) the training error, (b.) the test error, and (c.) the coefficients would change in the following cases:

1. `x_3` is a normally distributed independent random variable, in particular `x_3 ~ N(1, 2)`.
2. `x_3 = 2.5 * x_1 + x_2`.
3. `x_3 = x_1 * x_2`.

Motivate your answers.

**NB**: You **don't** have to run any experiment to answer this question; your answers should be based only on your understanding of linear regression.

### Q3. Classification

Consider the classification problem shown in the picture below and answer each point.

<img src="./im.png" style="width:200px"/>

1. Your boss asked you to solve the problem using a perceptron, and now he's upset because you are getting poor results. How would you justify the poor performance of your perceptron classifier to your boss?
2. Would you expect to have better luck with a neural network with activation function `h(x) = - x * e^(-2)` for the hidden units?
3. What are the main differences and similarities between the perceptron and the logistic regression neuron?

## Instructions

### Tools
Your solution to the regression problem must be entirely coded in `Python 3` ([not `Python 2`](https://python3statement.org/)), using the tools we have seen in the labs.
These include:

- Numpy
- Scikit-learn
- Keras

You can develop your code in Colab, as we did in the labs, or you can install the libraries on your machine and develop locally.
If you choose to work in Colab, you can then export the code to a `.py` file by clicking "File > Download .py" in the top menu.
If you prefer to work locally, you can install the Python libraries with a package manager such as [Pip](https://pypi.org/project/pip/). There are plenty of tutorials online.

### Submission

In the [Github repository of the course](https://github.com/marshka/ml-20-21), you will find a folder named `assignment_1`.
The contents of the folder are as follows:

- `data/`:
    - `data.npz`: a file storing the dataset in a native Numpy format;
- `deliverable/`:
    - `run_model.py`: a working example of how to evaluate our baseline model;
    - `baseline_model.pickle`: a binary file storing our baseline model;
- `src/`:
    - `utils.py`: some utility methods to save and load models;
- `report_surname_name.pdf`: an example of the report;
- `report_surname_name.tex`: the LaTeX source for the provided report pdf.

The `run_model.py` script loads the data from the data folder, loads a model from file, and evaluates the model's MSE on the loaded data.
When evaluating your models on the unseen test set, **we will only run this script**.
You cannot edit the script, except for the parts necessary to load your model and pre-process the data. Look at the comments in the file to know where you're allowed to edit the code.

You must submit a zip file with a structure similar to the repository, but:

- the `deliverable` folder must contain:
    - `run_model.py`, edited in order to work with your models;
    - the saved models for both tasks (the linear regression and the model of your choice);
    - any additional files needed to load the trained models and evaluate their performance with `run_model.py`;
- the `src` folder must contain all the source files that you used to build, train, and evaluate your models;
- the report must be a pdf file (no need for the `.tex` file) covering both the tasks and the questions.

The zip file should have the following structure:
```bash
as1_surname_name/
    report_surname_name.pdf
    deliverable/
        run_model.py
        linear_regression.pickle  # or any other file storing your linear regression
        nonlinear_model.pickle    # or any other file storing your model of choice
    src/
        file1.py
        file2.py
        ...
```
Remember that we will **only execute** `run_model.py` to grade your assignment, so make sure that everything works out of the box.

We don't accept photos or scans of handwritten answers. We strongly suggest you create your submission in LaTeX (e.g., using [Overleaf](https://www.overleaf.com)), so that any formula you write is legible. Following the provided `.tex` sample is suggested but not mandatory. You can add figures and tables where appropriate.

### Evaluation criteria

You will get a positive evaluation if:

- you demonstrate a clear understanding of the main tasks and concepts;
- you provide a clear description of your solution to the task;
- you provide sensible motivations for your choice of model and hyper-parameters;
- the statistical comparison between models is conducted appropriately;
- your code runs out of the box (i.e., without us needing to change your code to evaluate the assignment);
- your code is properly commented;
- your model has a good test performance on the unseen data;
- your model has a better test performance than the baseline model provided by us;
- your answers are complete: all claims are justified and supported by formulas (where appropriate);
- your answers are a re-elaboration of the concepts presented in class, and not simply cut-and-paste from a book, Wikipedia, or a classmate's answer.

You will get a negative evaluation if:

- we realize that you copied your solution (it's important that you explain in your own words, so that it's clear that you understood, even if you discussed your solution with others);
- the description of your solution is not clear, or is incomplete;
- the statistical comparison between models is not thorough;
- your code requires us to edit things manually in order to work;
- your code is not properly commented.
assignment_1/data/data.npz: new binary file (not shown)
assignment_1/deliverable/baseline_model.pickle: new binary file (not shown)
assignment_1/deliverable/run_model.py: new file, 74 lines
@@ -0,0 +1,74 @@
import joblib
import numpy as np


def load_data(filename):
    """
    Loads the data from a saved .npz file.
    ### YOU CAN NOT EDIT THIS FUNCTION ###

    :param filename: string, path to the .npz file storing the data.
    :return: two numpy arrays:
        - x, a Numpy array of shape (n_samples, n_features) with the inputs;
        - y, a Numpy array of shape (n_samples, ) with the targets.
    """
    data = np.load(filename)
    x = data['x']
    y = data['y']

    return x, y


def evaluate_predictions(y_true, y_pred):
    """
    Evaluates the mean squared error between the values in y_true and the values
    in y_pred.
    ### YOU CAN NOT EDIT THIS FUNCTION ###

    :param y_true: Numpy array, the true target values from the test set;
    :param y_pred: Numpy array, the values predicted by your model.
    :return: float, the mean squared error between the two arrays.
    """
    assert y_true.shape == y_pred.shape
    return ((y_true - y_pred) ** 2).mean()


def load_model(filename):
    """
    Loads a Scikit-learn model saved with joblib.dump.
    This is just an example, you can write your own function to load the model.
    Some examples can be found in src/utils.py.

    :param filename: string, path to the file storing the model.
    :return: the model.
    """
    model = joblib.load(filename)

    return model


if __name__ == '__main__':
    # Load the data
    # This will be replaced with the test data when grading the assignment
    data_path = '../data/data.npz'
    x, y = load_data(data_path)

    ############################################################################
    # EDITABLE SECTION OF THE SCRIPT: if you need to edit the script, do it here
    ############################################################################

    # Load the trained model
    baseline_model_path = './baseline_model.pickle'
    baseline_model = load_model(baseline_model_path)

    # Predict on the given samples
    y_pred = baseline_model.predict(x)

    ############################################################################
    # STOP EDITABLE SECTION: do not modify anything below this point.
    ############################################################################

    # Evaluate the prediction using MSE
    mse = evaluate_predictions(y_pred, y)
    print('MSE: {}'.format(mse))
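As an illustration of how the editable section above might be adapted for the submission, the hypothetical snippet below loads a Keras model saved with the helpers in `src/utils.py` instead of the pickled baseline. The file name `nonlinear_model` and the `sys.path` tweak are assumptions, not part of the assignment, and `x` refers to the array already loaded by `load_data` earlier in the script.

```python
# Hypothetical replacement for the editable section of run_model.py
# (assumed file names and paths; the non-editable code stays unchanged).
import sys

sys.path.append('../src')            # make src/utils.py importable
from utils import load_keras_model

model = load_keras_model('./nonlinear_model')  # saved with save_keras_model
y_pred = model.predict(x).ravel()              # reshape (n, 1) -> (n,) to match y
```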
assignment_1/ex_train_val_test.png: new binary file, 153 KiB (not shown)
assignment_1/im.png: new binary file, 45 KiB (not shown)
assignment_1/report_surname_name.pdf: new binary file (not shown)
assignment_1/report_surname_name.tex: new file, 155 lines
@@ -0,0 +1,155 @@

%----------------------------------------------------------------------------------------
%	Machine Learning Assignment Template
%----------------------------------------------------------------------------------------

\documentclass[11pt]{scrartcl}
\newcommand*\student[1]{\newcommand{\thestudent}{{#1}}}

%----------------------------------------------------------------------------------------
%	INSERT HERE YOUR NAME
%----------------------------------------------------------------------------------------

\student{Surname Name}

%----------------------------------------------------------------------------------------
%	PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------

\usepackage[utf8]{inputenc} % Required for inputting international characters
\usepackage[T1]{fontenc} % Use 8-bit encoding
\usepackage[sc]{mathpazo}
\usepackage{caption, subcaption}
\usepackage{hyperref}
\usepackage{inconsolata}

\usepackage[english]{babel} % English language hyphenation
\usepackage{amsmath, amsfonts} % Math packages
\usepackage{listings} % Code listings, with syntax highlighting
\usepackage{graphicx} % Required for inserting images
\graphicspath{{Figures/}{./}} % Specifies where to look for included images (trailing slash required)
\usepackage{float}

%----------------------------------------------------------------------------------------
%	DOCUMENT MARGINS
%----------------------------------------------------------------------------------------

\usepackage{geometry} % For page dimensions and margins
\geometry{
    paper=a4paper,
    top=2.5cm, % Top margin
    bottom=3cm, % Bottom margin
    left=3cm, % Left margin
    right=3cm, % Right margin
}

%----------------------------------------------------------------------------------------
%	SECTION TITLES
%----------------------------------------------------------------------------------------

\usepackage{sectsty}
\sectionfont{\vspace{6pt}\centering\normalfont\scshape}
\subsectionfont{\normalfont\bfseries} % \subsection{} styling
\subsubsectionfont{\normalfont\itshape} % \subsubsection{} styling
\paragraphfont{\normalfont\scshape} % \paragraph{} styling

%----------------------------------------------------------------------------------------
%	HEADERS AND FOOTERS
%----------------------------------------------------------------------------------------

\usepackage{scrlayer-scrpage}
\ofoot*{\pagemark} % Right footer
\ifoot*{\thestudent} % Left footer
\cfoot*{} % Centre footer

%----------------------------------------------------------------------------------------
%	TITLE SECTION
%----------------------------------------------------------------------------------------

\title{
    \normalfont\normalsize
    \textsc{Machine Learning\\%
    Universit\`a della Svizzera italiana}\\
    \vspace{25pt}
    \rule{\linewidth}{0.5pt}\\
    \vspace{20pt}
    {\huge Assignment 1}\\
    \vspace{12pt}
    \rule{\linewidth}{1pt}\\
    \vspace{12pt}
}

\author{\LARGE \thestudent}

\date{\normalsize\today}

\begin{document}

\maketitle

The assignment is split into two parts: you are asked to solve a regression problem, and answer some questions. You can use all the books, material, and help you need. Bear in mind that the questions you are asked are similar to those you may find in the final exam, and are related to very important and fundamental machine learning concepts. As such, sooner or later you will need to learn them to pass the course. We will give you some feedback afterwards.\\

\noindent !! Note that this file is just meant as a template for the report, in which we have reproduced \textbf{part of} the assignment text for convenience. You must always refer to the text in the README.md file for the complete assignment requirements.

%----------------------------------------------------------------------------------------
%	Tasks
%----------------------------------------------------------------------------------------

\section{Regression problem}

This section should contain a detailed description of how you solved the assignment, including all required statistical analyses of the models' performance and a comparison between the linear regression and the model of your choice. Limit the assignment to 2500 words (formulas, tables, figures, etc., do not count as words) and do not include any code in the report.

\subsection{Task 1}
Use the family of models $f(\mathbf{x}, \boldsymbol{\theta}) = \theta_0 + \theta_1 \cdot x_1 + \theta_2 \cdot x_2 + \theta_3 \cdot x_1 \cdot x_2 + \theta_4 \cdot \sin(x_1)$ to fit the data. Write in the report the formula of the model, substituting the parameters $\theta_0, \ldots, \theta_4$ with the estimates you've found:
$$f(\mathbf{x}, \boldsymbol{\theta}) = \_ + \_ \cdot x_1 + \_ \cdot x_2 + \_ \cdot x_1 \cdot x_2 + \_ \cdot \sin(x_1)$$
Evaluate the test performance of your model using the mean squared error as the performance measure.

\subsection{Task 2}
Consider any family of non-linear models of your choice to address the above regression problem. Evaluate the test performance of your model using the mean squared error as the performance measure. Compare your model with the linear regression of Task 1. Which one is \textbf{statistically} better?

\subsection{Task 3 (Bonus)}
In the \href{https://github.com/marshka/ml-20-21}{\textbf{Github repository of the course}}, you will find a trained Scikit-learn model that we built using the same dataset you are given. This baseline model is able to achieve an MSE of \textbf{0.0194} when evaluated on the test set. You will get extra points if the test performance of your model is better (i.e., the MSE is lower) than ours. Of course, you also have to tell us why you think that your model is better.

%----------------------------------------------------------------------------------------
%	Questions
%----------------------------------------------------------------------------------------
\newpage
\section{Questions}

\subsection{Q1. Training versus Validation}
\begin{itemize}
\item[1.Q.] Explain the curves' behavior in each of the three highlighted sections of the figures, namely (a), (b), and (c);
\item[1.A.] ~\\
\item[2.Q.] Is any of the three sections associated with the concepts of overfitting and underfitting? If yes, explain how.
\item[2.A.] ~\\
\item[3.Q.] Is there any evidence of high approximation risk? Why? If yes, in which of the subfigures below?
\item[3.A.] ~\\
\item[4.Q.] Do you think that by further increasing the model complexity you will be able to bring the training error to zero?
\item[4.A.] ~\\
\item[5.Q.] Do you think that by further increasing the model complexity you will be able to bring the structural risk to zero?
\item[5.A.] ~\\
\end{itemize}

\subsection{Q2. Linear Regression}
Comment and compare how (a.) the training error, (b.) the test error, and (c.) the coefficients would change in the following cases:
\begin{itemize}
\item[1.Q.] $x_3$ is a normally distributed independent random variable $x_3 \sim \mathcal{N}(1, 2)$
\item[1.A.] ~\\
\item[2.Q.] $x_3 = 2.5 \cdot x_1 + x_2$
\item[2.A.] ~\\
\item[3.Q.] $x_3 = x_1 \cdot x_2$
\item[3.A.] ~\\
\end{itemize}

\subsection{Q3. Classification}
\begin{itemize}
\item[1.Q.] Your boss asked you to solve the problem using a perceptron, and now he's upset because you are getting poor results. How would you justify the poor performance of your perceptron classifier to your boss?
\item[1.A.] ~\\
\item[2.Q.] Would you expect to have better luck with a neural network with activation function $h(x) = - x \cdot e^{-2}$ for the hidden units?
\item[2.A.] ~\\
\item[3.Q.] What are the main differences and similarities between the perceptron and the logistic regression neuron?
\item[3.A.] ~\\
\end{itemize}

\end{document}
assignment_1/src/utils.py: new file, 61 lines
@@ -0,0 +1,61 @@
import joblib
from keras import models


def save_sklearn_model(model, filename):
    """
    Saves a Scikit-learn model to disk.
    Example of usage:

    >>> reg = sklearn.linear_model.LinearRegression()
    >>> reg.fit(x_train, y_train)
    >>> save_sklearn_model(reg, 'my_model.pickle')

    :param model: the model to save;
    :param filename: string, path to the file in which to store the model.
    """
    joblib.dump(model, filename)


def load_sklearn_model(filename):
    """
    Loads a Scikit-learn model saved with joblib.dump.

    :param filename: string, path to the file storing the model.
    :return: the model.
    """
    model = joblib.load(filename)

    return model


def save_keras_model(model, filename):
    """
    Saves a Keras model to disk.
    Example of usage:

    >>> model = Sequential()
    >>> model.add(Dense(...))
    >>> model.compile(...)
    >>> model.fit(...)
    >>> save_keras_model(model, 'my_model')

    :param model: the model to save;
    :param filename: string, path to the file in which to store the model.
    """
    models.save_model(model, filename)


def load_keras_model(filename):
    """
    Loads a compiled Keras model saved with models.save_model.

    :param filename: string, path to the file storing the model.
    :return: the model.
    """
    model = models.load_model(filename)

    return model
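To show how these helpers fit together, here is a small round-trip sketch with a toy model; the synthetic data, the file name, and the assumption that it is run from inside `src/` (so that `utils` is importable) are all made up for the example.

```python
# Toy round-trip: train a model, save it with utils.py, reload it, and check
# that the reloaded model gives identical predictions.
import numpy as np
from sklearn.linear_model import LinearRegression

from utils import save_sklearn_model, load_sklearn_model

rng = np.random.default_rng(0)
x = rng.random((100, 2))
y = 3 * x[:, 0] - 2 * x[:, 1] + 0.1 * rng.standard_normal(100)

reg = LinearRegression().fit(x, y)
save_sklearn_model(reg, 'linear_regression.pickle')

restored = load_sklearn_model('linear_regression.pickle')
print(np.allclose(reg.predict(x), restored.predict(x)))   # expected: True
```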