# ml/assignment_1/src/build_models.py
# (106 lines, 3.2 KiB, Python)
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from tensorflow.keras import Input, Model
from tensorflow.keras.models import Sequential, save_model
from tensorflow.keras.layers import Dense
from tensorflow.keras import losses
from sklearn.metrics import mean_squared_error
from utils import save_sklearn_model, save_keras_model
import os
import random
import numpy as np
import tensorflow as tf
from keras import backend as K
#
# FIX THE RANDOM GENERATOR SEEDS
#
# Reproducibility: pin every source of randomness to one seed so the
# train/val/test splits and the network initialization are deterministic.
seed_value = 0
# 1. Set `PYTHONHASHSEED` environment variable at a fixed value.
# NOTE(review): hash randomization is fixed at interpreter startup, so
# setting this here only affects child processes, not the current run.
os.environ['PYTHONHASHSEED'] = str(seed_value)
# 2. Seed Python's built-in pseudo-random generator.
random.seed(seed_value)
# 3. Seed numpy's pseudo-random generator (this also drives sklearn's
#    train_test_split calls below, which take no explicit random_state).
np.random.seed(seed_value)
# 4. Seed the tensorflow pseudo-random generator.
tf.random.set_seed(seed_value)
# 5. Configure a new global tensorflow session restricted to one thread
#    per op; multi-threaded kernels are a source of non-determinism.
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(),
config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# Directory containing this script. NOTE(review): kept for parity with the
# original, but unused here — the data path below is relative to the CWD.
d = os.path.dirname(__file__)

# Load the raw dataset: 2000 samples, two input features, one target.
data = np.load("../data/data.npz")
xs = data["x"]  # shape (2000, 2)
y = data["y"]   # shape (2000, 1)
points = xs.shape[0]

# The design matrix below already carries an explicit bias column
# (theta_0), so sklearn's own intercept term is disabled.
lr = LinearRegression(fit_intercept=False)

# Design matrix with one column per parameter:
#   [1, x_1, x_2, x_1 * x_2, sin(x_1)]
X = np.column_stack((
    np.ones(points),
    xs,
    xs[:, 0] * xs[:, 1],
    np.sin(xs[:, 0]),
))
# Shuffle and split the data: 10% held out as the test set, then 10% of
# the remainder held out as a validation set for the NN further below.
# NOTE(review): `train_ratio` is misnamed — it is the fraction held out
# for *testing* (name kept in case unseen code references it).
train_ratio = 0.1
validation_ratio = 0.1
X_t, X_test, y_t, y_test = train_test_split(X, y, test_size=train_ratio)
X_train, X_val, y_train, y_val = train_test_split(X_t, y_t, test_size=validation_ratio)
# Persist the held-out test set for the evaluation script.
# BUG FIX: `allow_pickle` is not a keyword of np.savez — passing it stored
# a spurious array named "allow_pickle" inside the archive. Dropped.
np.savez('test', x=X_test, y=y_test)
# Fit the linear model on everything except the test set (train + val).
reg = lr.fit(X_t, y_t)
print("# Linear regression:")
# Print the fitted parameters. np.ravel makes this robust to coef_ being
# shape (1, 5) (2-D y) as well as shape (5,) (1-D y); both flatten to the
# five values the format string expects.
print("f(x) = %g + %g * x_1 + %g * x_2 + %g * x_1 * x_2 + %g * sin(x_1)"
      % tuple(np.ravel(reg.coef_)))
save_sklearn_model(reg, "../deliverable/linear_regression.pickle")
# Non-linear regression:
print("\n# Feed-forward NN:")
# Keep a reference to the full 5-column validation features before slicing.
A = X_val
# The NN consumes only the raw inputs x_1, x_2 (columns 1-2 of the design
# matrix), not the engineered bias/product/sin features.
X_train = X_train[:, 1:3]
X_val = X_val[:, 1:3]
# Standardize using *training* statistics only (no leakage from val data).
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
# NOTE(review): X_val[:, 1:3] is a NumPy *view*, so the in-place ops below
# also normalize columns 1-2 of the array still referenced by `A` — confirm
# that any later use of `A` expects normalized values there.
X_train -= mean
X_train /= std
X_val -= mean
X_val /= std
# Small feed-forward regressor: 2 inputs -> 22 (tanh) -> 15 (sigmoid) -> 1.
network = Sequential()
network.add(Dense(22, activation='tanh'))
network.add(Dense(15, activation='sigmoid'))
network.add(Dense(1, activation='linear'))
network.compile(optimizer='adam', loss='mse')
# 5000 is an upper bound; early stopping usually halts training sooner.
epochs = 5000
# NOTE(review): this monitors the *training* loss, not 'val_loss', even
# though validation data is supplied below — confirm that is intended.
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=120)
network.fit(X_train, y_train, epochs=epochs, verbose=1,
validation_data=(X_val, y_val), callbacks=[callback])
# Save the trained model plus the normalization constants needed at
# inference time to preprocess new inputs the same way.
network.save("../deliverable/nonlinear_model")
save_sklearn_model({"mean": mean, "std": std}, "../deliverable/nonlinear_model_normalizers.pickle")
# Print the final validation set MSE, which was used to tailor the NN architecture after
# several manual trials
# (arguments are (pred, true) — reversed from sklearn's (y_true, y_pred)
# convention, harmless only because squared error is symmetric).
msq = mean_squared_error(network.predict(X_val), y_val)
print(msq)