Commit 8ae28015 authored by SHREYANSH JAIN

added other err fn

parent 225f736a
import matplotlib.pyplot as plt

# plot the loss values recorded in error.log, one value per line
x = []
y = []
i = 1
with open('error.log', 'r') as logfile:
    for row in logfile:
        x.append(i)
        i += 1
        y.append(float(row[:-1]))   # drop the trailing newline before parsing
plt.plot(x, y, label='Loaded from file!')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Interesting Graph\nCheck it out')
plt.legend()
plt.show()
\ No newline at end of file
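The script above assumes error.log holds one numeric loss value per line. A minimal, hypothetical sketch for generating such a file to test the plot (the decaying values here are made up purely for illustration):

# hypothetical test fixture: writes a fake error.log with one loss value per line
import numpy as np

fake_losses = 1.0/np.arange(1, 101)   # made-up, decaying loss curve
with open('error.log', 'w') as out:
    for value in fake_losses:
        out.write('%f\n' % value)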
 import numpy as np
 import argparse
 import csv
-import matplotlib.pyplot as plt
+# import matplotlib.pyplot as plt
 '''
 You are only required to fill the following functions
 mean_squared_loss
@@ -24,7 +24,7 @@ def mean_squared_loss(xdata, ydata, weights):
     guess = np.dot(xdata,weights)
     samples = np.shape(guess)[0]
-    err = 0.5*samples*np.sum(np.square(ydata.T-guess))
+    err = (0.5/samples)*np.sum(np.square(ydata-guess))
     return err
     raise NotImplementedError
@@ -32,33 +32,61 @@ def mean_squared_gradient(xdata, ydata, weights):
     samples = np.shape(xdata)[0]
     guess = np.dot(xdata,weights)
-    gradient = (1/samples)*np.dot(xdata.T,(guess-ydata.T))
+    gradient = (1/samples)*np.dot(xdata.T,(guess-ydata))
     return gradient
     raise NotImplementedError
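Since this commit changes both the loss normalisation and the gradient, a quick finite-difference check is a cheap way to confirm the two stay consistent. A sketch, assuming xdata is (samples, features), ydata is (samples,), weights is (features,), and the functions above are in scope:

import numpy as np

def numerical_gradient(loss_fn, xdata, ydata, weights, eps=1e-6):
    # central differences, one weight coordinate at a time
    grad = np.zeros_like(weights, dtype=float)
    for j in range(len(weights)):
        bump = np.zeros_like(weights, dtype=float)
        bump[j] = eps
        grad[j] = (loss_fn(xdata, ydata, weights + bump)
                   - loss_fn(xdata, ydata, weights - bump))/(2*eps)
    return grad

rng = np.random.default_rng(0)
X, y, w = rng.normal(size=(50, 3)), rng.normal(size=50), rng.normal(size=3)
print(np.allclose(mean_squared_gradient(X, y, w),
                  numerical_gradient(mean_squared_loss, X, y, w)))   # expect True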
 def mean_absolute_loss(xdata, ydata, weights):
+    guess = np.dot(xdata,weights)
+    samples = np.shape(guess)[0]
+    err = (1/samples)*np.sum(np.absolute(ydata-guess))
+    return err
     raise NotImplementedError
 def mean_absolute_gradient(xdata, ydata, weights):
+    guess = np.dot(xdata,weights)
+    samples = np.shape(xdata)[0]
+    # subgradient of the mean absolute error: sign of the residual, averaged over samples
+    gradient = (1/samples)*np.dot(xdata.T,np.sign(guess-ydata))
+    return gradient
     raise NotImplementedError
 def mean_log_cosh_loss(xdata, ydata, weights):
+    guess = np.dot(xdata,weights)
+    samples = np.shape(guess)[0]
+    # log-cosh of the residuals, averaged over samples
+    err = (1/samples)*np.sum(np.log(np.cosh(guess-ydata)))
+    return err
     raise NotImplementedError
 def mean_log_cosh_gradient(xdata, ydata, weights):
+    guess = np.dot(xdata,weights)
+    samples = np.shape(guess)[0]
+    gradient = (1/samples)*np.dot(xdata.T,np.tanh(guess-ydata))
+    return gradient
     raise NotImplementedError
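The tanh in the gradient is not ad hoc: d/dz log cosh(z) = tanh(z), so the loss and gradient above form a matched pair. A quick numeric spot-check of that identity:

import numpy as np

z = np.linspace(-3, 3, 7)
eps = 1e-6
numeric = (np.log(np.cosh(z + eps)) - np.log(np.cosh(z - eps)))/(2*eps)
print(np.allclose(numeric, np.tanh(z)))   # True: d/dz log cosh z = tanh z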
 def root_mean_squared_loss(xdata, ydata, weights):
+    guess = np.dot(xdata,weights)
+    samples = np.shape(guess)[0]
+    err = np.sqrt(np.sum(np.square(ydata-guess))/samples)
+    return err
     raise NotImplementedError
 def root_mean_squared_gradient(xdata, ydata, weights):
+    guess = np.dot(xdata,weights)
+    samples = np.shape(xdata)[0]
+    rmse = np.sqrt(np.sum(np.square(ydata-guess))/samples)
+    # chain rule: grad(rmse) = X^T (Xw - y) / (samples * rmse)
+    gradient = np.dot(xdata.T,(guess-ydata))/(samples*rmse)
+    return gradient
     raise NotImplementedError
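The RMSE gradient follows from the chain rule: with rmse = sqrt((1/n)·Σ(y − Xw)²), grad(rmse) = X^T(Xw − y)/(n·rmse). A sketch verifying that relation against the gradient of the quantity under the square root, with the same shape assumptions as before:

import numpy as np

rng = np.random.default_rng(1)
X, y, w = rng.normal(size=(40, 4)), rng.normal(size=40), rng.normal(size=4)

rmse = root_mean_squared_loss(X, y, w)
# gradient of (1/n)*sum((y - Xw)^2), i.e. the term inside the square root
mse_grad = (2/40)*np.dot(X.T, np.dot(X, w) - y)
print(np.allclose(root_mean_squared_gradient(X, y, w),
                  mse_grad/(2*rmse)))   # expect True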
 class LinearRegressor:
...