Commit 2c147717 authored by SHREYANSH JAIN's avatar SHREYANSH JAIN

autograder 28

parent 10f7bda2
......@@ -14,7 +14,7 @@ def taskXor():
# raise NotImplementedError
###############################################
YTrain , YVal, YTest = np.array([int(i[1]==1) for i in YTrain]).reshape((-1,1)), np.array([int(i[1]==1) for i in YVal]).reshape((-1,1)), np.array([int(i[1]==1) for i in YTest]).reshape((-1,1))
lr,batchSize,epochs = 0.2,50,10
lr,batchSize,epochs = 0.01,50,10
nn1 = nn.NeuralNetwork(lr, batchSize, epochs)
# Add layers to neural network corresponding to inputs and outputs of given data
input_layer = XTrain.shape[1]
......@@ -48,7 +48,7 @@ def preprocessMnist(X):
def taskMnist():
XTrain, YTrain, XVal, YVal, XTest, _ = loadMnist()
# Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to py file.
lr,batchSize,epochs = 0.2,256,50
lr,batchSize,epochs = 0.01,256,50
nn1 = nn.NeuralNetwork(lr, batchSize, epochs)
# Add layers to neural network corresponding to inputs and outputs of given data
input_layer = XTrain.shape[1]
......
......@@ -131,7 +131,7 @@ class FullyConnectedLayer:
# TASK 1a (Marks 0) - YOUR CODE HERE
# raise NotImplementedError
self.weights = np.random.randn(in_nodes,out_nodes)/np.sqrt(in_nodes)
self.biases = np.zeros((out_nodes,1))
self.biases = np.zeros(out_nodes)
###############################################
# NOTE: You must NOT change the above code but you can add extra variables if necessary
......@@ -207,12 +207,12 @@ class FullyConnectedLayer:
###############################################
# TASK 1d (Marks 4) - YOUR CODE HERE
if self.activation == 'relu':
self.data = X @ self.weights + self.biases.T
return self.relu_of_X(self.data)
self.data = self.relu_of_X(np.add(X @ self.weights , self.biases))
return self.data
raise NotImplementedError
elif self.activation == 'softmax':
self.data = X @ self.weights + self.biases.T
return self.softmax_of_X(self.data)
self.data = self.softmax_of_X(np.add(X @ self.weights , self.biases))
return self.data
raise NotImplementedError
else:
print("ERROR: Incorrect activation specified: " + self.activation)
......@@ -239,7 +239,7 @@ class FullyConnectedLayer:
print("ERROR: Incorrect activation specified: " + self.activation)
exit()
self.weightsGrad = (activation_prev.T @ inp_delta)/delta.shape[0]
self.biasesGrad = np.average(inp_delta,axis=0).reshape((delta.shape[1],-1))
self.biasesGrad = np.average(inp_delta,axis=0)
new_delta = inp_delta @ self.weights.T
return new_delta
###############################################
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment