Commit 2e1de191 authored by prashantsaroj

Added solution to assignments

parent a1142a40
function checkNNGradients(lambda)
%CHECKNNGRADIENTS Creates a small neural network to check the
%backpropagation gradients
% CHECKNNGRADIENTS(lambda) Creates a small neural network to check the
% backpropagation gradients. It will output the analytical gradients
% produced by your backprop code and the numerical gradients (computed
% using computeNumericalGradient). These two gradient computations should
% result in very similar values.
%
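%   A usage sketch (lambda is optional and defaults to 0), matching how the
%   exercise script calls this function:
%       checkNNGradients;      % unregularized gradient check
%       checkNNGradients(3);   % gradient check with regularization, lambda = 3
%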
if ~exist('lambda', 'var') || isempty(lambda)
lambda = 0;
end
input_layer_size = 3;
hidden_layer_size = 5;
num_labels = 3;
m = 5;
% We generate some 'random' test data
Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size);
Theta2 = debugInitializeWeights(num_labels, hidden_layer_size);
% Reusing debugInitializeWeights to generate X
X = debugInitializeWeights(m, input_layer_size - 1);
y = 1 + mod(1:m, num_labels)';
% Unroll parameters
nn_params = [Theta1(:) ; Theta2(:)];
% Short hand for cost function
costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ...
num_labels, X, y, lambda);
[cost, grad] = costFunc(nn_params);
numgrad = computeNumericalGradient(costFunc, nn_params);
% Visually examine the two gradient computations. The two columns
% you get should be very similar.
disp([numgrad grad]);
fprintf(['The above two columns you get should be very similar.\n' ...
'(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']);
% Evaluate the norm of the difference between two solutions.
% If you have a correct implementation, and assuming you used EPSILON = 0.0001
% in computeNumericalGradient.m, then diff below should be less than 1e-9
diff = norm(numgrad-grad)/norm(numgrad+grad);
fprintf(['If your backpropagation implementation is correct, then \n' ...
'the relative difference will be small (less than 1e-9). \n' ...
'\nRelative Difference: %g\n'], diff);
end
function numgrad = computeNumericalGradient(J, theta)
%COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences"
%and gives us a numerical estimate of the gradient.
% numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical
% gradient of the function J around theta. Calling y = J(theta) should
% return the function value at theta.
% Notes: The following code implements numerical gradient checking and
% returns the numerical gradient. It sets numgrad(i) to (a numerical
% approximation of) the partial derivative of J with respect to the
% i-th input argument, evaluated at theta. (i.e., numgrad(i) should
% be (approximately) the partial derivative of J with respect
% to theta(i).)
%
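% The loop below uses the centered (two-sided) difference approximation
%   numgrad(i) = (J(theta + e*e_i) - J(theta - e*e_i)) / (2*e),  e = 1e-4,
% where e_i is the i-th standard basis vector (the "perturb" vector).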
numgrad = zeros(size(theta));
perturb = zeros(size(theta));
e = 1e-4;
for p = 1:numel(theta)
% Set perturbation vector
perturb(p) = e;
loss1 = J(theta - perturb);
loss2 = J(theta + perturb);
% Compute Numerical Gradient
numgrad(p) = (loss2 - loss1) / (2*e);
perturb(p) = 0;
end
end
function W = debugInitializeWeights(fan_out, fan_in)
%DEBUGINITIALIZEWEIGHTS Initialize the weights of a layer with fan_in
%incoming connections and fan_out outgoing connections using a fixed
%strategy; this will help you later in debugging
% W = DEBUGINITIALIZEWEIGHTS(fan_out, fan_in) initializes the weights
% of a layer with fan_in incoming connections and fan_out outgoing
% connections using a fixed set of values
%
% Note that W is a matrix of size (fan_out, 1 + fan_in), as the first
% column of W handles the "bias" terms
%
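%   Example (illustrative): W = debugInitializeWeights(5, 3) returns a
%   5x4 matrix filled, in column-major order, with sin(1)/10, sin(2)/10, ...
%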
% Set W to zeros
W = zeros(fan_out, 1 + fan_in);
% Initialize W using "sin"; this ensures that W always contains the same
% values, which is useful for debugging
W = reshape(sin(1:numel(W)), size(W)) / 10;
% =========================================================================
end
function [h, display_array] = displayData(X, example_width)
%DISPLAYDATA Display 2D data in a nice grid
% [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
% stored in X in a nice grid. It returns the figure handle h and the
% displayed array if requested.
% Set example_width automatically if not passed in
if ~exist('example_width', 'var') || isempty(example_width)
example_width = round(sqrt(size(X, 2)));
end
% Gray Image
colormap(gray);
% Compute rows, cols
[m n] = size(X);
example_height = (n / example_width);
% Compute number of items to display
display_rows = floor(sqrt(m));
display_cols = ceil(m / display_rows);
% Between images padding
pad = 1;
% Setup blank display
display_array = - ones(pad + display_rows * (example_height + pad), ...
pad + display_cols * (example_width + pad));
% Copy each example into a patch on the display array
curr_ex = 1;
for j = 1:display_rows
for i = 1:display_cols
if curr_ex > m,
break;
end
% Copy the patch
% Get the max value of the patch
max_val = max(abs(X(curr_ex, :)));
display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...
pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
reshape(X(curr_ex, :), example_height, example_width) / max_val;
curr_ex = curr_ex + 1;
end
if curr_ex > m,
break;
end
end
% Display Image
h = imagesc(display_array, [-1 1]);
% Do not show axis
axis image off
drawnow;
end
%% Machine Learning Online Class - Exercise 4 Neural Network Learning
% Instructions
% ------------
%
% This file contains code that helps you get started on the
% neural network exercise. You will need to complete the following
% functions in this exercise:
%
% sigmoidGradient.m
% randInitializeWeights.m
% nnCostFunction.m
%
% For this exercise, you will not need to change any code in this file,
% or any other files other than those mentioned above.
%
%% Initialization
clear ; close all; clc
%% Setup the parameters you will use for this exercise
input_layer_size = 400; % 20x20 Input Images of Digits
hidden_layer_size = 25; % 25 hidden units
num_labels = 10; % 10 labels, from 1 to 10
% (note that we have mapped "0" to label 10)
%% =========== Part 1: Loading and Visualizing Data =============
% We start the exercise by first loading and visualizing the dataset.
% You will be working with a dataset that contains handwritten digits.
%
% Load Training Data
fprintf('Loading and Visualizing Data ...\n')
load('ex4data1.mat');
m = size(X, 1);
% Randomly select 100 data points to display
sel = randperm(size(X, 1));
sel = sel(1:100);
displayData(X(sel, :));
fprintf('Program paused. Press enter to continue.\n');
pause;
%% ================ Part 2: Loading Parameters ================
% In this part of the exercise, we load some pre-initialized
% neural network parameters.
fprintf('\nLoading Saved Neural Network Parameters ...\n')
% Load the weights into variables Theta1 and Theta2
load('ex4weights.mat');
% Unroll parameters
nn_params = [Theta1(:) ; Theta2(:)];
%% ================ Part 3: Compute Cost (Feedforward) ================
% For the neural network, you should start by implementing the
% feedforward part of the neural network that returns the cost only. You
% should complete the code in nnCostFunction.m to return the cost. After
% implementing the feedforward pass to compute the cost, you can check
% that your implementation is correct by verifying that you get the same
% cost as us for the fixed debugging parameters.
%
% We suggest implementing the feedforward cost *without* regularization
% first so that it will be easier for you to debug. Later, in part 4, you
% will get to implement the regularized cost.
%
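% A minimal sketch of the vectorized, unregularized cost (this mirrors the
% approach taken in nnCostFunction.m; Y denotes the m x num_labels one-hot
% encoding of the labels y):
%   A1 = [ones(m,1) X];
%   A2 = [ones(m,1) sigmoid(A1*Theta1')];
%   H  = sigmoid(A2*Theta2');
%   J  = -sum(sum(Y.*log(H) + (1-Y).*log(1-H))) / m;
%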
fprintf('\nFeedforward Using Neural Network ...\n')
% Weight regularization parameter (we set this to 0 here).
lambda = 0;
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
num_labels, X, y, lambda);
fprintf(['Cost at parameters (loaded from ex4weights): %f '...
'\n(this value should be about 0.287629)\n'], J);
fprintf('\nProgram paused. Press enter to continue.\n');
pause;
%% =============== Part 4: Implement Regularization ===============
% Once your cost function implementation is correct, you should now
% continue by adding regularization to the cost.
%
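% For reference, the regularization term adds (lambda/(2*m)) times the sum
% of squared weights, excluding the bias columns, consistent with the
% implementation in nnCostFunction.m:
%   J = J + (lambda/(2*m)) * (sum(sum(Theta1(:,2:end).^2)) + ...
%                             sum(sum(Theta2(:,2:end).^2)));
%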
fprintf('\nChecking Cost Function (w/ Regularization) ... \n')
% Weight regularization parameter (we set this to 1 here).
lambda = 1;
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
num_labels, X, y, lambda);
fprintf(['Cost at parameters (loaded from ex4weights): %f '...
'\n(this value should be about 0.383770)\n'], J);
fprintf('Program paused. Press enter to continue.\n');
pause;
%% ================ Part 5: Sigmoid Gradient ================
% Before you start implementing the neural network, you will first
% implement the gradient for the sigmoid function. You should complete the
% code in the sigmoidGradient.m file.
%
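% Recall that for the sigmoid g(z) = 1 ./ (1 + exp(-z)), the gradient is
%   g'(z) = g(z) .* (1 - g(z)),
% computed elementwise, so it works for scalars, vectors, and matrices alike.
%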
fprintf('\nEvaluating sigmoid gradient...\n')
g = sigmoidGradient([-1 -0.5 0 0.5 1]);
fprintf('Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:\n ');
fprintf('%f ', g);
fprintf('\n\n');
fprintf('Program paused. Press enter to continue.\n');
pause;
%% ================ Part 6: Initializing Parameters ================
% In this part of the exercise, you will begin to implement a two-layer
% neural network that classifies digits. You will start by
% implementing a function to initialize the weights of the neural network
% (randInitializeWeights.m)
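% A common strategy, and the one used in randInitializeWeights.m below, is
% to draw each weight uniformly from [-epsilon_init, epsilon_init]:
%   epsilon_init = 0.12;
%   W = rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init;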
fprintf('\nInitializing Neural Network Parameters ...\n')
initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size);
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels);
% Unroll parameters
initial_nn_params = [initial_Theta1(:) ; initial_Theta2(:)];
%% =============== Part 7: Implement Backpropagation ===============
% Once your cost matches up with ours, you should proceed to implement the
% backpropagation algorithm for the neural network. You should add to the
% code you've written in nnCostFunction.m to return the partial
% derivatives of the parameters.
%
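% For reference, the per-example backprop recurrences implemented in
% nnCostFunction.m (a3 is the output activation, Y(i,:) the one-hot label):
%   del3 = a3 - Y(i,:)';
%   del2 = (Theta2(:,2:end)' * del3) .* sigmoidGradient(z2);
%   Theta1_grad = Theta1_grad + del2 * a1';
%   Theta2_grad = Theta2_grad + del3 * a2';
% with both accumulators divided by m at the end.
%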
fprintf('\nChecking Backpropagation... \n');
% Check gradients by running checkNNGradients
checkNNGradients;
fprintf('\nProgram paused. Press enter to continue.\n');
pause;
%% =============== Part 8: Implement Regularization ===============
% Once your backpropagation implementation is correct, you should now
% continue by adding regularization to the cost and gradient.
%
fprintf('\nChecking Backpropagation (w/ Regularization) ... \n')
% Check gradients by running checkNNGradients
lambda = 3;
checkNNGradients(lambda);
% Also output the costFunction debugging values
debug_J = nnCostFunction(nn_params, input_layer_size, ...
hidden_layer_size, num_labels, X, y, lambda);
fprintf(['\n\nCost at (fixed) debugging parameters (w/ lambda = %f): %f ' ...
'\n(for lambda = 3, this value should be about 0.576051)\n\n'], lambda, debug_J);
fprintf('Program paused. Press enter to continue.\n');
pause;
%% =================== Part 9: Training NN ===================
% You have now implemented all the code necessary to train a neural
% network. To train your neural network, we will now use "fmincg", a
% function that works similarly to "fminunc". Recall that these
% advanced optimizers can minimize our cost functions efficiently as
% long as we provide them with the gradient computations.
%
fprintf('\nTraining Neural Network... \n')
% After you have completed the assignment, change the MaxIter to a larger
% value to see how more training helps.
options = optimset('MaxIter', 50);
% You should also try different values of lambda
lambda = 1;
% Create "short hand" for the cost function to be minimized
costFunction = @(p) nnCostFunction(p, ...
input_layer_size, ...
hidden_layer_size, ...
num_labels, X, y, lambda);
% Now, costFunction is a function that takes in only one argument (the
% neural network parameters)
[nn_params, cost] = fmincg(costFunction, initial_nn_params, options);
% Obtain Theta1 and Theta2 back from nn_params
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
hidden_layer_size, (input_layer_size + 1));
Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
num_labels, (hidden_layer_size + 1));
fprintf('Program paused. Press enter to continue.\n');
pause;
%% ================= Part 10: Visualize Weights =================
% You can now "visualize" what the neural network is learning by
% displaying the hidden units to see what features they are capturing in
% the data.
fprintf('\nVisualizing Neural Network... \n')
displayData(Theta1(:, 2:end));
fprintf('\nProgram paused. Press enter to continue.\n');
pause;
%% ================= Part 11: Implement Predict =================
% After training the neural network, we would like to use it to predict
% the labels. You will now implement the "predict" function to use the
% neural network to predict the labels of the training set. This lets
% you compute the training set accuracy.
pred = predict(Theta1, Theta2, X);
fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
function [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
% Minimize a continuous differentiable multivariate function. Starting point
% is given by "X" (D by 1), and the function named in the string "f" must
% return a function value and a vector of partial derivatives. The Polack-
% Ribiere flavour of conjugate gradients is used to compute search directions,
% and a line search using quadratic and cubic polynomial approximations and the
% Wolfe-Powell stopping criteria is used together with the slope ratio method
% for guessing initial step sizes. Additionally a bunch of checks are made to
% make sure that exploration is taking place and that extrapolation will not
% be unboundedly large. The "length" gives the length of the run: if it is
% positive, it gives the maximum number of line searches; if negative, its
% absolute value gives the maximum allowed number of function evaluations. You can
% (optionally) give "length" a second component, which will indicate the
% reduction in function value to be expected in the first line-search (defaults
% to 1.0). The function returns when either its length is up, or if no further
% progress can be made (ie, we are at a minimum, or so close that due to
% numerical problems, we cannot get any closer). If the function terminates
% within a few iterations, it could be an indication that the function value
% and derivatives are not consistent (ie, there may be a bug in the
% implementation of your "f" function). The function returns the found
% solution "X", a vector of function values "fX" indicating the progress made
% and "i" the number of iterations (line searches or function evaluations,
% depending on the sign of "length") used.
%
% Usage: [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
%
% See also: checkgrad
%
% Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13
%
%
% (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen
%
% Permission is granted for anyone to copy, use, or modify these
% programs and accompanying documents for purposes of research or
% education, provided this copyright notice is retained, and note is
% made of any changes that have been made.
%
% These programs and documents are distributed without any warranty,
% express or implied. As the programs were written for research
% purposes only, they have not been tested to the degree that would be
% advisable in any important application. All use of these programs is
% entirely at the user's own risk.
%
% [ml-class] Changes Made:
% 1) Function name and argument specifications
% 2) Output display
%
% Read options
if exist('options', 'var') && ~isempty(options) && isfield(options, 'MaxIter')
length = options.MaxIter;
else
length = 100;
end
RHO = 0.01; % a bunch of constants for line searches
SIG = 0.5; % RHO and SIG are the constants in the Wolfe-Powell conditions
INT = 0.1; % don't reevaluate within 0.1 of the limit of the current bracket
EXT = 3.0; % extrapolate maximum 3 times the current bracket
MAX = 20; % max 20 function evaluations per line search
RATIO = 100; % maximum allowed slope ratio
argstr = ['feval(f, X']; % compose string used to call function
for i = 1:(nargin - 3)
argstr = [argstr, ',P', int2str(i)];
end
argstr = [argstr, ')'];
if max(size(length)) == 2, red=length(2); length=length(1); else red=1; end
S=['Iteration '];
i = 0; % zero the run length counter
ls_failed = 0; % no previous line search has failed
fX = [];
[f1 df1] = eval(argstr); % get function value and gradient
i = i + (length<0); % count epochs?!
s = -df1; % search direction is steepest
d1 = -s'*s; % this is the slope
z1 = red/(1-d1); % initial step is red/(|s|+1)
while i < abs(length) % while not finished
i = i + (length>0); % count iterations?!
X0 = X; f0 = f1; df0 = df1; % make a copy of current values
X = X + z1*s; % begin line search
[f2 df2] = eval(argstr);
i = i + (length<0); % count epochs?!
d2 = df2'*s;
f3 = f1; d3 = d1; z3 = -z1; % initialize point 3 equal to point 1
if length>0, M = MAX; else M = min(MAX, -length-i); end
success = 0; limit = -1; % initialize quantities
while 1
while ((f2 > f1+z1*RHO*d1) || (d2 > -SIG*d1)) && (M > 0)
limit = z1; % tighten the bracket
if f2 > f1
z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3); % quadratic fit
else
A = 6*(f2-f3)/z3+3*(d2+d3); % cubic fit
B = 3*(f3-f2)-z3*(d3+2*d2);
z2 = (sqrt(B*B-A*d2*z3*z3)-B)/A; % numerical error possible - ok!
end
if isnan(z2) || isinf(z2)
z2 = z3/2; % if we had a numerical problem then bisect
end
z2 = max(min(z2, INT*z3),(1-INT)*z3); % don't accept too close to limits
z1 = z1 + z2; % update the step
X = X + z2*s;
[f2 df2] = eval(argstr);
M = M - 1; i = i + (length<0); % count epochs?!
d2 = df2'*s;
z3 = z3-z2; % z3 is now relative to the location of z2
end
if f2 > f1+z1*RHO*d1 || d2 > -SIG*d1
break; % this is a failure
elseif d2 > SIG*d1
success = 1; break; % success
elseif M == 0
break; % failure
end
A = 6*(f2-f3)/z3+3*(d2+d3); % make cubic extrapolation
B = 3*(f3-f2)-z3*(d3+2*d2);
z2 = -d2*z3*z3/(B+sqrt(B*B-A*d2*z3*z3)); % num. error possible - ok!
if ~isreal(z2) || isnan(z2) || isinf(z2) || z2 < 0 % num prob or wrong sign?
if limit < -0.5 % if we have no upper limit
z2 = z1 * (EXT-1); % then extrapolate the maximum amount
else
z2 = (limit-z1)/2; % otherwise bisect
end
elseif (limit > -0.5) && (z2+z1 > limit) % extrapolation beyond max?
z2 = (limit-z1)/2; % bisect
elseif (limit < -0.5) && (z2+z1 > z1*EXT) % extrapolation beyond limit
z2 = z1*(EXT-1.0); % set to extrapolation limit
elseif z2 < -z3*INT
z2 = -z3*INT;
elseif (limit > -0.5) && (z2 < (limit-z1)*(1.0-INT)) % too close to limit?
z2 = (limit-z1)*(1.0-INT);
end
f3 = f2; d3 = d2; z3 = -z2; % set point 3 equal to point 2
z1 = z1 + z2; X = X + z2*s; % update current estimates
[f2 df2] = eval(argstr);
M = M - 1; i = i + (length<0); % count epochs?!
d2 = df2'*s;
end % end of line search
if success % if line search succeeded
f1 = f2; fX = [fX' f1]';
fprintf('%s %4i | Cost: %4.6e\r', S, i, f1);
s = (df2'*df2-df1'*df2)/(df1'*df1)*s - df2; % Polack-Ribiere direction
tmp = df1; df1 = df2; df2 = tmp; % swap derivatives
d2 = df1'*s;
if d2 > 0 % new slope must be negative
s = -df1; % otherwise use steepest direction
d2 = -s'*s;
end
z1 = z1 * min(RATIO, d1/(d2-realmin)); % slope ratio but max RATIO
d1 = d2;
ls_failed = 0; % this line search did not fail
else
X = X0; f1 = f0; df1 = df0; % restore point from before failed line search
if ls_failed || i > abs(length) % line search failed twice in a row
break; % or we ran out of time, so we give up
end
tmp = df1; df1 = df2; df2 = tmp; % swap derivatives
s = -df1; % try steepest
d1 = -s'*s;
z1 = 1/(1-d1);
ls_failed = 1; % this line search failed
end
if exist('OCTAVE_VERSION')
fflush(stdout);
end
end
fprintf('\n');
The author of the "jsonlab" toolbox is Qianqian Fang. Qianqian
is currently an Assistant Professor at Massachusetts General Hospital,
Harvard Medical School.
Address: Martinos Center for Biomedical Imaging,
Massachusetts General Hospital,
Harvard Medical School
Bldg 149, 13th St, Charlestown, MA 02129, USA
URL: http://nmr.mgh.harvard.edu/~fangq/
Email: <fangq at nmr.mgh.harvard.edu> or <fangqq at gmail.com>
The script loadjson.m was built upon previous works by
- Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713
date: 2009/11/02
- François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393
date: 2009/03/22
- Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565
date: 2008/07/03
This toolbox contains patches submitted by the following contributors:
- Blake Johnson <bjohnso at bbn.com>
part of revision 341
- Niclas Borlin <Niclas.Borlin at cs.umu.se>
various fixes in revision 394, including
- loadjson crashes for all-zero sparse matrix.
- loadjson crashes for empty sparse matrix.
- Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson.
- loadjson crashes for sparse real column vector.
- loadjson crashes for sparse complex column vector.
- Data is corrupted by savejson for sparse real row vector.
- savejson crashes for sparse complex row vector.
- Yul Kang <yul.kang.on at gmail.com>
patches for svn revision 415.
- savejson saves an empty cell array as [] instead of null
- loadjson differentiates an empty struct from an empty array
============================================================================
JSONlab - a toolbox to encode/decode JSON/UBJSON files in MATLAB/Octave
----------------------------------------------------------------------------
JSONlab ChangeLog (key features marked by *):
== JSONlab 1.0 (codename: Optimus - Final), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2015/01/02 polish help info for all major functions, update examples, finalize 1.0
2014/12/19 fix a bug to strictly respect NoRowBracket in savejson
== JSONlab 1.0.0-RC2 (codename: Optimus - RC2), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2014/11/22 show progress bar in loadjson ('ShowProgress')
2014/11/17 add Compact option in savejson to output compact JSON format ('Compact')
2014/11/17 add FastArrayParser in loadjson to specify fast parser applicable levels
2014/09/18 start official github mirror: https://github.com/fangq/jsonlab
== JSONlab 1.0.0-RC1 (codename: Optimus - RC1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2014/09/17 fix several compatibility issues when running on octave versions 3.2-3.8
2014/09/17 support 2D cell and struct arrays in both savejson and saveubjson
2014/08/04 escape special characters in a JSON string
2014/02/16 fix a bug when saving ubjson files
== JSONlab 0.9.9 (codename: Optimus - beta), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2014/01/22 use binary read and write in saveubjson and loadubjson
== JSONlab 0.9.8-1 (codename: Optimus - alpha update 1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2013/10/07 better round-trip conservation for empty arrays and structs (patch submitted by Yul Kang)
== JSONlab 0.9.8 (codename: Optimus - alpha), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2013/08/23 *universal Binary JSON (UBJSON) support, including both saveubjson and loadubjson
== JSONlab 0.9.1 (codename: Rodimus, update 1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2012/12/18 *handling of various empty and sparse matrices (fixes submitted by Niclas Borlin)
== JSONlab 0.9.0 (codename: Rodimus), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2012/06/17 *new format for an invalid leading char, unpacking hex code in savejson
2012/06/01 support JSONP in savejson
2012/05/25 fix the empty cell bug (reported by Cyril Davin)
2012/04/05 savejson can save to a file (suggested by Patrick Rapin)
== JSONlab 0.8.1 (codename: Sentiel, Update 1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2012/02/28 loadjson quotation mark escape bug, see http://bit.ly/yyk1nS
2012/01/25 patch to handle root-less objects, contributed by Blake Johnson
== JSONlab 0.8.0 (codename: Sentiel), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2012/01/13 *speed up loadjson by 20 fold when parsing large data arrays in matlab
2012/01/11 remove row bracket if an array has 1 element, suggested by Mykel Kochenderfer
2011/12/22 *accept sequence of 'param',value input in savejson and loadjson
2011/11/18 fix struct array bug reported by Mykel Kochenderfer
== JSONlab 0.5.1 (codename: Nexus Update 1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2011/10/21 fix a bug in loadjson, previous code does not use any of the acceleration
2011/10/20 loadjson supports JSON collections - concatenated JSON objects
== JSONlab 0.5.0 (codename: Nexus), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2011/10/16 package and release jsonlab 0.5.0
2011/10/15 *add json demo and regression test, support cpx numbers, fix double quote bug
2011/10/11 *speed up readjson dramatically, interpret _Array* tags, show data in root level
2011/10/10 create jsonlab project, start jsonlab website, add online documentation
2011/10/07 *speed up savejson by 25x using sprintf instead of mat2str, add options support
2011/10/06 *savejson works for structs, cells and arrays
2011/09/09 derive loadjson from JSON parser from MATLAB Central, draft savejson.m
Copyright 2011-2015 Qianqian Fang <fangq at nmr.mgh.harvard.edu>. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the copyright holders.
function val=jsonopt(key,default,varargin)
%
% val=jsonopt(key,default,optstruct)
%
% setting options based on a struct. The struct can be produced
% by varargin2struct from a list of 'param','value' pairs
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
%
% $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $
%
% input:
% key: a string with which one looks up a value from a struct
% default: if the key does not exist, return default
% optstruct: a struct where each sub-field is a key
%
% output:
% val: if key exists, val=optstruct.key; otherwise val=default
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
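% example (illustrative):
%   opt = struct('Compact', 1);
%   val = jsonopt('Compact', 0, opt);        % key present, returns 1
%   val = jsonopt('FloatFormat', '%g', opt); % key absent, returns '%g'
%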
val=default;
if(nargin<=2) return; end
opt=varargin{1};
if(isstruct(opt) && isfield(opt,key))
val=getfield(opt,key);
end
function s=mergestruct(s1,s2)
%
% s=mergestruct(s1,s2)
%
% merge two struct objects into one
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% date: 2012/12/22
%
% input:
% s1,s2: struct objects; s1 and s2 cannot be arrays
%
% output:
% s: the merged struct object. fields in s1 and s2 will be combined in s.
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
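% example (illustrative):
%   s = mergestruct(struct('a',1), struct('b',2));  % s.a==1, s.b==2
%   fields of s2 overwrite any identically named fields of s1
%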
if(~isstruct(s1) || ~isstruct(s2))
error('input parameters contain non-struct');
end
if(length(s1)>1 || length(s2)>1)
error('can not merge struct arrays');
end
fn=fieldnames(s2);
s=s1;
for i=1:length(fn)
s=setfield(s,fn{i},getfield(s2,fn{i}));
end
function opt=varargin2struct(varargin)
%
% opt=varargin2struct('param1',value1,'param2',value2,...)
% or
% opt=varargin2struct(...,optstruct,...)
%
% convert a series of input parameters into a structure
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% date: 2012/12/22
%
% input:
% 'param', value: the input parameters should be pairs of a string and a value
% optstruct: if a parameter is a struct, the fields will be merged to the output struct
%
% output:
% opt: a struct where opt.param1=value1, opt.param2=value2 ...
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
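% example (illustrative):
%   opt = varargin2struct('Compact', 1, 'FileName', 'out.json');
%   % opt.Compact == 1 and opt.FileName == 'out.json'
%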
len=length(varargin);
opt=struct;
if(len==0) return; end
i=1;
while(i<=len)
if(isstruct(varargin{i}))
opt=mergestruct(opt,varargin{i});
elseif(ischar(varargin{i}) && i<len)
opt=setfield(opt,varargin{i},varargin{i+1});
i=i+1;
else
error('input must be in the form of ...,''name'',value,... pairs or structs');
end
i=i+1;
end
function str = makeValidFieldName(str)
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters will be converted to underscores, and the prefix
% "x0x[Hex code]_" will be added if the first character is not a letter.
isoct=exist('OCTAVE_VERSION','builtin');
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
if(~isoct)
str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
else
str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
end
end
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end
if(~isoct)
str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
pos=regexp(str,'[^0-9A-Za-z_]');
if(isempty(pos)) return; end
str0=str;
pos0=[0 pos(:)' length(str)];
str='';
for i=1:length(pos)
str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
end
if(pos(end)~=length(str0)) % append any tail after the last invalid char
str=[str str0(pos0(end-1)+1:pos0(end))];
end
end
function submitWithConfiguration(conf)
addpath('./lib/jsonlab');
parts = parts(conf);
fprintf('== Submitting solutions | %s...\n', conf.itemName);
tokenFile = 'token.mat';
if exist(tokenFile, 'file')
load(tokenFile);
[email token] = promptToken(email, token, tokenFile);
else
[email token] = promptToken('', '', tokenFile);
end
if isempty(token)
fprintf('!! Submission Cancelled\n');
return
end
try
response = submitParts(conf, email, token, parts);
catch
e = lasterror();
fprintf('\n!! Submission failed: %s\n', e.message);
fprintf('\n\nFunction: %s\nFileName: %s\nLineNumber: %d\n', ...
e.stack(1,1).name, e.stack(1,1).file, e.stack(1,1).line);
fprintf('\nPlease correct your code and resubmit.\n');
return
end
if isfield(response, 'errorMessage')
fprintf('!! Submission failed: %s\n', response.errorMessage);
elseif isfield(response, 'errorCode')
fprintf('!! Submission failed: %s\n', response.message);
else
showFeedback(parts, response);
save(tokenFile, 'email', 'token');
end
end
function [email token] = promptToken(email, existingToken, tokenFile)
if (~isempty(email) && ~isempty(existingToken))
prompt = sprintf( ...
'Use token from last successful submission (%s)? (Y/n): ', ...
email);
reenter = input(prompt, 's');
if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')
token = existingToken;
return;
else
delete(tokenFile);
end
end
email = input('Login (email address): ', 's');
token = input('Token: ', 's');
end
function isValid = isValidPartOptionIndex(partOptions, i)
isValid = (~isempty(i)) && (1 <= i) && (i <= numel(partOptions));
end
function response = submitParts(conf, email, token, parts)
body = makePostBody(conf, email, token, parts);
submissionUrl = submissionUrl();
responseBody = getResponse(submissionUrl, body);
jsonResponse = validateResponse(responseBody);
response = loadjson(jsonResponse);
end
function body = makePostBody(conf, email, token, parts)
bodyStruct.assignmentSlug = conf.assignmentSlug;
bodyStruct.submitterEmail = email;
bodyStruct.secret = token;
bodyStruct.parts = makePartsStruct(conf, parts);
opt.Compact = 1;
body = savejson('', bodyStruct, opt);
end
function partsStruct = makePartsStruct(conf, parts)
for part = parts
partId = part{:}.id;
fieldName = makeValidFieldName(partId);
outputStruct.output = conf.output(partId);
partsStruct.(fieldName) = outputStruct;
end
end
function [parts] = parts(conf)
parts = {};
for partArray = conf.partArrays
part.id = partArray{:}{1};
part.sourceFiles = partArray{:}{2};
part.name = partArray{:}{3};
parts{end + 1} = part;
end
end
function showFeedback(parts, response)
fprintf('== \n');
fprintf('== %43s | %9s | %-s\n', 'Part Name', 'Score', 'Feedback');
fprintf('== %43s | %9s | %-s\n', '---------', '-----', '--------');
for part = parts
score = '';
partFeedback = '';
partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));
partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));
score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);
fprintf('== %43s | %9s | %-s\n', part{:}.name, score, partFeedback);
end
evaluation = response.evaluation;
totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);
fprintf('== --------------------------------\n');
fprintf('== %43s | %9s | %-s\n', '', totalScore, '');
fprintf('== \n');
end
% use urlread or curl to send submit results to the grader and get a response
function response = getResponse(url, body)
% try using urlread() and a secure connection
params = {'jsonBody', body};
[response, success] = urlread(url, 'post', params);
if (success == 0)
% urlread didn't work, try curl & the peer certificate patch
if ispc
% testing note: use 'jsonBody =' for a test case
json_command = sprintf('echo jsonBody=%s | curl -k -X POST -d @- %s', body, url);
else
% it's Linux/OS X, so use the other form
json_command = sprintf('echo ''jsonBody=%s'' | curl -k -X POST -d @- %s', body, url);
end
% get the response body for the peer certificate patch method
[code, response] = system(json_command);
% test the success code
if (code ~= 0)
fprintf('[error] submission with curl() was not successful\n');
end
end
end
% validate the grader's response
function response = validateResponse(resp)
% test if the response is json or an HTML page
isJson = length(resp) > 0 && resp(1) == '{';
isHtml = findstr(lower(resp), '<html');
if (isJson)
response = resp;
elseif (isHtml)
% the response is html, so it's probably an error message
printHTMLContents(resp);
error('Grader response is an HTML message');
else
error('Grader sent no response');
end
end
% parse an HTML response and print its contents
function printHTMLContents(response)
strippedResponse = regexprep(response, '<[^>]+>', ' ');
strippedResponse = regexprep(strippedResponse, '[\t ]+', ' ');
fprintf(strippedResponse);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Service configuration
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function submissionUrl = submissionUrl()
submissionUrl = 'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';
end
function [J grad] = nnCostFunction(nn_params, ...
input_layer_size, ...
hidden_layer_size, ...
num_labels, ...
X, y, lambda)
%NNCOSTFUNCTION Implements the neural network cost function for a two layer
%neural network which performs classification
% [J grad] = NNCOSTFUNCTION(nn_params, input_layer_size, hidden_layer_size, ...
% num_labels, X, y, lambda) computes the cost and gradient of the neural
% network. The parameters for the neural network are "unrolled" into the
% vector nn_params and need to be converted back into the weight matrices.
%
% The returned parameter grad should be an "unrolled" vector of the
% partial derivatives of the neural network.
%
% Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices
% for our 2 layer neural network
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
hidden_layer_size, (input_layer_size + 1));
Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
num_labels, (hidden_layer_size + 1));
% Setup some useful variables
m = size(X, 1);
% You need to return the following variables correctly
J = 0;
Theta1_grad = zeros(size(Theta1));
Theta2_grad = zeros(size(Theta2));
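% Map the labels y (values 1..num_labels) to one-hot row vectors in Y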
Y = zeros(m, num_labels);
for i = 1:m
Y(i,y(i)) = 1;
end
% ====================== YOUR CODE HERE ======================
% Instructions: You should complete the code by working through the
% following parts.
%
% Part 1: Feedforward the neural network and return the cost in the
% variable J. After implementing Part 1, you can verify that your
% cost function computation is correct by verifying the cost
% computed in ex4.m
%
% Part 2: Implement the backpropagation algorithm to compute the gradients
% Theta1_grad and Theta2_grad. You should return the partial derivatives of
% the cost function with respect to Theta1 and Theta2 in Theta1_grad and
% Theta2_grad, respectively. After implementing Part 2, you can check
% that your implementation is correct by running checkNNGradients
%
% Note: The vector y passed into the function is a vector of labels
% containing values from 1..K. You need to map this vector into a
% binary vector of 1's and 0's to be used with the neural network
% cost function.
%
% Hint: We recommend implementing backpropagation using a for-loop
% over the training examples if you are implementing it for the
% first time.
%
% Part 3: Implement regularization with the cost function and gradients.
%
% Hint: You can implement this around the code for
% backpropagation. That is, you can compute the gradients for
% the regularization separately and then add them to Theta1_grad
% and Theta2_grad from Part 2.
X = [ones(m,1), X];
a2 = [ones(m,1), sigmoid(X*(Theta1'))]; % 5000x26
h_theta = sigmoid(a2*(Theta2')); % 5000x10 matrix
J = -1*sum(sum((Y.*log(h_theta) + (1-Y).*(log(1-h_theta))),2))/m;
J_reg = lambda*(sum(sum(Theta1(:,2:end).^2,2)) + sum(sum(Theta2(:,2:end).^2,2)))/(2*m);
J = J + J_reg;
for i = 1:m
a1 = X(i,:)'; % 401x1
z2 = Theta1*a1; % 25x1
a2 = [1; sigmoid(z2)]; % 26x1
a3 = sigmoid(Theta2*a2); % 10x1
del3 = a3 - Y(i,:)'; % 10x1
del2 = ((Theta2(:,2:end)')*del3).*(sigmoidGradient(z2)); % 25x1
Theta1_grad = Theta1_grad + del2*(a1'); % 25x401
Theta2_grad = Theta2_grad + del3*(a2');
end
Theta1_grad = Theta1_grad/m;
Theta2_grad = Theta2_grad/m;
Theta1_grad(:, 2:end) = Theta1_grad(:, 2:end) + lambda*(Theta1(:,2:end))/m;
Theta2_grad(:, 2:end) = Theta2_grad(:, 2:end) + lambda*(Theta2(:,2:end))/m;
% =========================================================================
% Unroll gradients
grad = [Theta1_grad(:) ; Theta2_grad(:)];
end
function p = predict(Theta1, Theta2, X)
%PREDICT Predict the label of an input given a trained neural network
% p = PREDICT(Theta1, Theta2, X) outputs the predicted label of X given the
% trained weights of a neural network (Theta1, Theta2)
% Useful values
m = size(X, 1);
num_labels = size(Theta2, 1);
% You need to return the following variables correctly
p = zeros(size(X, 1), 1);
h1 = sigmoid([ones(m, 1) X] * Theta1');
h2 = sigmoid([ones(m, 1) h1] * Theta2');
[dummy, p] = max(h2, [], 2);
% =========================================================================
end
function W = randInitializeWeights(L_in, L_out)
%RANDINITIALIZEWEIGHTS Randomly initialize the weights of a layer with L_in
%incoming connections and L_out outgoing connections
% W = RANDINITIALIZEWEIGHTS(L_in, L_out) randomly initializes the weights
% of a layer with L_in incoming connections and L_out outgoing
% connections.
%
% Note that W should be set to a matrix of size(L_out, 1 + L_in) as
% the first column of W handles the "bias" terms
%
% You need to return the following variables correctly
W = zeros(L_out, 1 + L_in);
epsilon_init = 0.12;
W = rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init;
% ====================== YOUR CODE HERE ======================
% Instructions: Initialize W randomly so that we break the symmetry while
% training the neural network.
%
% Note: The first column of W corresponds to the parameters for the bias unit
%
% =========================================================================
end
function g = sigmoid(z)
%SIGMOID Compute sigmoid function
% g = SIGMOID(z) computes the sigmoid of z.
g = 1.0 ./ (1.0 + exp(-z));
end
function g = sigmoidGradient(z)
%SIGMOIDGRADIENT returns the gradient of the sigmoid function
%evaluated at z
% g = SIGMOIDGRADIENT(z) computes the gradient of the sigmoid function
% evaluated at z. This should work regardless of whether z is a matrix or a
% vector. In particular, if z is a vector or matrix, you should return
% the gradient for each element.
g = zeros(size(z));
g = sigmoid(z).*(1-sigmoid(z));
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the gradient of the sigmoid function evaluated at
% each value of z (z can be a matrix, vector or scalar).
% =============================================================
end
function submit()
addpath('./lib');
conf.assignmentSlug = 'neural-network-learning';
conf.itemName = 'Neural Networks Learning';
conf.partArrays = { ...
{ ...
'1', ...
{ 'nnCostFunction.m' }, ...
'Feedforward and Cost Function', ...
}, ...
{ ...
'2', ...
{ 'nnCostFunction.m' }, ...
'Regularized Cost Function', ...
}, ...
{ ...
'3', ...
{ 'sigmoidGradient.m' }, ...
'Sigmoid Gradient', ...
}, ...
{ ...
'4', ...
{ 'nnCostFunction.m' }, ...
'Neural Network Gradient (Backpropagation)', ...
}, ...
{ ...
'5', ...
{ 'nnCostFunction.m' }, ...
'Regularized Gradient', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId, auxstring)
% Random Test Cases
X = reshape(3 * sin(1:1:30), 3, 10);
Xm = reshape(sin(1:32), 16, 2) / 5;
ym = 1 + mod(1:16,4)';
t1 = sin(reshape(1:2:24, 4, 3));
t2 = cos(reshape(1:2:40, 4, 5));
t = [t1(:) ; t2(:)];
if partId == '1'
[J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
out = sprintf('%0.5f ', J);
elseif partId == '2'
[J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
out = sprintf('%0.5f ', J);
elseif partId == '3'
out = sprintf('%0.5f ', sigmoidGradient(X));
elseif partId == '4'
[J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
out = sprintf('%0.5f ', J);
out = [out sprintf('%0.5f ', grad)];
elseif partId == '5'
[J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
out = sprintf('%0.5f ', J);
out = [out sprintf('%0.5f ', grad)];
end
end