Commit ca54197d authored by prashantsaroj

Added solution to assignments

function J = computeCost(X, y, theta)
%COMPUTECOST Compute cost for linear regression
% J = COMPUTECOST(X, y, theta) computes the cost of using theta as the
% parameter for linear regression to fit the data points in X and y
% Initialize some useful values
m = length(y); % number of training examples
% You need to return the following variables correctly
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost of a particular choice of theta
% You should set J to the cost.
hx = X * theta;              % hypothesis h_theta(x) for every training example
sqErr = sum((hx - y) .^ 2);  % sum of squared errors (not yet averaged)
J = sqErr / (2 * m);         % J(theta) = 1/(2m) * sum((h - y).^2)
% =========================================================================
end
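A quick sanity check for computeCost (a sketch; the tiny dataset below is illustrative, not from the assignment):

% With theta = [0; 0] the hypothesis is identically zero, so the cost
% reduces to sum(y.^2) / (2*m), which is easy to verify by hand.
Xtest = [1 1; 1 2; 1 3];            % m = 3 examples, intercept column included
ytest = [1; 2; 3];
computeCost(Xtest, ytest, [0; 0])   % expect (1 + 4 + 9) / 6 = 2.3333
computeCost(Xtest, ytest, [0; 1])   % perfect fit, expect 0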
function J = computeCostMulti(X, y, theta)
%COMPUTECOSTMULTI Compute cost for linear regression with multiple variables
% J = COMPUTECOSTMULTI(X, y, theta) computes the cost of using theta as the
% parameter for linear regression to fit the data points in X and y
% Initialize some useful values
m = length(y); % number of training examples
% You need to return the following variables correctly
J = 0;
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost of a particular choice of theta
% You should set J to the cost.
% Vectorized form (works for any number of features):
J = (X * theta - y)' * (X * theta - y) / (2 * m);
% =========================================================================
end
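Because the vectorized cost is dimension-agnostic, computeCostMulti should agree with computeCost on single-variable data; a minimal consistency check (illustrative values):

Xtest = [ones(5, 1), (1:5)'];
ytest = 2 * (1:5)' + 1;
t = [0.5; 0.5];
assert(abs(computeCost(Xtest, ytest, t) - computeCostMulti(Xtest, ytest, t)) < 1e-10);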
%% Machine Learning Online Class - Exercise 1: Linear Regression
% Instructions
% ------------
%
% This file contains code that helps you get started on the
% linear exercise. You will need to complete the following functions
% in this exercise:
%
% warmUpExercise.m
% plotData.m
% gradientDescent.m
% computeCost.m
% gradientDescentMulti.m
% computeCostMulti.m
% featureNormalize.m
% normalEqn.m
%
% For this exercise, you will not need to change any code in this file,
% or any other files other than those mentioned above.
%
% x refers to the population size in 10,000s
% y refers to the profit in $10,000s
%
%% Initialization
clear ; close all; clc
%% ==================== Part 1: Basic Function ====================
% Complete warmUpExercise.m
fprintf('Running warmUpExercise ... \n');
fprintf('5x5 Identity Matrix: \n');
warmUpExercise()
fprintf('Program paused. Press enter to continue.\n');
pause;
%% ======================= Part 2: Plotting =======================
fprintf('Plotting Data ...\n')
data = load('ex1data1.txt');
X = data(:, 1); y = data(:, 2);
m = length(y); % number of training examples
% Plot Data
% Note: You have to complete the code in plotData.m
plotData(X, y);
plot(X, y, 'rx', 'MarkerSize', 10); % Plot the data
ylabel('Profit in $10,000s'); % Set the y-axis label
xlabel('Population of City in 10,000s'); % Set the x-axis label
fprintf('Program paused. Press enter to continue.\n');
pause;
%% =================== Part 3: Cost and Gradient descent ===================
X = [ones(m, 1), data(:,1)]; % Add a column of ones to x
theta = zeros(2, 1); % initialize fitting parameters
% Some gradient descent settings
iterations = 1500;
alpha = 0.01;
fprintf('\nTesting the cost function ...\n')
% compute and display initial cost
J = computeCost(X, y, theta);
fprintf('With theta = [0 ; 0]\nCost computed = %f\n', J);
fprintf('Expected cost value (approx) 32.07\n');
% further testing of the cost function
J = computeCost(X, y, [-1 ; 2]);
fprintf('\nWith theta = [-1 ; 2]\nCost computed = %f\n', J);
fprintf('Expected cost value (approx) 54.24\n');
fprintf('Program paused. Press enter to continue.\n');
pause;
fprintf('\nRunning Gradient Descent ...\n')
% run gradient descent
theta = gradientDescent(X, y, theta, alpha, iterations);
% print theta to screen
fprintf('Theta found by gradient descent:\n');
fprintf('%f\n', theta);
fprintf('Expected theta values (approx)\n');
fprintf(' -3.6303\n 1.1664\n\n');
% Plot the linear fit
hold on; % keep previous plot visible
plot(X(:,2), X*theta, '-')
legend('Training data', 'Linear regression')
hold off % don't overlay any more plots on this figure
% Predict values for population sizes of 35,000 and 70,000
predict1 = [1, 3.5] *theta;
fprintf('For population = 35,000, we predict a profit of %f\n',...
predict1*10000);
predict2 = [1, 7] * theta;
fprintf('For population = 70,000, we predict a profit of %f\n',...
predict2*10000);
fprintf('Program paused. Press enter to continue.\n');
pause;
%% ============= Part 4: Visualizing J(theta_0, theta_1) =============
fprintf('Visualizing J(theta_0, theta_1) ...\n')
% Grid over which we will calculate J
theta0_vals = linspace(-10, 10, 100);
theta1_vals = linspace(-1, 4, 100);
% initialize J_vals to a matrix of 0's
J_vals = zeros(length(theta0_vals), length(theta1_vals));
% Fill out J_vals
for i = 1:length(theta0_vals)
for j = 1:length(theta1_vals)
t = [theta0_vals(i); theta1_vals(j)];
J_vals(i,j) = computeCost(X, y, t);
end
end
% Because of the way meshgrids work in the surf command, we need to
% transpose J_vals before calling surf, or else the axes will be flipped
J_vals = J_vals';
% Surface plot
figure;
surf(theta0_vals, theta1_vals, J_vals)
xlabel('\theta_0'); ylabel('\theta_1');
% Contour plot
figure;
% Plot J_vals as 20 contours spaced logarithmically between 0.01 and 1000
contour(theta0_vals, theta1_vals, J_vals, logspace(-2, 3, 20))
xlabel('\theta_0'); ylabel('\theta_1');
hold on;
plot(theta(1), theta(2), 'rx', 'MarkerSize', 10, 'LineWidth', 2);
%% Machine Learning Online Class
% Exercise 1: Linear regression with multiple variables
%
% Instructions
% ------------
%
% This file contains code that helps you get started on the
% linear regression exercise.
%
% You will need to complete the following functions in this
% exercise:
%
% warmUpExercise.m
% plotData.m
% gradientDescent.m
% computeCost.m
% gradientDescentMulti.m
% computeCostMulti.m
% featureNormalize.m
% normalEqn.m
%
% For this part of the exercise, you will need to change some
% parts of the code below for various experiments (e.g., changing
% learning rates).
%
%% Initialization
%% ================ Part 1: Feature Normalization ================
%% Clear and Close Figures
clear ; close all; clc
fprintf('Loading data ...\n');
%% Load Data
data = load('ex1data2.txt');
X = data(:, 1:2);
y = data(:, 3);
m = length(y);
% Print out some data points
fprintf('First 10 examples from the dataset: \n');
fprintf(' x = [%.0f %.0f], y = %.0f \n', [X(1:10,:) y(1:10,:)]');
fprintf('Program paused. Press enter to continue.\n');
pause;
% Scale features and set them to zero mean
fprintf('Normalizing Features ...\n');
[X mu sigma] = featureNormalize(X);
% Add intercept term to X
X = [ones(m, 1) X];
%% ================ Part 2: Gradient Descent ================
% ====================== YOUR CODE HERE ======================
% Instructions: We have provided you with the following starter
% code that runs gradient descent with a particular
% learning rate (alpha).
%
% Your task is to first make sure that your functions -
% computeCost and gradientDescent already work with
% this starter code and support multiple variables.
%
% After that, try running gradient descent with
% different values of alpha and see which one gives
% you the best result.
%
% Finally, you should complete the code at the end
% to predict the price of a 1650 sq-ft, 3 br house.
%
% Hint: By using the 'hold on' command, you can plot multiple
% graphs on the same figure.
%
% Hint: At prediction, make sure you do the same feature normalization.
%
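% The instructions above suggest experimenting with the learning rate.
% A minimal sketch of that experiment (the alpha values below are
% illustrative choices, not prescribed by the assignment): overlay the
% convergence curves of several rates on one figure.
figure; hold on;
for test_alpha = [0.01, 0.03, 0.1, 0.3]
    [~, J_hist] = gradientDescentMulti(X, y, zeros(3, 1), test_alpha, 50);
    plot(1:numel(J_hist), J_hist, 'LineWidth', 2);
end
xlabel('Number of iterations'); ylabel('Cost J');
legend('\alpha = 0.01', '\alpha = 0.03', '\alpha = 0.1', '\alpha = 0.3');
hold off;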
fprintf('Running gradient descent ...\n');
% Choose some alpha value
alpha = 0.01;
num_iters = 400;
% Init Theta and Run Gradient Descent
theta = zeros(3, 1);
[theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters);
% Plot the convergence graph
figure;
plot(1:numel(J_history), J_history, '-b', 'LineWidth', 2);
xlabel('Number of iterations');
ylabel('Cost J');
% Display gradient descent's result
fprintf('Theta computed from gradient descent: \n');
fprintf(' %f \n', theta);
fprintf('\n');
% Estimate the price of a 1650 sq-ft, 3 br house
% ====================== YOUR CODE HERE ======================
% Recall that the first column of X is all-ones. Thus, it does
% not need to be normalized.
% Normalize the query point with the training-set mu and sigma,
% then prepend the intercept term before applying theta.
price = [1, ([1650 3] - mu) ./ sigma] * theta;
% ============================================================
fprintf(['Predicted price of a 1650 sq-ft, 3 br house ' ...
'(using gradient descent):\n $%f\n'], price);
fprintf('Program paused. Press enter to continue.\n');
pause;
%% ================ Part 3: Normal Equations ================
fprintf('Solving with normal equations...\n');
% ====================== YOUR CODE HERE ======================
% Instructions: The following code computes the closed form
% solution for linear regression using the normal
% equations. You should complete the code in
% normalEqn.m
%
% After doing so, you should complete this code
% to predict the price of a 1650 sq-ft, 3 br house.
%
%% Load Data
data = csvread('ex1data2.txt');
X = data(:, 1:2);
y = data(:, 3);
m = length(y);
% Add intercept term to X
X = [ones(m, 1) X];
% Calculate the parameters from the normal equation
theta = normalEqn(X, y);
% Display normal equation's result
fprintf('Theta computed from the normal equations: \n');
fprintf(' %f \n', theta);
fprintf('\n');
% Estimate the price of a 1650 sq-ft, 3 br house
% ====================== YOUR CODE HERE ======================
price = [1, 1650, 3] * theta; % raw features; no normalization needed here
% ============================================================
fprintf(['Predicted price of a 1650 sq-ft, 3 br house ' ...
'(using normal equations):\n $%f\n'], price);
6.1101,17.592
5.5277,9.1302
8.5186,13.662
7.0032,11.854
5.8598,6.8233
8.3829,11.886
7.4764,4.3483
8.5781,12
6.4862,6.5987
5.0546,3.8166
5.7107,3.2522
14.164,15.505
5.734,3.1551
8.4084,7.2258
5.6407,0.71618
5.3794,3.5129
6.3654,5.3048
5.1301,0.56077
6.4296,3.6518
7.0708,5.3893
6.1891,3.1386
20.27,21.767
5.4901,4.263
6.3261,5.1875
5.5649,3.0825
18.945,22.638
12.828,13.501
10.957,7.0467
13.176,14.692
22.203,24.147
5.2524,-1.22
6.5894,5.9966
9.2482,12.134
5.8918,1.8495
8.2111,6.5426
7.9334,4.5623
8.0959,4.1164
5.6063,3.3928
12.836,10.117
6.3534,5.4974
5.4069,0.55657
6.8825,3.9115
11.708,5.3854
5.7737,2.4406
7.8247,6.7318
7.0931,1.0463
5.0702,5.1337
5.8014,1.844
11.7,8.0043
5.5416,1.0179
7.5402,6.7504
5.3077,1.8396
7.4239,4.2885
7.6031,4.9981
6.3328,1.4233
6.3589,-1.4211
6.2742,2.4756
5.6397,4.6042
9.3102,3.9624
9.4536,5.4141
8.8254,5.1694
5.1793,-0.74279
21.279,17.929
14.908,12.054
18.959,17.054
7.2182,4.8852
8.2951,5.7442
10.236,7.7754
5.4994,1.0173
20.341,20.992
10.136,6.6799
7.3345,4.0259
6.0062,1.2784
7.2259,3.3411
5.0269,-2.6807
6.5479,0.29678
7.5386,3.8845
5.0365,5.7014
10.274,6.7526
5.1077,2.0576
5.7292,0.47953
5.1884,0.20421
6.3557,0.67861
9.7687,7.5435
6.5159,5.3436
8.5172,4.2415
9.1802,6.7981
6.002,0.92695
5.5204,0.152
5.0594,2.8214
5.7077,1.8451
7.6366,4.2959
5.8707,7.2029
5.3054,1.9869
8.2934,0.14454
13.394,9.0551
5.4369,0.61705
2104,3,399900
1600,3,329900
2400,3,369000
1416,2,232000
3000,4,539900
1985,4,299900
1534,3,314900
1427,3,198999
1380,3,212000
1494,3,242500
1940,4,239999
2000,3,347000
1890,3,329999
4478,5,699900
1268,3,259900
2300,4,449900
1320,2,299900
1236,3,199900
2609,4,499998
3031,4,599000
1767,3,252900
1888,2,255000
1604,3,242900
1962,4,259900
3890,3,573900
1100,3,249900
1458,3,464500
2526,3,469000
2200,3,475000
2637,3,299900
1839,2,349900
1000,1,169900
2040,4,314900
3137,3,579900
1811,4,285900
1437,3,249900
1239,3,229900
2132,4,345000
4215,4,549000
2162,4,287000
1664,2,368500
2238,3,329900
2567,4,314000
1200,3,299000
852,2,179900
1852,4,299900
1203,3,239500
function [X_norm, mu, sigma] = featureNormalize(X)
%FEATURENORMALIZE Normalizes the features in X
% FEATURENORMALIZE(X) returns a normalized version of X where
% the mean value of each feature is 0 and the standard deviation
% is 1. This is often a good preprocessing step to do when
% working with learning algorithms.
% You need to set these values correctly
X_norm = X;
mu = zeros(1, size(X, 2));
sigma = zeros(1, size(X, 2));
% ====================== YOUR CODE HERE ======================
% Instructions: First, for each feature dimension, compute the mean
% of the feature and subtract it from the dataset,
% storing the mean value in mu. Next, compute the
% standard deviation of each feature and divide
% each feature by it's standard deviation, storing
% the standard deviation in sigma.
%
% Note that X is a matrix where each column is a
% feature and each row is an example. You need
% to perform the normalization separately for
% each feature.
%
% Hint: You might find the 'mean' and 'std' functions useful.
%
mu = mean(X);       % 1 x n row vector of per-feature means
sigma = std(X);     % 1 x n row vector of per-feature standard deviations
X_norm = (X - repmat(mu, size(X, 1), 1)) ./ repmat(sigma, size(X, 1), 1);
% ============================================================
end
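A round-trip check for featureNormalize (a sketch; the sample rows are taken from ex1data2.txt):

Xtest = [2104 3; 1600 3; 2400 3; 1416 2];
[Xn, mu, sigma] = featureNormalize(Xtest);
mean(Xn)   % each entry should be close to zero
std(Xn)    % each entry should be close to one
% The original data can be recovered from mu and sigma:
Xback = Xn .* repmat(sigma, size(Xn, 1), 1) + repmat(mu, size(Xn, 1), 1);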
function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
%GRADIENTDESCENT Performs gradient descent to learn theta
% theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by
% taking num_iters gradient steps with learning rate alpha
% Initialize some useful values
m = length(y); % number of training examples
J_history = zeros(num_iters, 1);
for iter = 1:num_iters
% ====================== YOUR CODE HERE ======================
% Instructions: Perform a single gradient step on the parameter vector
% theta.
%
% Hint: While debugging, it can be useful to print out the values
% of the cost function (computeCost) and gradient here.
% ============================================================
% One gradient step: compute both partial derivatives from the current
% theta, then update theta(1) and theta(2) simultaneously.
grad1 = sum(theta(1) + X(:,2) * theta(2) - y) / m;
grad2 = sum((theta(1) + X(:,2) * theta(2) - y) .* X(:,2)) / m;
theta(1) = theta(1) - alpha * grad1;
theta(2) = theta(2) - alpha * grad2;
% Save the cost J in every iteration
J_history(iter) = computeCost(X, y, theta);
end
end
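A quick convergence check for gradientDescent (a sketch; the synthetic data below are illustrative, not from the assignment):

% Noise-free line y = 1 + 2x, so theta should approach [1; 2].
Xtest = [ones(50, 1), (1:50)' / 10];
ytest = Xtest * [1; 2];
[t, J_hist] = gradientDescent(Xtest, ytest, zeros(2, 1), 0.1, 2000);
% J_hist should be non-increasing and t should end up close to [1; 2].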
function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters)
%GRADIENTDESCENTMULTI Performs gradient descent to learn theta
% theta = GRADIENTDESCENTMULTI(x, y, theta, alpha, num_iters) updates theta by
% taking num_iters gradient steps with learning rate alpha
% Initialize some useful values
m = length(y); % number of training examples
J_history = zeros(num_iters, 1);
for iter = 1:num_iters
% ====================== YOUR CODE HERE ======================
% Instructions: Perform a single gradient step on the parameter vector
% theta.
%
% Hint: While debugging, it can be useful to print out the values
% of the cost function (computeCostMulti) and gradient here.
%
% ============================================================
% Vectorized simultaneous update: theta := theta - (alpha/m) * X' * (X*theta - y)
theta = theta - (alpha / m) * (X' * (X * theta - y));
% Save the cost J in every iteration
J_history(iter) = computeCostMulti(X, y, theta);
end
end
The author of the "jsonlab" toolbox is Qianqian Fang. Qianqian
is currently an Assistant Professor at Massachusetts General Hospital,
Harvard Medical School.
Address: Martinos Center for Biomedical Imaging,
Massachusetts General Hospital,
Harvard Medical School
Bldg 149, 13th St, Charlestown, MA 02129, USA
URL: http://nmr.mgh.harvard.edu/~fangq/
Email: <fangq at nmr.mgh.harvard.edu> or <fangqq at gmail.com>
The script loadjson.m was built upon previous works by
- Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713
date: 2009/11/02
- François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393
date: 2009/03/22
- Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565
date: 2008/07/03
This toolbox contains patches submitted by the following contributors:
- Blake Johnson <bjohnso at bbn.com>
part of revision 341
- Niclas Borlin <Niclas.Borlin at cs.umu.se>
various fixes in revision 394, including
- loadjson crashes for all-zero sparse matrix.
- loadjson crashes for empty sparse matrix.
- Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson.
- loadjson crashes for sparse real column vector.
- loadjson crashes for sparse complex column vector.
- Data is corrupted by savejson for sparse real row vector.
- savejson crashes for sparse complex row vector.
- Yul Kang <yul.kang.on at gmail.com>
patches for svn revision 415.
- savejson saves an empty cell array as [] instead of null
- loadjson differentiates an empty struct from an empty array
============================================================================
JSONlab - a toolbox to encode/decode JSON/UBJSON files in MATLAB/Octave
----------------------------------------------------------------------------
JSONlab ChangeLog (key features marked by *):
== JSONlab 1.0 (codename: Optimus - Final), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2015/01/02 polish help info for all major functions, update examples, finalize 1.0
2014/12/19 fix a bug to strictly respect NoRowBracket in savejson
== JSONlab 1.0.0-RC2 (codename: Optimus - RC2), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2014/11/22 show progress bar in loadjson ('ShowProgress')
2014/11/17 add Compact option in savejson to output compact JSON format ('Compact')
2014/11/17 add FastArrayParser in loadjson to specify fast parser applicable levels
2014/09/18 start official github mirror: https://github.com/fangq/jsonlab
== JSONlab 1.0.0-RC1 (codename: Optimus - RC1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2014/09/17 fix several compatibility issues when running on octave versions 3.2-3.8
2014/09/17 support 2D cell and struct arrays in both savejson and saveubjson
2014/08/04 escape special characters in a JSON string
2014/02/16 fix a bug when saving ubjson files
== JSONlab 0.9.9 (codename: Optimus - beta), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2014/01/22 use binary read and write in saveubjson and loadubjson
== JSONlab 0.9.8-1 (codename: Optimus - alpha update 1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2013/10/07 better round-trip conservation for empty arrays and structs (patch submitted by Yul Kang)
== JSONlab 0.9.8 (codename: Optimus - alpha), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2013/08/23 *universal Binary JSON (UBJSON) support, including both saveubjson and loadubjson
== JSONlab 0.9.1 (codename: Rodimus, update 1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2012/12/18 *handling of various empty and sparse matrices (fixes submitted by Niclas Borlin)
== JSONlab 0.9.0 (codename: Rodimus), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2012/06/17 *new format for an invalid leading char, unpacking hex code in savejson
2012/06/01 support JSONP in savejson
2012/05/25 fix the empty cell bug (reported by Cyril Davin)
2012/04/05 savejson can save to a file (suggested by Patrick Rapin)
== JSONlab 0.8.1 (codename: Sentiel, Update 1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2012/02/28 loadjson quotation mark escape bug, see http://bit.ly/yyk1nS
2012/01/25 patch to handle root-less objects, contributed by Blake Johnson
== JSONlab 0.8.0 (codename: Sentiel), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2012/01/13 *speed up loadjson by 20 fold when parsing large data arrays in matlab
2012/01/11 remove row bracket if an array has 1 element, suggested by Mykel Kochenderfer
2011/12/22 *accept sequence of 'param',value input in savejson and loadjson
2011/11/18 fix struct array bug reported by Mykel Kochenderfer
== JSONlab 0.5.1 (codename: Nexus Update 1), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2011/10/21 fix a bug in loadjson, previous code does not use any of the acceleration
2011/10/20 loadjson supports JSON collections - concatenated JSON objects
== JSONlab 0.5.0 (codename: Nexus), FangQ <fangq (at) nmr.mgh.harvard.edu> ==
2011/10/16 package and release jsonlab 0.5.0
2011/10/15 *add json demo and regression test, support cpx numbers, fix double quote bug
2011/10/11 *speed up readjson dramatically, interpret _Array* tags, show data in root level
2011/10/10 create jsonlab project, start jsonlab website, add online documentation
2011/10/07 *speed up savejson by 25x using sprintf instead of mat2str, add options support
2011/10/06 *savejson works for structs, cells and arrays
2011/09/09 derive loadjson from JSON parser from MATLAB Central, draft savejson.m
Copyright 2011-2015 Qianqian Fang <fangq at nmr.mgh.harvard.edu>. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the copyright holders.
function val=jsonopt(key,default,varargin)
%
% val=jsonopt(key,default,optstruct)
%
% setting options based on a struct. The struct can be produced
% by varargin2struct from a list of 'param','value' pairs
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
%
% $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $
%
% input:
% key: a string with which one looks up a value from a struct
% default: if the key does not exist, return default
% optstruct: a struct where each sub-field is a key
%
% output:
% val: if key exists, val=optstruct.key; otherwise val=default
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
val=default;
if(nargin<=2) return; end
opt=varargin{1};
if(isstruct(opt) && isfield(opt,key))
val=getfield(opt,key);
end
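A brief usage sketch for jsonopt, following the help text above (the option names are illustrative):

opt = struct('Compact', 1);
jsonopt('Compact', 0, opt)     % key exists: returns 1
jsonopt('NoSuchKey', 5, opt)   % key missing: returns the default, 5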
function s=mergestruct(s1,s2)
%
% s=mergestruct(s1,s2)
%
% merge two struct objects into one
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% date: 2012/12/22
%
% input:
% s1,s2: a struct object; s1 and s2 cannot be arrays
%
% output:
% s: the merged struct object. fields in s1 and s2 will be combined in s.
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
if(~isstruct(s1) || ~isstruct(s2))
error('input parameters contain non-struct');
end
if(length(s1)>1 || length(s2)>1)
error('can not merge struct arrays');
end
fn=fieldnames(s2);
s=s1;
for i=1:length(fn)
s=setfield(s,fn{i},getfield(s2,fn{i}));
end
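A usage sketch for mergestruct (illustrative field names):

s = mergestruct(struct('a', 1), struct('b', 2));
% s.a == 1 and s.b == 2; a field present in both inputs takes s2's value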
function opt=varargin2struct(varargin)
%
% opt=varargin2struct('param1',value1,'param2',value2,...)
% or
% opt=varargin2struct(...,optstruct,...)
%
% convert a series of input parameters into a structure
%
% authors:Qianqian Fang (fangq<at> nmr.mgh.harvard.edu)
% date: 2012/12/22
%
% input:
% 'param', value: the input parameters should be pairs of a string and a value
% optstruct: if a parameter is a struct, the fields will be merged to the output struct
%
% output:
% opt: a struct where opt.param1=value1, opt.param2=value2 ...
%
% license:
% BSD, see LICENSE_BSD.txt files for details
%
% -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
%
len=length(varargin);
opt=struct;
if(len==0) return; end
i=1;
while(i<=len)
if(isstruct(varargin{i}))
opt=mergestruct(opt,varargin{i});
elseif(ischar(varargin{i}) && i<len)
opt=setfield(opt,varargin{i},varargin{i+1});
i=i+1;
else
error('input must be in the form of ...,''name'',value,... pairs or structs');
end
i=i+1;
end
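A usage sketch for varargin2struct (illustrative parameter names):

opt = varargin2struct('MaxIter', 100, struct('Tol', 1e-6));
% opt.MaxIter == 100 and opt.Tol == 1e-6; struct arguments are merged in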
function str = makeValidFieldName(str)
% From MATLAB doc: field names must begin with a letter, which may be
% followed by any combination of letters, digits, and underscores.
% Invalid characters will be converted to underscores, and the prefix
% "x0x[Hex code]_" will be added if the first character is not a letter.
isoct=exist('OCTAVE_VERSION','builtin');
pos=regexp(str,'^[^A-Za-z]','once');
if(~isempty(pos))
if(~isoct)
str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
else
str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
end
end
if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' ))) return; end
if(~isoct)
str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
else
pos=regexp(str,'[^0-9A-Za-z_]');
if(isempty(pos)) return; end
str0=str;
pos0=[0 pos(:)' length(str)];
str='';
for i=1:length(pos)
str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
end
if(pos(end)~=length(str))
str=[str str0(pos0(end-1)+1:pos0(end))];
end
end
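Illustrative examples of the renaming rule described above (outputs shown for the MATLAB branch):

makeValidFieldName('1st')   % -> 'x0x31_st'  ('1' is hex 31, so it is prefixed)
makeValidFieldName('a-b')   % -> 'a_0x2D_b'  ('-' is hex 2D, so it is escaped)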
function submitWithConfiguration(conf)
addpath('./lib/jsonlab');
parts = parts(conf);
fprintf('== Submitting solutions | %s...\n', conf.itemName);
tokenFile = 'token.mat';
if exist(tokenFile, 'file')
load(tokenFile);
[email token] = promptToken(email, token, tokenFile);
else
[email token] = promptToken('', '', tokenFile);
end
if isempty(token)
fprintf('!! Submission Cancelled\n');
return
end
try
response = submitParts(conf, email, token, parts);
catch
e = lasterror();
fprintf('\n!! Submission failed: %s\n', e.message);
fprintf('\n\nFunction: %s\nFileName: %s\nLineNumber: %d\n', ...
e.stack(1,1).name, e.stack(1,1).file, e.stack(1,1).line);
fprintf('\nPlease correct your code and resubmit.\n');
return
end
if isfield(response, 'errorMessage')
fprintf('!! Submission failed: %s\n', response.errorMessage);
elseif isfield(response, 'errorCode')
fprintf('!! Submission failed: %s\n', response.message);
else
showFeedback(parts, response);
save(tokenFile, 'email', 'token');
end
end
function [email token] = promptToken(email, existingToken, tokenFile)
if (~isempty(email) && ~isempty(existingToken))
prompt = sprintf( ...
'Use token from last successful submission (%s)? (Y/n): ', ...
email);
reenter = input(prompt, 's');
if (isempty(reenter) || reenter(1) == 'Y' || reenter(1) == 'y')
token = existingToken;
return;
else
delete(tokenFile);
end
end
email = input('Login (email address): ', 's');
token = input('Token: ', 's');
end
function isValid = isValidPartOptionIndex(partOptions, i)
isValid = (~isempty(i)) && (1 <= i) && (i <= numel(partOptions));
end
function response = submitParts(conf, email, token, parts)
body = makePostBody(conf, email, token, parts);
submissionUrl = submissionUrl();
responseBody = getResponse(submissionUrl, body);
jsonResponse = validateResponse(responseBody);
response = loadjson(jsonResponse);
end
function body = makePostBody(conf, email, token, parts)
bodyStruct.assignmentSlug = conf.assignmentSlug;
bodyStruct.submitterEmail = email;
bodyStruct.secret = token;
bodyStruct.parts = makePartsStruct(conf, parts);
opt.Compact = 1;
body = savejson('', bodyStruct, opt);
end
function partsStruct = makePartsStruct(conf, parts)
for part = parts
partId = part{:}.id;
fieldName = makeValidFieldName(partId);
outputStruct.output = conf.output(partId);
partsStruct.(fieldName) = outputStruct;
end
end
function [parts] = parts(conf)
parts = {};
for partArray = conf.partArrays
part.id = partArray{:}{1};
part.sourceFiles = partArray{:}{2};
part.name = partArray{:}{3};
parts{end + 1} = part;
end
end
function showFeedback(parts, response)
fprintf('== \n');
fprintf('== %43s | %9s | %-s\n', 'Part Name', 'Score', 'Feedback');
fprintf('== %43s | %9s | %-s\n', '---------', '-----', '--------');
for part = parts
score = '';
partFeedback = '';
partFeedback = response.partFeedbacks.(makeValidFieldName(part{:}.id));
partEvaluation = response.partEvaluations.(makeValidFieldName(part{:}.id));
score = sprintf('%d / %3d', partEvaluation.score, partEvaluation.maxScore);
fprintf('== %43s | %9s | %-s\n', part{:}.name, score, partFeedback);
end
evaluation = response.evaluation;
totalScore = sprintf('%d / %d', evaluation.score, evaluation.maxScore);
fprintf('== --------------------------------\n');
fprintf('== %43s | %9s | %-s\n', '', totalScore, '');
fprintf('== \n');
end
% use urlread or curl to send submit results to the grader and get a response
function response = getResponse(url, body)
% try using urlread() and a secure connection
params = {'jsonBody', body};
[response, success] = urlread(url, 'post', params);
if (success == 0)
% urlread didn't work, try curl & the peer certificate patch
if ispc
% testing note: use 'jsonBody =' for a test case
json_command = sprintf('echo jsonBody=%s | curl -k -X POST -d @- %s', body, url);
else
% it's linux/OS X, so use the other form
json_command = sprintf('echo ''jsonBody=%s'' | curl -k -X POST -d @- %s', body, url);
end
% get the response body for the peer certificate patch method
[code, response] = system(json_command);
% test the success code
if (code ~= 0)
fprintf('[error] submission with curl() was not successful\n');
end
end
end
% validate the grader's response
function response = validateResponse(resp)
% test if the response is json or an HTML page
isJson = length(resp) > 0 && resp(1) == '{';
isHtml = findstr(lower(resp), '<html');
if (isJson)
response = resp;
elseif (isHtml)
% the response is html, so it's probably an error message
printHTMLContents(resp);
error('Grader response is an HTML message');
else
error('Grader sent no response');
end
end
% parse an HTML response and print its contents
function printHTMLContents(response)
strippedResponse = regexprep(response, '<[^>]+>', ' ');
strippedResponse = regexprep(strippedResponse, '[\t ]+', ' ');
fprintf(strippedResponse);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Service configuration
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function submissionUrl = submissionUrl()
submissionUrl = 'https://www-origin.coursera.org/api/onDemandProgrammingImmediateFormSubmissions.v1';
end
function [theta] = normalEqn(X, y)
%NORMALEQN Computes the closed-form solution to linear regression
% NORMALEQN(X,y) computes the closed-form solution to linear
% regression using the normal equations.
theta = zeros(size(X, 2), 1);
% ====================== YOUR CODE HERE ======================
% Instructions: Complete the code to compute the closed form solution
% to linear regression and put the result in theta.
%
% ---------------------- Sample Solution ----------------------
% Closed form: theta = pinv(X' * X) * X' * y
% (pinv rather than inv, so a singular X'*X does not raise an error)
theta = pinv(X' * X) * X' * y;
% -------------------------------------------------------------
% ============================================================
end
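A quick cross-check of the closed-form solution (a sketch; the data are illustrative):

Xtest = [ones(4, 1), (1:4)'];
ytest = [3; 5; 7; 9];            % exactly y = 1 + 2x
t = normalEqn(Xtest, ytest);     % expect t close to [1; 2]
% MATLAB's backslash solves the same least-squares problem:
norm(t - (Xtest \ ytest))        % should be near zero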
function plotData(x, y)
%PLOTDATA Plots the data points x and y into a new figure
% PLOTDATA(x,y) plots the data points and gives the figure axes labels of
% population and profit.
figure; % open a new figure window
% ====================== YOUR CODE HERE ======================
% Instructions: Plot the training data into a figure using the
% "figure" and "plot" commands. Set the axes labels using
% the "xlabel" and "ylabel" commands. Assume the
% population and revenue data have been passed in
% as the x and y arguments of this function.
%
% Hint: You can use the 'rx' option with plot to have the markers
% appear as red crosses. Furthermore, you can make the
% markers larger by using plot(..., 'rx', 'MarkerSize', 10);
plot(x, y, 'rx', 'MarkerSize', 10);       % training data as red crosses
ylabel('Profit in $10,000s');             % y-axis label
xlabel('Population of City in 10,000s');  % x-axis label
% ============================================================
end
function submit()
addpath('./lib');
conf.assignmentSlug = 'linear-regression';
conf.itemName = 'Linear Regression with Multiple Variables';
conf.partArrays = { ...
{ ...
'1', ...
{ 'warmUpExercise.m' }, ...
'Warm-up Exercise', ...
}, ...
{ ...
'2', ...
{ 'computeCost.m' }, ...
'Computing Cost (for One Variable)', ...
}, ...
{ ...
'3', ...
{ 'gradientDescent.m' }, ...
'Gradient Descent (for One Variable)', ...
}, ...
{ ...
'4', ...
{ 'featureNormalize.m' }, ...
'Feature Normalization', ...
}, ...
{ ...
'5', ...
{ 'computeCostMulti.m' }, ...
'Computing Cost (for Multiple Variables)', ...
}, ...
{ ...
'6', ...
{ 'gradientDescentMulti.m' }, ...
'Gradient Descent (for Multiple Variables)', ...
}, ...
{ ...
'7', ...
{ 'normalEqn.m' }, ...
'Normal Equations', ...
}, ...
};
conf.output = @output;
submitWithConfiguration(conf);
end
function out = output(partId)
% Random Test Cases
X1 = [ones(20,1) (exp(1) + exp(2) * (0.1:0.1:2))'];
Y1 = X1(:,2) + sin(X1(:,1)) + cos(X1(:,2));
X2 = [X1 X1(:,2).^0.5 X1(:,2).^0.25];
Y2 = Y1.^0.5 + Y1;
if partId == '1'
out = sprintf('%0.5f ', warmUpExercise());
elseif partId == '2'
out = sprintf('%0.5f ', computeCost(X1, Y1, [0.5 -0.5]'));
elseif partId == '3'
out = sprintf('%0.5f ', gradientDescent(X1, Y1, [0.5 -0.5]', 0.01, 10));
elseif partId == '4'
out = sprintf('%0.5f ', featureNormalize(X2(:,2:4)));
elseif partId == '5'
out = sprintf('%0.5f ', computeCostMulti(X2, Y2, [0.1 0.2 0.3 0.4]'));
elseif partId == '6'
out = sprintf('%0.5f ', gradientDescentMulti(X2, Y2, [-0.1 -0.2 -0.3 -0.4]', 0.01, 10));
elseif partId == '7'
out = sprintf('%0.5f ', normalEqn(X2, Y2));
end
end
function A = warmUpExercise()
%WARMUPEXERCISE Example function in octave
% A = WARMUPEXERCISE() is an example function that returns the 5x5 identity matrix
% ============= YOUR CODE HERE ==============
% Instructions: Return the 5x5 identity matrix
% In Octave, we return values by defining which variables
% represent the return values (at the top of the file)
% and then setting them accordingly.
A = eye(5);
% ===========================================
end