# Machine Learning in Action: Implementing Multivariate Linear Regression

2012-09-07

The computeCostMulti function

[plain]
function J = computeCostMulti(X, y, theta)

m = length(y); % number of training examples

% Vectorized squared-error cost over all m examples
predictions = X * theta;
J = 1/(2*m) * (predictions - y)' * (predictions - y);

end
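
For reference, the cost line above is a direct vectorized transcription of the squared-error cost (m, X, y, and theta as in the function):

$$J(\theta) = \frac{1}{2m}\,(X\theta - y)^{T}(X\theta - y)$$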

The gradientDescentMulti function

[plain]
function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters)

m = length(y); % number of training examples
J_history = zeros(num_iters, 1);
feature_number = size(X, 2);
temp = zeros(feature_number, 1);

for iter = 1:num_iters

    % Compute every new theta(i) from the old theta, storing the results
    % in temp so that all parameters are updated simultaneously
    for i = 1:feature_number
        temp(i) = theta(i) - (alpha / m) * sum((X * theta - y) .* X(:, i));
    end
    for j = 1:feature_number
        theta(j) = temp(j);
    end

    % Record the cost at every iteration so convergence can be checked
    J_history(iter) = computeCostMulti(X, y, theta);

end

end
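
Each pass of the inner loops above implements the simultaneous gradient descent update; written out per parameter, with $h_\theta(x) = \theta^{T}x$, it is:

$$\theta_j := \theta_j - \frac{\alpha}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)x_j^{(i)}$$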

The featureNormalize function

[plain]
function [X_norm, mu, sigma] = featureNormalize(X)

X_norm = X;
mu = mean(X);    % 1 x n row vector of per-feature means
sigma = std(X);  % 1 x n row vector of per-feature standard deviations

% Subtract the mean and divide by the standard deviation, column by column
for i = 1:size(mu, 2)
    X_norm(:, i) = (X(:, i) - mu(i)) ./ sigma(i);
end

end
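
To show how these three functions fit together, here is a minimal end-to-end sketch; the toy data matrix and the values of alpha and num_iters below are illustrative choices of mine, not part of the original post:

[plain]
% Hypothetical toy data: two features per row, target in the last column
data = [2104 3 399900; 1600 3 329900; 2400 3 369000; 1416 2 232000];
X = data(:, 1:2);
y = data(:, 3);
m = length(y);

% Scale the features, then prepend the intercept column of ones
[X_norm, mu, sigma] = featureNormalize(X);
X_norm = [ones(m, 1) X_norm];

% Run gradient descent from theta = 0 (alpha and num_iters are guesses)
alpha = 0.01;
num_iters = 400;
theta = zeros(3, 1);
[theta, J_history] = gradientDescentMulti(X_norm, y, theta, alpha, num_iters);

% If alpha is small enough, J_history should decrease on every iteration
plot(1:num_iters, J_history, '-b');
xlabel('Number of iterations');
ylabel('Cost J');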

Implementing the Normal Equation algorithm

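The code below computes the closed-form least-squares solution; with pinv standing in for the inverse when X'*X is singular, it is:

$$\theta = (X^{T}X)^{-1}X^{T}y$$
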
[plain]
function [theta] = normalEqn(X, y)

% Closed-form least-squares solution; pinv is used instead of inv so the
% result is still defined when X'*X is singular (e.g. redundant features)
theta = pinv(X'*X)*X'*y;

end
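
A short usage sketch for comparison, reusing the same hypothetical toy data as above; unlike gradient descent, the normal equation needs no feature scaling, no learning rate, and no iteration:

[plain]
% Same hypothetical toy data as in the gradient-descent sketch
data = [2104 3 399900; 1600 3 329900; 2400 3 369000; 1416 2 232000];
X = [ones(size(data, 1), 1) data(:, 1:2)]; % intercept column + raw features
y = data(:, 3);

theta = normalEqn(X, y);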