function [J, grad] = nnCostFunction(nn_params, input_layer_size, ...
                                    hidden_layer_size, num_labels, ...
                                    X, y, lambda)
m = size(X, 1);   % number of training examples

% Reshape nn_params back into the weight matrices Theta1 and Theta2
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
                 hidden_layer_size, (input_layer_size + 1));
Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
                 num_labels, (hidden_layer_size + 1));

Theta1_grad = zeros(size(Theta1));
Theta2_grad = zeros(size(Theta2));

% Feedforward pass: columns are examples, rows are units
a1 = [ones(size(X, 1), 1) X]';             % (input_layer_size+1) x m, bias row added
z2 = Theta1 * a1;
a2 = [ones(1, size(z2, 2)); sigmoid(z2)];  % bias row added to hidden activations
z3 = Theta2 * a2;
a3 = sigmoid(z3);                          % num_labels x m output activations
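% sigmoid and sigmoidGradient are the exercise's helper functions; minimal
% equivalents, for reference only (a sketch, not the course-provided files):
%   g = 1 ./ (1 + exp(-z));               % sigmoid(z)
%   g = sigmoid(z) .* (1 - sigmoid(z));   % sigmoidGradient(z)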
% Map the label vector y (values 1..num_labels) into one-hot rows,
% looping over the m examples
y_map = zeros(m, num_labels);
for i = 1:m
  y_map(i, y(i)) = 1;
end
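% A vectorized alternative to the loop above (a sketch, assuming y is a
% column vector of labels in 1..num_labels; relies on broadcasting, so it
% needs Octave or MATLAB R2016b+ implicit expansion):
%   y_map = double(y(:) == 1:num_labels);   % m x num_labels one-hot matrix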
% Backpropagation, vectorized over all m examples
d3 = a3 - y_map';                          % output-layer error, num_labels x m
t = Theta2' * d3;
d2 = t(2:end, :) .* sigmoidGradient(z2);   % drop the bias row before propagating back
Theta2_grad = Theta2_grad + d3 * a2';
Theta1_grad = Theta1_grad + d2 * a1';

% Regularization terms (bias columns are not regularized)
Theta1_grad = Theta1_grad + lambda * [zeros(size(Theta1, 1), 1) Theta1(:, 2:end)];
Theta2_grad = Theta2_grad + lambda * [zeros(size(Theta2, 1), 1) Theta2(:, 2:end)];

% Unroll gradients; dividing by m last scales both the data term and the
% regularization term, yielding (1/m)*Delta + (lambda/m)*Theta
grad = [Theta1_grad(:); Theta2_grad(:)];
grad = grad / m;
% Unregularized cross-entropy cost, averaged over the m examples
J = mean(sum(-y_map' .* log(a3) + (y_map' - 1) .* log(1 - a3)));
% Add the regularization term: lambda/(2m) times the squared non-bias weights
J = J + lambda * 0.5 / m * (sum(sum(Theta1(:, 2:end) .^ 2)) + ...
                            sum(sum(Theta2(:, 2:end) .^ 2)));
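% For reference, the quantity computed above is
%   J = (1/m) * sum_i sum_k [ -y_ik*log((a3)_ki) - (1 - y_ik)*log(1 - (a3)_ki) ]
%       + (lambda/(2m)) * ( sum(Theta1(:,2:end).^2) + sum(Theta2(:,2:end).^2) )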
% ====================== YOUR CODE HERE ======================
% Instructions: You should complete the code by working through the
%               following parts.
%
% Part 1: Feedforward the neural network and return the cost in the
%         variable J. After implementing Part 1, you can verify that your
%         cost function computation is correct by verifying the cost
%         computed in ex4.m
%
% Part 2: Implement the backpropagation algorithm to compute the gradients
%         Theta1_grad and Theta2_grad. You should return the partial derivatives of
%         the cost function with respect to Theta1 and Theta2 in Theta1_grad and
%         Theta2_grad, respectively. After implementing Part 2, you can check
%         that your implementation is correct by running checkNNGradients
%
% Note: The vector y passed into the function is a vector of labels
%       containing values from 1..K. You need to map this vector into a
%       binary vector of 1's and 0's to be used with the neural network
%       cost function.
%
% Hint: We recommend implementing backpropagation using a for-loop
%       over the training examples if you are implementing it for the
%       first time.
%
% Part 3: Implement regularization with the cost function and gradients.
%
% Hint: You can implement this around the code for
%       backpropagation. That is, you can compute the gradients for
%       the regularization separately and then add them to Theta1_grad
%       and Theta2_grad from Part 2.
%
% -------------------------------------------------------------
% =========================================================================
end
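% Usage sketch (a minimal example, assuming the helper functions provided by
% the ex4 exercise -- checkNNGradients, fmincg, and an unrolled
% initial_nn_params from randInitializeWeights -- are on the path; the
% MaxIter value is illustrative):
%
%   checkNNGradients(lambda);   % numerically verify the backprop gradients
%
%   costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ...
%                                  num_labels, X, y, lambda);
%   options = optimset('MaxIter', 50);
%   [nn_params, cost] = fmincg(costFunc, initial_nn_params, options);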