-
Notifications
You must be signed in to change notification settings - Fork 2
Python3 compatibility #4
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
4f90aab
4981b00
25b7008
7850745
7347802
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -2,64 +2,78 @@ | |
|
|
||
|
|
||
| def applySigmoid(x, giveMeTheDerivative = False): | ||
| if(giveMeTheDerivative == True): | ||
| return applySigmoid(x) * (1 - applySigmoid(x)) | ||
| return 1 / (1 + np.exp(-x)) | ||
|
|
||
| def print_data(iter, inputs, keys, weights, prediction): | ||
| print "This is iteration # ", iter | ||
| print "Your original input data was... \n", inputs | ||
| print "Your orignal keys were... \n", keys | ||
| print "Your weights at this specific iteration are... \n", weights | ||
| print "Our prediction at this iteration was... \n", prediction | ||
| print "--------------------------------------------------\n" | ||
| if(giveMeTheDerivative == True): | ||
| return applySigmoid(x) * (1 - applySigmoid(x)) | ||
| return 1 / (1 + np.exp(-x)) | ||
|
|
||
| def print_data(iter, inputs, keys, layer_one_weights, layer_two_weights, prediction): | ||
| print ("This is iteration # %d" % iter) | ||
| print ("Your original input data was...\n%s" % inputs) | ||
| print ("Your orignal keys were...\n%s" % keys) | ||
| print ("Layer one weights at this specific iteration are... \n%s" % layer_one_weights) | ||
| print ("Layer two weights at this specific iteration are... \n%s" % layer_two_weights) | ||
| print ("Our prediction at this iteration was...\n%s" % prediction) | ||
| print ("--------------------------------------------------\n") | ||
|
|
||
| def train(inputs, keys, layer_one_weights, layer_two_weights): | ||
| for iter in xrange(20000): | ||
| for iter in range(40000): | ||
|
|
||
| # Layer one will have its own inputs and they are the ones directly given to us from main. | ||
| layer_one_inputs = inputs; | ||
| # Layer one will have its own inputs and they are the ones directly given to us from main. | ||
| layer_one_inputs = inputs | ||
|
|
||
| # Predict just like in simple_mlp.py | ||
| layer_one_prediction = applySigmoid(np.dot(layer_one_inputs, layer_one_weights)) | ||
| # Predict just like in simple_mlp.py | ||
| layer_one_prediction = applySigmoid(np.dot(layer_one_inputs, layer_one_weights)) | ||
|
|
||
| # Take the prediction from layer one and forward proogate it to the second layer of weights for a final output. | ||
| layer_two_prediction = applySigmoid(np.dot(layer_one_prediction, layer_two_weights)) | ||
| # Take the prediction from layer one and forward proogate it to the second layer of weights for a final output. | ||
| layer_two_prediction = applySigmoid(np.dot(layer_one_prediction, layer_two_weights)) | ||
|
|
||
| # How much were we off by? | ||
| layer_two_error = keys - layer_two_prediction | ||
| # How much were we off by? | ||
| layer_two_error = keys - layer_two_prediction | ||
|
|
||
| # Change in error just like in simple_mlp.py | ||
| layer_two_change_in_error = layer_two_error * applySigmoid(layer_two_prediction, True) | ||
| # Change in error just like in simple_mlp.py | ||
| layer_two_change_in_error = layer_two_error * applySigmoid(layer_two_prediction, True) | ||
|
|
||
| # Figure out how wrong our output for layer_one was by seeing how wrong the layer_two_prediction was | ||
| layer_one_error = np.dot(layer_two_change_in_error, layer_two_weights.T) | ||
| # Figure out how wrong our output for layer_one was by seeing how wrong the layer_two_prediction was | ||
| layer_one_error = np.dot(layer_two_change_in_error, layer_two_weights.T) | ||
|
|
||
| # Just like in simple_mlp.py | ||
| layer_one_change_in_error = layer_one_error * applySigmoid(layer_one_error, True) | ||
| # Just like in simple_mlp.py | ||
| layer_one_change_in_error = layer_one_error * applySigmoid(layer_one_prediction, True) | ||
|
Owner
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Wait, why did this change? This should be `layer_one_error`.
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Again, this is inconsistent with the reference: I refer to the Wikipedia article on Backpropagation (see Phase 2: weight update, in the "Algorithm in code" section), where for each weight:
Give it a try on your own laptop, you will see the difference! |
||
|
|
||
| # adjust your weights accoridngly. | ||
| layer_one_weights += np.dot(layer_one_prediction.T, layer_one_change_in_error) | ||
| layer_two_weights += np.dot(layer_two_prediction.T, layer_two_change_in_error) | ||
| if iter == 0: | ||
| assert layer_one_prediction.shape[0] == 4 | ||
| assert layer_one_prediction.shape[1] == 4 | ||
| assert layer_two_prediction.shape[0] == 4 | ||
| assert layer_two_prediction.shape[1] == 1 | ||
| assert layer_one_weights.shape[0] == 3 | ||
| assert layer_one_weights.shape[1] == 4 | ||
| assert layer_two_weights.shape[0] == 4 | ||
| assert layer_two_weights.shape[1] == 1 | ||
|
|
||
| if(iter == 0 or iter == 5000 or iter == 9999): | ||
| print_data(iter, inputs, keys, weights, prediction) | ||
| # adjust your weights accoridngly. | ||
| layer_one_weights += np.dot(inputs.T, layer_one_change_in_error) | ||
| layer_two_weights += np.dot(layer_one_prediction.T, | ||
|
Owner
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'll look at this more later, but shouldn't this be
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Then it's inconsistent with the reference. Have you tried running your version? First off, it throws an error since the shapes do not match (see the PR commit message for details). Output After Training:
[[0.49949126]
[0.49944283]
[0.49888797]
[0.50120373]]
This is why I also made this change. Output After Training:
[[0.0052001 ]
[0.99379807]
[0.99356872]
[0.00481841]] |
||
| layer_two_change_in_error) | ||
|
|
||
| print "Output After Training:" | ||
| print prediction | ||
| if(iter == 0 or iter == 5000 or iter == 9999): | ||
| print_data(iter, | ||
| inputs, | ||
| keys, | ||
| layer_one_weights, | ||
| layer_two_weights, | ||
| layer_two_prediction) | ||
|
|
||
| def main(): | ||
| np.random.seed(1) | ||
| inputs = np.array( [[0,0,1], | ||
| [1,0,1], | ||
| [0,1,1], | ||
| [1,1,1]]) | ||
| print ("Output After Training:\n%s" % layer_two_prediction) | ||
|
|
||
| keys = np.array([[0,1,1,0]]).T | ||
| layer_one_weights = 2*np.random.random((3,4)) - 1 | ||
| layer_two_weights = 2*np.random.random((4,1)) - 1 | ||
| train(inputs, keys, layer_one_weights, layer_two_weights) | ||
| def main(): | ||
| np.random.seed(1) | ||
| inputs = np.array([[0,0,1], | ||
| [1,0,1], | ||
| [0,1,1], | ||
| [1,1,1]]) | ||
| keys = np.array([[0,1,1,0]]).T | ||
| layer_one_weights = 2 * np.random.random((3,4)) - 1 | ||
| layer_two_weights = 2 * np.random.random((4,1)) - 1 | ||
| train(inputs, keys, layer_one_weights, layer_two_weights) | ||
|
|
||
| if __name__ == "__main__": | ||
| main() | ||
|
|
||
| main() | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why did the spacing change here?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Whoops, I have been trying out vim and messed up the spacing. Reverting it :)