Add basic neural network experiments.
[plomrogue2-experiments] / neural / single_neuron_with_backprop.py
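
# A single artificial neuron ("end node"): sigmoid activation over a
# weighted, biased sum of its inputs, trained by backpropagation on small
# example training sets (boolean functions and simple geometric tests).
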
import math
import random

def sigmoid(x):
    # Logistic function: maps any real x into the interval (0, 1).
    return 1 / (1 + math.exp(-x))

def d_sigmoid(x):
    # Derivative of the sigmoid, computing sigmoid(x) only once.
    s = sigmoid(x)
    return s * (1 - s)
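
# Why d_sigmoid has that form: with s(x) = 1 / (1 + exp(-x)),
#   s'(x) = exp(-x) / (1 + exp(-x))**2 = s(x) * (1 - s(x))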

# Forward pass: weighted sum of the inputs plus bias, squashed through
# the sigmoid.  Intermediate values are stored on end_node so that
# backprop() can reuse them.
def result(inputs):
    end_node['inputs'] = inputs[:]
    s = 0
    for i in range(len(inputs)):
        s += inputs[i] * end_node['weights'][i]
    end_node['weighted_biased_input'] = s + end_node['bias']
    end_node['sigmoid_output'] = sigmoid(end_node['weighted_biased_input'])
    return end_node['sigmoid_output']

# Backward pass: one gradient-descent step on cost = (end_result - target)**2.
# See the chain-rule derivation below.
def backprop(end_result, target):
    d_cost_over_sigmoid_output = 2 * (end_result - target)
    d_sigmoid_output_over_weighted_biased_input = d_sigmoid(end_node['weighted_biased_input'])
    for i in range(len(end_node['weights'])):
        # The weighted sum varies with weight i at the rate of input i.
        d_weighted_biased_input_over_weight = end_node['inputs'][i]
        d_cost_over_weight = d_cost_over_sigmoid_output * d_sigmoid_output_over_weighted_biased_input * d_weighted_biased_input_over_weight
        end_node['weights'][i] -= d_cost_over_weight
    # The weighted sum varies with the bias at rate 1, so the bias gradient
    # is the same chain without the input factor.
    d_cost_over_bias = d_cost_over_sigmoid_output * d_sigmoid_output_over_weighted_biased_input
    end_node['bias'] -= d_cost_over_bias
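
# The chain rule behind backprop(), writing a = sigmoid(z),
# z = sum(w_i * x_i) + b, and C = (a - target)**2:
#   dC/dw_i = dC/da * da/dz * dz/dw_i = 2*(a - target) * a*(1 - a) * x_i
#   dC/db   = dC/da * da/dz * dz/db  = 2*(a - target) * a*(1 - a)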

# identity
training_set = [((0,), 0),
                ((1,), 1)]

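# To run a different experiment, comment out the set above and
# uncomment exactly one of the alternatives below.
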
# NOT
#training_set = [((0,), 1),
#                ((1,), 0)]

# AND
#training_set = [((0,0), 0),
#                ((1,0), 0),
#                ((0,1), 0),
#                ((1,1), 1)]

# OR
#training_set = [((0,0), 0),
#                ((1,0), 1),
#                ((0,1), 1),
#                ((1,1), 1)]

# NOT (with one irrelevant column)
#training_set = [((0,0), 1),
#                ((1,0), 0),
#                ((0,1), 1),
#                ((1,1), 0)]

# XOR (will fail: not linearly separable)
#training_set = [((0,0), 0),
#                ((1,0), 1),
#                ((0,1), 1),
#                ((1,1), 0)]

# 1 if above the f(x)=x line, else 0
#training_set = [((0,1), 1),
#                ((2,3), 1),
#                ((1,1), 0),
#                ((2,2), 0)]

# 1 if above f(x)=x**2, else 0 (will fail: a parabola boundary is not
# linearly separable)
#training_set = [((2,4), 0),
#                ((2,5), 1),
#                ((3,9), 0),
#                ((3,10), 1)]

# One output neuron: one randomly initialized weight per input column of
# the training set, plus a randomly initialized bias.
end_node = {'weights': [random.random() for _ in range(len(training_set[0][0]))],
            'bias': random.random()}
n_training_runs = 100
for _ in range(n_training_runs):
    print()
    for inputs, target in training_set:
        # Forward pass, cost for logging, then one backprop step.
        result_ = result(inputs)
        cost = (result_ - target)**2
        formatted_weights = ['%1.3f' % w for w in end_node['weights']]
        print("inputs %s target %s result %0.9f cost %0.9f weights [%s] bias %1.3f"
              % (inputs, target, result_, cost,
                 ','.join(formatted_weights), end_node['bias']))
        backprop(result_, target)
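
# Optional sanity check, commented out like the alternative training sets
# above: a finite-difference estimate of the bias gradient, to compare
# against d_cost_over_bias in backprop().  A sketch only; the helper name
# numeric_d_cost_over_bias is made up for illustration.
#def numeric_d_cost_over_bias(inputs, target, epsilon=1e-6):
#    original_bias = end_node['bias']
#    end_node['bias'] = original_bias + epsilon
#    cost_plus = (result(inputs) - target)**2
#    end_node['bias'] = original_bias - epsilon
#    cost_minus = (result(inputs) - target)**2
#    end_node['bias'] = original_bias
#    return (cost_plus - cost_minus) / (2 * epsilon)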