Clear neuron with backprop code.
[plomrogue2-experiments] / neural / single_neuron_with_backprop.py
import math

def sigmoid(x):
    return 1 / (1 + math.exp(-x))

def d_sigmoid(x):
    # derivative of the sigmoid: sigmoid(x) * (1 - sigmoid(x))
    return sigmoid(x) * (1 - sigmoid(x))

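# Not in the original file: tiny sanity checks for the helpers above;
# sigmoid(0) is exactly 0.5, and the sigmoid's slope peaks at 0.25 there.
assert sigmoid(0) == 0.5
assert abs(d_sigmoid(0) - 0.25) < 1e-12
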
class Node:

    def __init__(self, size):
        self.n_inputs = size
        self.weights = [0] * self.n_inputs
        self.bias = 0

    def output(self, inputs):
        # weighted sum of the inputs plus bias, squashed through the
        # sigmoid; intermediates are stored for later use in backprop
        self.inputs = inputs
        weighted_inputs_sum = 0
        for i in range(self.n_inputs):
            weighted_inputs_sum += inputs[i] * self.weights[i]
        self.weighted_biased_input = weighted_inputs_sum + self.bias
        self.sigmoid_output = sigmoid(self.weighted_biased_input)
        return self.sigmoid_output

    def backprop(self, target):
        # chain rule for the cost (sigmoid_output - target)**2
        d_cost_over_sigmoid_output = 2 * (self.sigmoid_output - target)
        d_sigmoid_output_over_weighted_biased_input = d_sigmoid(self.weighted_biased_input)
        for i in range(self.n_inputs):
            d_weighted_biased_input_over_weight = self.inputs[i]
            d_cost_over_weight = d_cost_over_sigmoid_output * d_sigmoid_output_over_weighted_biased_input * d_weighted_biased_input_over_weight
            self.weights[i] -= d_cost_over_weight
        # the bias gradient passes through the sigmoid too; the factor
        # d_weighted_biased_input_over_bias is 1 and so drops out
        d_cost_over_bias = d_cost_over_sigmoid_output * d_sigmoid_output_over_weighted_biased_input
        self.bias -= d_cost_over_bias


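# Not in the original file: a minimal finite-difference sanity check for the
# analytic gradients above, assuming the Node class as defined; the helper
# name gradient_check is hypothetical. It nudges one weight by epsilon in
# both directions and compares the numeric slope of the cost against the
# chain-rule derivative.
def gradient_check(node, inputs, target, i, epsilon=1e-6):
    output = node.output(inputs)
    analytic = (2 * (output - target)
                * d_sigmoid(node.weighted_biased_input)
                * inputs[i])
    node.weights[i] += epsilon
    cost_plus = (node.output(inputs) - target)**2
    node.weights[i] -= 2 * epsilon
    cost_minus = (node.output(inputs) - target)**2
    node.weights[i] += epsilon  # restore the original weight
    numeric = (cost_plus - cost_minus) / (2 * epsilon)
    return analytic, numeric
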
class TrainingUnit:

    def __init__(self, inputs, target):
        self.inputs = inputs
        self.target = target

# identity
#training_set = [TrainingUnit((0,), 0),
#                TrainingUnit((1,), 1)]

# NOT
#training_set = [TrainingUnit((0,), 1),
#                TrainingUnit((1,), 0)]

# AND
#training_set = [TrainingUnit((0,0), 0),
#                TrainingUnit((1,0), 0),
#                TrainingUnit((0,1), 0),
#                TrainingUnit((1,1), 1)]

# OR
#training_set = [TrainingUnit((0,0), 0),
#                TrainingUnit((1,0), 1),
#                TrainingUnit((0,1), 1),
#                TrainingUnit((1,1), 1)]

# identity (with one irrelevant column)
#training_set = [TrainingUnit((0,0), 0),
#                TrainingUnit((1,0), 1),
#                TrainingUnit((0,1), 0),
#                TrainingUnit((1,1), 1)]

# XOR (will fail, as Minsky/Papert say)
#training_set = [TrainingUnit((0,0), 0),
#                TrainingUnit((1,0), 1),
#                TrainingUnit((0,1), 1),
#                TrainingUnit((1,1), 0)]

# 1 if above f(x)=x line, else 0
training_set = [TrainingUnit((0,1), 1),
                TrainingUnit((2,3), 1),
                TrainingUnit((1,1), 0),
                TrainingUnit((2,2), 0)]

# 1 if above f(x)=x**2, else 0 (will fail: no linear separability)
#training_set = [TrainingUnit((2,4), 0),
#                TrainingUnit((2,5), 1),
#                TrainingUnit((3,9), 0),
#                TrainingUnit((3,10), 1)]

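# Not in the original file: a hedged sketch of how the parabola case above
# could become linearly separable through feature engineering, by feeding
# x**2 in as a third input column so the neuron only needs the linear rule
# y - x**2 > 0.
#training_set = [TrainingUnit((2,4,4), 0),
#                TrainingUnit((2,5,4), 1),
#                TrainingUnit((3,9,9), 0),
#                TrainingUnit((3,10,9), 1)]
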
# Train: one backprop step per training unit per run (the gradients are
# subtracted directly, i.e. with an implicit learning rate of 1).
end_node = Node(len(training_set[0].inputs))
n_training_runs = 100
for run in range(n_training_runs):
    print()
    for unit in training_set:
        result_ = end_node.output(unit.inputs)
        cost = (result_ - unit.target)**2
        formatted_inputs = []
        for value in unit.inputs:
            formatted_inputs += ['%2d' % value]
        formatted_weights = []
        for w in end_node.weights:
            formatted_weights += ['%1.3f' % w]
        print("inputs (%s) target %s result %0.9f cost %0.9f weights [%s] bias %1.3f" % (', '.join(formatted_inputs), unit.target, result_, cost, ', '.join(formatted_weights), end_node.bias))
        end_node.backprop(unit.target)
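
# Not in the original file: a minimal sketch of a final evaluation pass,
# assuming the training loop above has run; the sigmoid output is
# thresholded at 0.5 to get a hard classification per training unit.
print()
for unit in training_set:
    result = end_node.output(unit.inputs)
    classification = 1 if result > 0.5 else 0
    print("inputs %s target %s classified as %s (output %0.9f)"
          % (unit.inputs, unit.target, classification, result))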