[plomrogue2-experiments] / neural / simple_perceptron.py
class Perceptron:
    """A single hard-threshold unit: weighted inputs plus a bias."""

    def __init__(self, size):
        self.n_inputs = size
        self.weights = [0] * self.n_inputs
        self.bias = 0

    def output(self, inputs):
        step = 0  # If 0.5, we need no bias for AND and OR; if 0, none for NOT.
                  # With learning, the bias will slowly balance any choice.
        weighted_inputs_sum = 0
        for i in range(self.n_inputs):
            weighted_inputs_sum += inputs[i] * self.weights[i]
        if weighted_inputs_sum + self.bias >= step:
            return 1
        else:
            return 0

class TrainingUnit:
    """One training example: a tuple of inputs and the desired output."""

    def __init__(self, inputs, target):
        self.inputs = inputs
        self.target = target

# identity
#training_set = [TrainingUnit((0,), 0),
#                TrainingUnit((1,), 1)]

# NOT
#training_set = [TrainingUnit((0,), 1),
#                TrainingUnit((1,), 0)]

# AND
#training_set = [TrainingUnit((0,0), 0),
#                TrainingUnit((1,0), 0),
#                TrainingUnit((0,1), 0),
#                TrainingUnit((1,1), 1)]

# OR
#training_set = [TrainingUnit((0,0), 0),
#                TrainingUnit((1,0), 1),
#                TrainingUnit((0,1), 1),
#                TrainingUnit((1,1), 1)]

# identity of the first input (with one irrelevant column)
#training_set = [TrainingUnit((0,0), 0),
#                TrainingUnit((1,0), 1),
#                TrainingUnit((0,1), 0),
#                TrainingUnit((1,1), 1)]

# XOR (will fail, as Minsky/Papert say)
#training_set = [TrainingUnit((0,0), 0),
#                TrainingUnit((1,0), 1),
#                TrainingUnit((0,1), 1),
#                TrainingUnit((1,1), 0)]

# 1 if above f(x)=x line, else 0
training_set = [TrainingUnit((0,1), 1),
                TrainingUnit((2,3), 1),
                TrainingUnit((1,1), 0),
                TrainingUnit((2,2), 0)]

# 1 if above f(x)=x**2, else 0 (will fail: no linear separability)
#training_set = [TrainingUnit((2,4), 0),
#                TrainingUnit((2,5), 1),
#                TrainingUnit((3,9), 0),
#                TrainingUnit((3,10), 1)]

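# Training: the classic perceptron learning rule. After each example, the bias
# and the weights are nudged by adaption_step * (target - result) (times the
# respective input, for the weights), so correct answers change nothing. Passes
# over the training set repeat until one pass is error-free or max_rounds is hit.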
p = Perceptron(len(training_set[0].inputs))
adaption_step = 0.1
max_rounds = 100
for _ in range(max_rounds):
    print()
    go_on = False
    for unit in training_set:
        result_ = p.output(unit.inputs)
        formatted_inputs = []
        for value in unit.inputs:
            formatted_inputs += ['%2d' % value]
        formatted_weights = []
        for w in p.weights:
            formatted_weights += ['% .1f' % w]
        print('inputs (%s) target %s result %s correctness %5s '
              'weights [%s] bias % .1f'
              % (', '.join(formatted_inputs), unit.target, result_,
                 unit.target == result_, ', '.join(formatted_weights), p.bias))
        if unit.target != result_:
            go_on = True
        p.bias += adaption_step * (unit.target - result_)
        for i in range(p.n_inputs):
            p.weights[i] += adaption_step * (unit.target - result_) * unit.inputs[i]
    if not go_on:
        break
print()
if go_on:
    print('COULD NOT SOLVE WITHIN %s ROUNDS.' % max_rounds)
else:
    print('SUCCESS.')
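# Illustrative extra (not part of the original script; a minimal sketch that
# assumes the active "above f(x)=x" training set stays selected): after a
# SUCCESS run, the trained perceptron can classify points it never saw.
# The sample points below are arbitrary.
if not go_on:
    for point in [(0, 2), (3, 1), (4, 5)]:
        print('input %s -> output %s' % (point, p.output(point)))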