From: Christian Heller
Date: Mon, 13 May 2019 11:13:26 +0000 (+0200)
Subject: Clear neuron with backprop code.
X-Git-Url: https://plomlompom.com/repos/%7B%7Bdb.prefix%7D%7D/%7B%7B%20web_path%20%7D%7D/decks/index.html?a=commitdiff_plain;h=a466115714f7da37c45d3fd0d054d67f85a725f0;p=plomrogue2-experiments

Clear neuron with backprop code.
---

diff --git a/neural/single_neuron_with_backprop.py b/neural/single_neuron_with_backprop.py
index f438c7e..c1624b5 100755
--- a/neural/single_neuron_with_backprop.py
+++ b/neural/single_neuron_with_backprop.py
@@ -1,5 +1,3 @@
-import random
-
 def sigmoid(x):
     import math
     return 1 / (1 + math.exp(-x))
@@ -7,81 +5,95 @@ def sigmoid(x):
 def d_sigmoid(x):
     return sigmoid(x) * (1 - sigmoid(x))
 
-def result(inputs):
-    end_node['inputs'] = inputs[:]
-    s = 0
-    for i in range(len(inputs)):
-        s += inputs[i] * end_node['weights'][i]
-    end_node['weighted_biased_input'] = s + end_node['bias']
-    end_node['sigmoid_output'] = sigmoid(end_node['weighted_biased_input'])
-    return end_node['sigmoid_output']
-
-def backprop(end_result, target, cost):
-    d_cost_over_sigmoid_output = 2*(end_result - target)
-    for i in range(len(end_node['weights'])):
-        d_weighted_biased_input_over_weight = end_node['inputs'][i]
-        d_sigmoid_output_over_weighted_biased_input = d_sigmoid(end_node['weighted_biased_input'])
-        d_cost_over_weight = d_cost_over_sigmoid_output * d_sigmoid_output_over_weighted_biased_input * d_weighted_biased_input_over_weight
-        end_node['weights'][i] -= d_cost_over_weight
-    d_cost_over_bias = d_cost_over_sigmoid_output
-    end_node['bias'] -= d_cost_over_bias
+class Node:
+
+    def __init__(self, size):
+        self.n_inputs = size
+        self.weights = [0] * self.n_inputs
+        self.bias = 0
+
+    def output(self, inputs):
+        self.inputs = inputs
+        weighted_inputs_sum = 0
+        for i in range(self.n_inputs):
+            weighted_inputs_sum += inputs[i] * self.weights[i]
+        self.weighted_biased_input = weighted_inputs_sum + self.bias
+        self.sigmoid_output = sigmoid(self.weighted_biased_input)
+        return self.sigmoid_output
+
+    def backprop(self, target):
+        d_cost_over_sigmoid_output = 2*(self.sigmoid_output - target)
+        for i in range(self.n_inputs):
+            d_weighted_biased_input_over_weight = self.inputs[i]
+            d_sigmoid_output_over_weighted_biased_input = d_sigmoid(self.weighted_biased_input)
+            d_cost_over_weight = d_cost_over_sigmoid_output * d_sigmoid_output_over_weighted_biased_input * d_weighted_biased_input_over_weight
+            self.weights[i] -= d_cost_over_weight
+        d_cost_over_bias = d_cost_over_sigmoid_output * d_sigmoid(self.weighted_biased_input)
+        self.bias -= d_cost_over_bias
+
+
+class TrainingUnit:
+
+    def __init__(self, inputs, target):
+        self.inputs = inputs
+        self.target = target
 
 # identity
-training_set = [((0,), 0),
-                ((1,), 1)]
+#training_set = [TrainingUnit((0,), 0),
+#                TrainingUnit((1,), 1)]
 
 # NOT
-#training_set = [((0,), 1),
-#                ((1,), 0)]
+#training_set = [TrainingUnit((0,), 1),
+#                TrainingUnit((1,), 0)]
 
 # AND
-#training_set = [((0,0), 0),
-#                ((1,0), 0),
-#                ((0,1), 0),
-#                ((1,1), 1)]
+#training_set = [TrainingUnit((0,0), 0),
+#                TrainingUnit((1,0), 0),
+#                TrainingUnit((0,1), 0),
+#                TrainingUnit((1,1), 1)]
 
 # OR
-#training_set = [((0,0), 0),
-#                ((1,0), 1),
-#                ((0,1), 1),
-#                ((1,1), 1)]
+#training_set = [TrainingUnit((0,0), 0),
+#                TrainingUnit((1,0), 1),
+#                TrainingUnit((0,1), 1),
+#                TrainingUnit((1,1), 1)]
 
 # NOT (with one irrelevant column)
-#training_set = [((0,0), 1),
-#                ((1,0), 0),
-#                ((0,1), 1),
-#                ((1,1), 0)]
+#training_set = [TrainingUnit((0,0), 1),
+#                TrainingUnit((1,0), 0),
+#                TrainingUnit((0,1), 1),
+#                TrainingUnit((1,1), 0)]
 
-# XOR (will fail)
-#training_set = [((0,0), 0),
-#                ((1,0), 1),
-#                ((0,1), 1),
-#                ((1,1), 0)]
+# XOR (will fail, as Minsky/Papert say)
+#training_set = [TrainingUnit((0,0), 0),
+#                TrainingUnit((1,0), 1),
+#                TrainingUnit((0,1), 1),
+#                TrainingUnit((1,1), 0)]
 
 # 1 if above f(x)=x line, else 0
-#training_set = [((0,1), 1),
-#                ((2,3), 1),
-#                ((1,1), 0),
-#                ((2,2), 0)]
+training_set = [TrainingUnit((0,1), 1),
+                TrainingUnit((2,3), 1),
+                TrainingUnit((1,1), 0),
+                TrainingUnit((2,2), 0)]
 
 # 1 if above f(x)=x**2, else 0 (will fail: no linear separability)
-#training_set = [((2,4), 0),
-#                ((2,5), 1),
-#                ((3,9), 0),
-#                ((3,10), 1)]
+#training_set = [TrainingUnit((2,4), 0),
+#                TrainingUnit((2,5), 1),
+#                TrainingUnit((3,9), 0),
+#                TrainingUnit((3,10), 1)]
 
-end_node = {'weights': [random.random() for i in range(len(training_set[0][0]))],
-            'bias': random.random()}
+end_node = Node(len(training_set[0].inputs))
 
 n_training_runs = 100
 for i in range(n_training_runs):
     print()
-    for element in training_set:
-        inputs = element[0]
-        target = element[1]
-        result_ = result(inputs)
-        cost = (result_ - target)**2
+    for unit in training_set:
+        result_ = end_node.output(unit.inputs)
+        cost = (result_ - unit.target)**2
+        formatted_inputs = []
+        for i in unit.inputs:
+            formatted_inputs += ['%2d' % i]
         formatted_weights = []
-        for w in end_node['weights']:
+        for w in end_node.weights:
             formatted_weights += ['%1.3f' % w]
-        print("inputs %s target %s result %0.9f cost %0.9f weights [%s] bias %1.3f" % (inputs, target, result_, cost, ','.join(formatted_weights), end_node['bias']))
-        backprop(result_, target, cost)
+        print("inputs (%s) target %s result %0.9f cost %0.9f weights [%s] bias %1.3f" % (', '.join(formatted_inputs), unit.target, result_, cost, ', '.join(formatted_weights), end_node.bias))
+        end_node.backprop(unit.target)
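
In Node.backprop() above, the chain rule for cost = (sigmoid(w.x + b) - target)**2 gives each weight the gradient 2*(output - target) * d_sigmoid(z) * input_i, and the bias the same product without the input factor. A central finite-difference check is a quick way to confirm derivatives like these. The sketch below assumes the Node class and d_sigmoid() from the new version of the file; check_gradients() is an illustrative helper, not part of the module:

def check_gradients(node, inputs, target, eps=1e-6):
    # Analytic gradients, mirroring Node.backprop() without updating
    # any parameters.
    node.output(inputs)
    d_cost = 2 * (node.sigmoid_output - target)
    d_sig = d_sigmoid(node.weighted_biased_input)
    analytic = [d_cost * d_sig * x for x in inputs] + [d_cost * d_sig]

    def cost():
        return (node.output(inputs) - target)**2

    # Numerical gradients: nudge each parameter by eps in both
    # directions and take the central difference of the cost.
    numeric = []
    for i in range(node.n_inputs):
        node.weights[i] += eps
        cost_up = cost()
        node.weights[i] -= 2 * eps
        cost_down = cost()
        node.weights[i] += eps  # restore the original weight
        numeric += [(cost_up - cost_down) / (2 * eps)]
    node.bias += eps
    cost_up = cost()
    node.bias -= 2 * eps
    cost_down = cost()
    node.bias += eps  # restore the original bias
    numeric += [(cost_up - cost_down) / (2 * eps)]
    return analytic, numeric

# Example: with zero-initialized parameters, inputs (2, 3) and target 1,
# both lists should come out near [-0.5, -0.75, -0.25].
node = Node(2)
analytic, numeric = check_gradients(node, (2, 3), 1)
print(analytic)
print(numeric)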