Skip to content

Refactor the neural net benchmark #3

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
159 changes: 83 additions & 76 deletions time/bench_neural_net.rb
Original file line number Diff line number Diff line change
@@ -1,131 +1,136 @@
# from bryanbibat's gist: https://gist.github.com/2348802

# A weighted, directed connection between two neurons.
#
# Holds the current weight plus the previous weight so the training step
# can apply a momentum term. The endpoints are fixed at construction;
# only the weights are mutated during training.
class Synapse
  # Initial weights are drawn uniformly from this range.
  WEIGHT_RANGE = (-1.0..1.0).freeze

  attr_accessor :weight, :prev_weight
  attr_reader :source_neuron, :dest_neuron

  # @param source_neuron [Neuron] upstream neuron whose output feeds this synapse
  # @param dest_neuron [Neuron] downstream neuron receiving the weighted signal
  def initialize(source_neuron:, dest_neuron:)
    @source_neuron = source_neuron
    @dest_neuron = dest_neuron
    # Start prev_weight equal to weight so the first momentum term is zero.
    @prev_weight = @weight = rand(WEIGHT_RANGE)
  end
end

# A single logistic-activation neuron trained by backpropagation
# with a fixed learning rate and momentum.
#
# NOTE(review): in the original gist the momentum term is computed from the
# *pre-update* weight/threshold (`a += lr + MOMENTUM * (a - prev)` evaluates
# the right-hand side with the old `a`). The two-step `+=` refactor silently
# changed that to use the already-updated value; this version restores the
# original semantics.
class Neuron
  LEARNING_RATE = 1.0
  MOMENTUM = 0.3
  # Initial thresholds are drawn uniformly from this range.
  WEIGHT_RANGE = (-1.0..1.0).freeze

  attr_reader :synapses_in, :synapses_out
  attr_reader :threshold, :prev_threshold, :error
  attr_accessor :output

  def initialize
    @synapses_in = []
    @synapses_out = []
    # prev_threshold starts equal to threshold so the first momentum term is zero.
    @prev_threshold = @threshold = rand(WEIGHT_RANGE)
  end

  # Computes this neuron's output from the previous layer using the
  # logistic function 1 / (1 + e^-activation).
  #
  # @return [Float] the new output, also stored in #output
  def calculate_output
    activation = synapses_in.sum do |synapse|
      synapse.weight * synapse.source_neuron.output
    end - @threshold

    @output = 1.fdiv(Math.exp(-activation) + 1)
  end

  # Backpropagation step for an output-layer neuron.
  #
  # @param rate [Float] training rate for this pass
  # @param target [Numeric] desired output value
  def output_train(rate:, target:)
    @error = (target - @output) * derivative
    update_weights(rate)
  end

  # Backpropagation step for a hidden-layer neuron: the error is the
  # weighted sum of downstream errors, using the downstream synapses'
  # pre-update weights.
  #
  # @param rate [Float] training rate for this pass
  def hidden_train(rate:)
    @error = synapses_out.sum do |synapse|
      synapse.prev_weight * synapse.dest_neuron.error
    end * derivative
    update_weights(rate)
  end

  private

  # Derivative of the logistic function expressed in terms of the output.
  def derivative
    @output * (1 - @output)
  end

  def update_weights(rate)
    update_synapses_in(rate)
    update_thresholds(rate)
  end

  # Applies the learning and momentum terms to each incoming synapse.
  # The momentum is based on the weight *before* this update.
  def update_synapses_in(rate)
    @synapses_in.each do |synapse|
      prev_weight = synapse.weight
      synapse.weight += (LEARNING_RATE * rate * @error * synapse.source_neuron.output) +
                        (MOMENTUM * (prev_weight - synapse.prev_weight))
      synapse.prev_weight = prev_weight
    end
  end

  # Same update rule for the threshold; the "input" of the threshold is -1.
  def update_thresholds(rate)
    prev_threshold = @threshold
    @threshold += (LEARNING_RATE * rate * @error * -1) +
                  (MOMENTUM * (prev_threshold - @prev_threshold))
    @prev_threshold = prev_threshold
  end
end

# A fully-connected three-layer (input / hidden / output) feed-forward
# network trained by backpropagation via the Neuron training methods.
class NeuralNetwork
  # @param inputs [Integer] number of input-layer neurons
  # @param hidden [Integer] number of hidden-layer neurons
  # @param outputs [Integer] number of output-layer neurons
  def initialize(inputs:, hidden:, outputs:)
    @input_layer = Array.new(inputs) { Neuron.new }
    @hidden_layer = Array.new(hidden) { Neuron.new }
    @output_layer = Array.new(outputs) { Neuron.new }

    add_synapses(@input_layer, @hidden_layer)
    add_synapses(@hidden_layer, @output_layer)
  end

  # Runs one forward pass, then backpropagates against the targets.
  #
  # @param inputs [Array<Numeric>] one value per input neuron
  # @param targets [Array<Numeric>] one desired value per output neuron
  def train(inputs:, targets:)
    feed_forward(inputs)

    # Output layer first so hidden neurons see up-to-date downstream errors.
    @output_layer.zip(targets).each do |neuron, target|
      neuron.output_train(rate: 0.3, target: target)
    end
    @hidden_layer.each { |neuron| neuron.hidden_train(rate: 0.3) }
  end

  # Propagates the given input values through the network.
  #
  # @param inputs [Array<Numeric>] one value per input neuron
  def feed_forward(inputs)
    @input_layer.zip(inputs) { |neuron, input| neuron.output = input }
    @hidden_layer.each(&:calculate_output)
    @output_layer.each(&:calculate_output)
  end

  # @return [Array<Float>] the output-layer values from the last forward pass
  def current_outputs
    @output_layer.map(&:output)
  end

  private

  # Fully connects every neuron in source_layer to every neuron in
  # dest_layer, registering the synapse on both endpoints.
  def add_synapses(source_layer, dest_layer)
    source_layer.product(dest_layer) do |source_neuron, dest_neuron|
      synapse = Synapse.new(source_neuron: source_neuron,
                            dest_neuron: dest_neuron)

      source_neuron.synapses_out << synapse
      dest_neuron.synapses_in << synapse
    end
  end
end

require 'benchmark'

# Benchmark driver: trains a 2-10-1 network on XOR and times each run.
# ARGV[0] is the number of benchmark iterations (default 5).
(ARGV.fetch(0) { 5 }).to_i.times do
  results = Benchmark.measure do
    xor = NeuralNetwork.new(inputs: 2, hidden: 10, outputs: 1)

    # 10k epochs over the four XOR training pairs.
    10_000.times do
      xor.train(inputs: [0, 0], targets: [0])
      xor.train(inputs: [1, 0], targets: [1])
      xor.train(inputs: [0, 1], targets: [1])
      xor.train(inputs: [1, 1], targets: [0])
    end

    # Print the trained network's answer for each XOR input.
    xor.feed_forward([0, 0])
    puts xor.current_outputs
    xor.feed_forward([0, 1])
    puts xor.current_outputs
    xor.feed_forward([1, 0])
    puts xor.current_outputs
    xor.feed_forward([1, 1])
    puts xor.current_outputs
  end

  puts results
end