Commit 43f8e6a1 authored by Artem Oppermann

Update model_v2.py

parent 30ddfdcc
@@ -2,28 +2,29 @@ import numpy as np
 from data_gen import gen_data
 import random
 
-n_input = 100
-n_output=4
-val_after_iter=25
-n_samples=1000
-learning_rate=0.05
+N_INPUT = 100        # number of input features
+N_OUTPUT = 4         # number of output classes
+val_after_iter = 10  # report training progress every this many iterations
+n_samples = 500      # number of data samples used during training
+learning_rate = 0.1  # learning rate for gradient descent
 class Model:
 
-    def __init__(self, nodes=[n_input, 25, n_output]):
+    def __init__(self, nodes=[N_INPUT, 50, N_OUTPUT]):
 
         self.nodes=nodes
         self.num_layer=len(self.nodes)
         self.weights=[]
 
+        # initialize the weight matrices of the network
         for i in range(0,self.num_layer-1):
             temp_weights=np.random.normal(loc=0.0, scale=0.4, size=(self.nodes[i], self.nodes[i+1]))
             self.weights.append(temp_weights)
 
     def tanh(self, x, derivative):
+        '''Hyperbolic tangent.'''
         if not derivative:
             return np.tanh(x)
@@ -31,6 +32,7 @@ class Model:
         return (1-(np.tanh(x)**2))
 
     def sigmoid(self, x, derivative):
+        '''Sigmoid function.'''
         if not derivative:
             return 1/(1+np.exp(-x))
@@ -38,6 +40,7 @@ class Model:
         return self.sigmoid(x, False)*(1-self.sigmoid(x, False))
 
     def relu(self, x, derivative):
+        '''Rectified linear unit.'''
        if not derivative:
             return x*(x>0)
@@ -47,7 +50,7 @@ class Model:
     def activation(self, x, derivative=False, f='sigmoid'):
         '''Activation function (sigmoid by default)
         @param x: input data
-        @param derivative: boolean if we need a derivative of the sigmoid'''
+        @param derivative: boolean indicating whether the derivative of the activation is needed'''
         if f=='sigmoid':
             a=self.sigmoid
@@ -61,36 +64,15 @@ class Model:
         else:
             return a(x,True)
 
-    def forward_step(self, x):
-
-        z_array=[] # dot product solution, before activation
-        a_array=[]
-        a_array.append(x) # the inner states
-        outputs=[]
-
-        #Forward propagation
-        for i in range(0, self.num_layer-1):
-            z=np.dot(self.weights[i].T,a_array[i])
-            z_array.append(z)
-            a=self.activation(z_array[i], False)
-            a_array.append(a)
-
-        outputs.append(a_array[-1])
-        return outputs
-
     def mean_squared_error(self, output, target):
+        '''Computes the mean squared error between the network output and the target.'''
         return np.sum(np.power(target-output,2))/len(output)
 
-    def accuracy(self, output, target):
-        mean_error=np.sum(abs(output-target))/len(output)
-        return (1-mean_error)
-
     def train(self, data):
+        '''Train the neural network.
+        @param data: list of samples, each holding 100 features and 1 label
+        '''
         random.shuffle(data)
@@ -99,36 +81,40 @@ class Model:
             outputs=[]
             error=0
 
+            # iterate over the dataset
             for n in range(0,n_samples):
 
-                x=data[n][0]
-                label=data[n][1]
-                x=np.reshape(x, [100,1])
+                x=data[n][0]              # take the features
+                label=data[n][1]          # take the label
+                x=np.reshape(x, [100,1])  # bring the features into the right shape
 
                 zeros=np.zeros(shape=[4,1])
                 zeros[label]=1
                 y=zeros
 
-                z_=[]
-                a_=[]
+                z_=[]    # storage for neuron values before activation
+                a_=[]    # storage for neuron values after activation
                 a_.append(x)
                 delta=[]
-                dEdW=[]
+                dEdW=[]  # storage for the weight gradient matrices
 
+                # forward step
                 for i in range(0, self.num_layer-1):
                     z=np.dot(self.weights[i].T,a_[i])
                     z_.append(z)
                     a=self.activation(z_[i], False)
                     a_.append(a)
 
                 outputs.append(a_[-1])
 
+                # backpropagation:
+                # compute the gradient matrix for the last weight matrix
                 temp_delta=-(y-a_[-1])*self.activation(z_[-1],True)
                 delta.append(temp_delta)
                 temp_dEdW=np.outer(a_[-2],temp_delta)
                 dEdW.append(temp_dEdW)
 
+                # compute the gradient matrices for the remaining layers
                 for i in range(0, self.num_layer-2):
                     temp_delta=np.dot(self.weights[self.num_layer-(i+2)],delta[i])*self.activation(z_[self.num_layer-(i+3)],True)
                     delta.append(temp_delta)
@@ -139,18 +125,21 @@ class Model:
                 for i in range(0, self.num_layer-1):
                     self.weights[i]=self.weights[i]-learning_rate*dEdW[self.num_layer-(i+2)]
 
+                # accumulate the mean squared error
                 e_=self.mean_squared_error(a_[-1],y)
                 error+=e_
 
+                # report the averaged error at regular intervals
                 if n>0 and n%val_after_iter==0:
-                    print('epoch_nr.: %i, n_sample: %i, mse: %.3f' %(epoch, n, (error/val_after_iter)))
+                    print('epoch_nr.: %i, n_sample: %i, avg. mse: %.3f' %(epoch, n, (error/val_after_iter)))
                     error=0
 
-data=gen_data()
-model=Model()
-model.train(data)
+if __name__ == "__main__":
+
+    data=gen_data(n_samples)
+    model=Model()
+    model.train(data)
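
Note on the backpropagation block in train(): per sample, the loop implements the standard delta recursion and gradient-descent update. Writing W^(l) for weights[l] (the matrix between layers l and l+1), z^(l) for the pre-activations, a^(l) for the activations, f for the activation function, and eta for the learning rate, and up to a constant factor that is absorbed by the learning rate, the computation is:

\begin{aligned}
z^{(l+1)} &= \big(W^{(l)}\big)^{\top} a^{(l)}, \qquad a^{(l+1)} = f\big(z^{(l+1)}\big) \\
\delta^{(L)} &= -\big(y - a^{(L)}\big) \odot f'\big(z^{(L)}\big) \\
\delta^{(l)} &= \big(W^{(l)} \delta^{(l+1)}\big) \odot f'\big(z^{(l)}\big) \\
\frac{\partial E}{\partial W^{(l)}} &= a^{(l)} \big(\delta^{(l+1)}\big)^{\top}, \qquad
W^{(l)} \leftarrow W^{(l)} - \eta \,\frac{\partial E}{\partial W^{(l)}}
\end{aligned}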
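
The script imports gen_data from data_gen, which is not part of this commit. Purely as an illustration of the interface the training loop assumes, here is a minimal hypothetical stand-in: gen_data(n) returns a list of (features, label) pairs, where features holds 100 values and label is an integer class in {0, 1, 2, 3}; the actual implementation in data_gen.py may differ.

import numpy as np

def gen_data(n):
    '''Hypothetical stand-in for data_gen.gen_data (not the real module).
    Returns n samples as (features, label) pairs in the shapes that
    Model.train() expects: 100 features and an integer label in {0,...,3}.'''
    data = []
    for _ in range(n):
        label = np.random.randint(0, 4)  # class label in {0, 1, 2, 3}
        # 100 features whose mean depends on the class (illustrative only)
        features = np.random.normal(loc=float(label), scale=1.0, size=100)
        data.append((features, label))
    return data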