Commit 43f8e6a1 authored May 15, 2018 by Artem Oppermann

Update model_v2.py

parent 30ddfdcc
Showing 1 changed file with 39 additions and 50 deletions

model v2/model_v2.py  +39 -50
@@ -2,28 +2,29 @@ import numpy as np
 from data_gen import gen_data
 import random
 
-n_input = 100
-n_output = 4
+N_INPUT = 100  # number of input features
+N_OUTPUT = 4   # number of output classes
 
-val_after_iter = 25
-n_samples = 1000
+val_after_iter = 10  # evaluate the training progress after this many iterations
+n_samples = 500      # how many data samples should be used during training
 
-learning_rate = 0.05
+learning_rate = 0.1  # learning rate
 
 class Model:
 
-    def __init__(self, nodes=[n_input, 25, n_output]):
+    def __init__(self, nodes=[N_INPUT, 50, N_OUTPUT]):
 
         self.nodes = nodes
         self.num_layer = len(self.nodes)
         self.weights = []
 
         # initialize the weights for the NN
         for i in range(0, self.num_layer - 1):
             temp_weights = np.random.normal(loc=0.0, scale=0.4, size=(self.nodes[i], self.nodes[i + 1]))
             self.weights.append(temp_weights)
 
     def tanh(self, x, derivative):
         '''Tangens hyperbolicus.'''
         if not derivative:
             return np.tanh(x)
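With the new default architecture [N_INPUT, 50, N_OUTPUT], the constructor builds one weight matrix per pair of adjacent layers. A quick shape check, a sketch that only mirrors the loop above with plain NumPy:

import numpy as np

nodes = [100, 50, 4]
weights = [np.random.normal(loc=0.0, scale=0.4, size=(nodes[i], nodes[i + 1]))
           for i in range(len(nodes) - 1)]
print([w.shape for w in weights])  # [(100, 50), (50, 4)]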
@@ -31,6 +32,7 @@ class Model:
         return (1 - (np.tanh(x) ** 2))
 
     def sigmoid(self, x, derivative):
         '''Sigmoid function.'''
         if not derivative:
             return 1 / (1 + np.exp(-x))
@@ -38,6 +40,7 @@ class Model:
         return self.sigmoid(x, False) * (1 - self.sigmoid(x, False))
 
     def relu(self, x, derivative):
         '''Rectified linear unit.'''
         if not derivative:
             return x * (x > 0)
@@ -47,7 +50,7 @@ class Model:
     def activation(self, x, derivative=False, f='sigmoid'):
         '''Activation function (sigmoid by default).
         @param x: input data
-        @param derivative: boolean if we need a derivative of the sigmoid
+        @param derivative: boolean if the derivative of the activation is needed.
         '''
         if f == 'sigmoid':
             a = self.sigmoid
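The derivative flag shared by all three activations can be sanity-checked against a finite difference. A minimal sketch, assuming only NumPy and the same "if not derivative" convention as the methods above:

import numpy as np

def sigmoid(x, derivative):
    # mirrors Model.sigmoid: value when derivative=False, slope when True
    if not derivative:
        return 1 / (1 + np.exp(-x))
    return sigmoid(x, False) * (1 - sigmoid(x, False))

x, eps = 0.5, 1e-6
numeric = (sigmoid(x + eps, False) - sigmoid(x - eps, False)) / (2 * eps)
print(np.isclose(sigmoid(x, True), numeric))  # True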
@@ -60,37 +63,16 @@ class Model:
             return a(x, False)
         else:
             return a(x, True)
 
-    def forward_step(self, x):
-        z_array = []  # dot product result, before activation
-        a_array = []
-        a_array.append(x)  # the inner states
-        outputs = []
-
-        # forward propagation
-        for i in range(0, self.num_layer - 1):
-            z = np.dot(self.weights[i].T, a_array[i])
-            z_array.append(z)
-            a = self.activation(z_array[i], False)
-            a_array.append(a)
-
-        outputs.append(a_array[-1])
-        return outputs
-
     def mean_squared_error(self, output, target):
         '''Computes the mean squared error between the output of the nn and the target.'''
         return np.sum(np.power(target - output, 2)) / len(output)
 
     def accuracy(self, output, target):
         mean_error = np.sum(abs(output - target)) / len(output)
         return (1 - mean_error)
 
     def train(self, data):
         '''Training of the neural network.
         @param data: matrix that contains 100 features and 1 label
         '''
         random.shuffle(data)
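Both metrics operate on [4, 1] column vectors (network output vs. one-hot target). A worked example with made-up values:

import numpy as np

target = np.array([[0.0], [0.0], [1.0], [0.0]])  # one-hot label for class 2
output = np.array([[0.1], [0.0], [0.8], [0.1]])  # hypothetical network output

mse = np.sum(np.power(target - output, 2)) / len(output)
print(mse)  # (0.01 + 0.0 + 0.04 + 0.01) / 4 = 0.015

mean_error = np.sum(abs(output - target)) / len(output)
print(1 - mean_error)  # 1 - 0.4 / 4 = 0.9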
@@ -99,36 +81,40 @@ class Model:
         outputs = []
         error = 0
 
         # iterate over the dataset
         for n in range(0, n_samples):
 
-            x = data[n][0]
-            label = data[n][1]
+            x = data[n][0]      # take the features
+            label = data[n][1]  # take the labels
 
-            x = np.reshape(x, [100, 1])
+            x = np.reshape(x, [100, 1])  # bring the features into the right shape
 
             zeros = np.zeros(shape=[4, 1])
             zeros[label] = 1
             y = zeros
 
-            z_ = []
-            a_ = []
+            z_ = []  # storage of neuron values before activation
+            a_ = []  # storage of neuron values after activation
             a_.append(x)
 
-            delta = []
-            dEdW = []
+            delta = []
+            dEdW = []  # storage of weight gradient matrices
 
             # forward step
             for i in range(0, self.num_layer - 1):
                 z = np.dot(self.weights[i].T, a_[i])
                 z_.append(z)
                 a = self.activation(z_[i], False)
                 a_.append(a)
             outputs.append(a_[-1])
 
             # backpropagation
             # compute the gradient matrix for the last layer
             temp_delta = -(y - a_[-1]) * self.activation(z_[-1], True)
             delta.append(temp_delta)
             temp_dEdW = np.outer(a_[-2], temp_delta)
             dEdW.append(temp_dEdW)
 
             # compute the gradient matrices for the rest
             for i in range(0, self.num_layer - 2):
                 temp_delta = np.dot(self.weights[self.num_layer - (i + 2)], delta[i]) * self.activation(z_[self.num_layer - (i + 3)], True)
                 delta.append(temp_delta)
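The two gradient blocks above are standard backpropagation for the squared-error loss. Writing W_l for the weight matrix between layers l and l+1, a^(l) and z^(l) for the activations and pre-activations of layer l, f for the activation function, and eta for learning_rate, the loop computes (a restatement for clarity, not part of the commit):

\delta^{(L)} = -\bigl(y - a^{(L)}\bigr) \odot f'\bigl(z^{(L)}\bigr)
\delta^{(l)} = \bigl(W_l\, \delta^{(l+1)}\bigr) \odot f'\bigl(z^{(l)}\bigr)
\frac{\partial E}{\partial W_l} = a^{(l)} \bigl(\delta^{(l+1)}\bigr)^{\top}
W_l \leftarrow W_l - \eta\, \frac{\partial E}{\partial W_l}

The index arithmetic self.num_layer - (i + 2) and self.num_layer - (i + 3) only accounts for delta being filled back to front while z_ and a_ are filled front to back; the weight update itself appears in the next hunk.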
@@ -139,18 +125,21 @@ class Model:
             for i in range(0, self.num_layer - 1):
                 self.weights[i] = self.weights[i] - learning_rate * dEdW[self.num_layer - (i + 2)]
 
             # compute the mean squared error
             e_ = self.mean_squared_error(a_[-1], y)
             error += e_
 
             # make an evaluation of the error progress
             if n > 0 and n % val_after_iter == 0:
-                print('epoch_nr.: %i, n_sample: %i, mse: %.3f' % (epoch, n, (error / val_after_iter)))
+                print('epoch_nr.: %i, n_sample: %i, avg. mse: %.3f' % (epoch, n, (error / val_after_iter)))
                 error = 0
 
-data = gen_data()
-model = Model()
-model.train(data)
+if __name__ == "__main__":
+
+    data = gen_data(n_samples)
+    model = Model()
+    model.train(data)
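The script now calls gen_data(n_samples) from data_gen, which is not part of this diff. For running the model stand-alone, a stand-in generator might look like this; it is an assumption about the data layout, inferred only from how train() indexes data[n][0] and data[n][1]:

import numpy as np

def gen_data(n_samples=500):
    # hypothetical stand-in: (features, label) pairs with 100 random
    # features and an integer class label in {0, 1, 2, 3}
    data = []
    for _ in range(n_samples):
        x = np.random.normal(size=100)   # 100 input features
        label = np.random.randint(0, 4)  # one of 4 output classes
        data.append((x, label))
    return data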