-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmodel.py
105 lines (66 loc) · 2.74 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import keras
from keras import layers
from keras.models import Sequential, Model
from keras.layers import Dense, Flatten, Conv3D, MaxPooling3D, Dropout
from keras.layers import Activation, Input
from keras.regularizers import l2
import numpy as np
from keras.initializers import random_normal, constant
def fully_connected(x, nf, name, weight_decay):
    """Apply a dense (fully connected) layer to tensor ``x``.

    Args:
        x: Input Keras tensor.
        nf: Number of output units.
        name: Layer name.
        weight_decay: Currently unused (L2 regularization is disabled);
            kept in the signature for compatibility with existing callers.

    Returns:
        The output tensor of the dense layer.
    """
    dense = Dense(
        nf,
        name=name,
        kernel_initializer=random_normal(stddev=0.01),
        bias_initializer=constant(0.0),
    )
    return dense(x)
def conv3d(x, nf, ks, name, weight_decay):
    """Apply a same-padded 3D convolution to tensor ``x``.

    Args:
        x: Input Keras tensor.
        nf: Number of filters.
        ks: Cubic kernel size (kernel is ``(ks, ks, ks)``).
        name: Layer name.
        weight_decay: Currently unused (L2 regularization is disabled);
            kept in the signature for compatibility with existing callers.

    Returns:
        The output tensor of the convolution.
    """
    # BUG FIX: the original built the Conv3D layer but never called it on x
    # (missing the trailing ``(x)``), so it returned a layer object instead
    # of a tensor — compare fully_connected(), which applies its layer.
    x = Conv3D(nf, (ks, ks, ks), padding='same', name=name,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0))(x)
    return x
def relu(x):
    """Apply a ReLU activation to tensor ``x`` and return the result."""
    activation = Activation('relu')
    return activation(x)
def pooling(x, ks):
    """Downsample tensor ``x`` with cubic max pooling of size ``ks``."""
    return MaxPooling3D(pool_size=(ks, ks, ks))(x)
def softmax(x):
    """Apply a softmax activation to tensor ``x`` and return the result."""
    activation = Activation('softmax')
    return activation(x)
def get_training_model(sample_shape, dimension=3, layer_name='block1_conv3d', weight_decay=5e-4):
    """Build a 3D-CNN classifier as a Keras ``Sequential`` model.

    Architecture: two Conv3D + MaxPooling3D + Dropout stages (32 then 64
    filters), flattened into two dense layers (256, 128 units) with dropout,
    ending in a 10-way softmax output.

    Args:
        sample_shape: Shape of one input sample, e.g. ``(D, H, W, C)``.
        dimension: Unused; kept for backward compatibility with callers.
        layer_name: Unused; kept for backward compatibility with callers.
        weight_decay: Unused (regularization disabled); kept for backward
            compatibility with callers.

    Returns:
        An uncompiled ``keras.models.Sequential`` model.
    """
    # Removed dead code from the original: an ``inputs``/``Input`` pair that
    # was built but never used (leftover from an abandoned functional-API
    # version), plus the commented-out functional-API construction itself.
    model = Sequential()
    model.add(Conv3D(32, (3, 3, 3), activation='relu',
                     kernel_initializer='he_uniform',
                     padding='same', input_shape=sample_shape))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Dropout(0.5))
    # Second conv stage: no padding specified, so spatial dims shrink by 2.
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(256, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dropout(0.5))
    # 10-class softmax head.
    model.add(Dense(10, activation='softmax'))
    return model