- If you took the course before June 2023, refer to the before202306 branch.
- If you took the course before 2021, or want the TensorFlow-version code, refer to https://github.com/Finfra/TensorflowStudyExample.
This is hands-on code for learning Deep Learning with Keras.
The code and comments are written by NamJungGu nowage@gmail.com
Maintained by SungKukKim nackchun@gmail.com
This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
- Python : 3.10
- TensorFlow : 2.12.0
- Colab : 2023.07
- CUDA : cu118
- cf) Preinstalled environment : https://github.com/Finfra/gdm
Please report bugs to nackchun@gmail.com
- Supported versions
The GitHub repository is at https://github.com/Finfra/KerasStudy
NamJungGu, <nowage[at]gmail.com>
(c) Copyright 2005-2021 by finfra.com
- Keras Home : https://keras.io/kr
- Keras(github) : https://github.com/keras-team/keras
- SlideShare: a quick tour of Keras (Keras 빨리 훑어보기) : https://www.slideshare.net/madvirus/keras-intro
- DeepBrick for Keras : https://tykimos.github.io/DeepBrick/
- Keras Talk (케라스 이야기) : https://tykimos.github.io/2017/01/27/Keras_Talk/
- Keras Example : https://github.com/tgjeon/Keras-Tutorials.git
- Keras Tutorial (Data Science School) : https://datascienceschool.net/view-notebook/995bf4ee6c0b49f8957320f0a2fea9f0/
- tensorflow.keras
- keras.engine.sequential.Sequential
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(Dense(4, input_shape=(4,)))  # summary() requires a built model
model.summary()
- Image version of model.summary()
from tensorflow.keras.utils import plot_model

plot_model(model, to_file='vgg.png')
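plot_model also takes documented options to annotate the diagram; a small sketch, reusing the same model:
plot_model(model, to_file='vgg.png', show_shapes=True, show_layer_names=True)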
from tensorflow.keras.optimizers import RMSprop

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
from matplotlib import pyplot as plt
import numpy as np

i = plt.imread("/content/DeepLearningStudy/data/MNIST_Simple/test/0/0_1907.png")
img = i[:, :, 1:2].reshape(1, 28, 28, 1)  # take one channel, add the batch dimension
print(np.argmax(model.predict(img), axis=1))  # predict_classes was removed in recent TF
- Saving the model
model.save("/content/gdrive/mnist.h5")
- Saving the model as a JSON file and the weights in HDF5 format
model_json = model.to_json()
with open("/content/mnist_mlp_model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("/content/mnist_mlp_model.h5")
from tensorflow.keras.models import load_model

model = load_model("/content/gdrive/mnist.h5")
- Loading a model saved as JSON with HDF5 weights
from tensorflow.keras.models import model_from_json

with open('/content/gdrive/mnist_mlp_model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("/content/gdrive/mnist_mlp_model.h5")
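A model restored from JSON + weights carries no training configuration, so it must be compiled again before evaluate() or fit(). A minimal sketch, assuming the same loss/metrics as above and that x_test/y_test are available:
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(loaded_model.evaluate(x_test, y_test, verbose=0))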
- Saving
model.add(layers.Conv2D(32, (3, 3), ..., name='c1'))
<<~ Omitted ~>>
model.save_weights("/content/gdrive/mnist_model.h5")
- Reading
import h5py
import numpy as np

filename = "/content/gdrive/mnist_model.h5"
h5 = h5py.File(filename, 'r')
print(h5.keys())                    # one top-level group per named layer
b = h5['c1']['c1']['bias:0']        # bias of the layer saved with name='c1'
k = h5['c1']['c1']['kernel:0']      # kernel of shape (3, 3, 1, 32)
bb = np.array(b)
print(bb)
kk = np.array(k)
kk[:, :, :, 0].reshape((3, 3))      # first filter as a 3x3 matrix
h5.close()
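The same tensors can be read back through the Keras API instead of opening the HDF5 file by hand; a minimal sketch, assuming the model above is still in memory:
k_api, b_api = model.get_layer('c1').get_weights()  # returns [kernel, bias] for Conv2D
print(np.allclose(k_api, kk), np.allclose(b_api, bb))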
- Usage of callbacks : https://keras.io/ko/callbacks/
epochs = 40
batch_size = 100
![ ! -d /content/ckpt ] && mkdir /content/ckpt
from tensorflow.keras.callbacks import ModelCheckpoint

filename = f'/content/ckpt/checkpoint-epoch-{epochs}-batch-{batch_size}-trial-001.h5'
checkpoint = ModelCheckpoint(filename,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='auto')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
hist = model.fit(train_images,
                 train_labels,
                 batch_size=batch_size,
                 epochs=epochs,
                 validation_data=(test_images, test_labels),
                 callbacks=[checkpoint])
- Check
!ls /content/ckpt
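The best checkpoint can be loaded back as a complete model; a minimal sketch, reusing the filename defined above:
from tensorflow.keras.models import load_model

best_model = load_model(filename)
print(best_model.evaluate(test_images, test_labels, verbose=0))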
- Stop when val_loss shows no improvement for 25 epochs in a row
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', patience=25)
model.fit(X, Y, callbacks=[early_stopping])
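EarlyStopping can also roll the weights back to the best epoch via its documented restore_best_weights flag; a minimal sketch, assuming a validation split is acceptable:
early_stopping = EarlyStopping(monitor='val_loss', patience=25, restore_best_weights=True)
model.fit(X, Y, validation_split=0.2, callbacks=[early_stopping])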
from tensorflow.keras.callbacks import TensorBoard
import datetime
![ ! -d /content/logs/my_board/ ] && mkdir -p /content/logs/my_board/
log_dir = "/content/logs/my_board/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
%load_ext tensorboard
%tensorboard --logdir {log_dir}
history = model.fit(X, Y, callbacks=[tensorboard_callback])
## Source : Visualizing the training process (including TensorBoard) : https://tykimos.github.io/2017/07/09/Training_Monitoring/
import tensorflow as tf
from tensorflow.keras.callbacks import LearningRateScheduler

def scheduler(epoch):
    if epoch < 10:
        return 0.001
    else:
        return 0.001 * tf.math.exp(0.1 * (10 - epoch))

learning_rate_scheduler = LearningRateScheduler(scheduler)
model.fit(dataset, epochs=100, callbacks=[learning_rate_scheduler])
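The schedule can be sanity-checked before training by calling the function directly; a small sketch:
for epoch in [0, 9, 10, 20, 50]:
    print(epoch, float(scheduler(epoch)))  # constant 0.001 up to epoch 9, then exponential decay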
## Source : Using TensorFlow callbacks : https://jins-sw.tistory.com/27
from tensorflow.keras.callbacks import LambdaCallback
print_weights = LambdaCallback(on_epoch_end=lambda epoch, logs: print("\n",model.layers[3].get_weights()))
history = model.fit(X,Y,callbacks=[print_weights])
## Source : https://rarena.tistory.com/entry/keras트레이닝-된되고있는-weigth값-확인 [deep dev]
import tensorflow as tf

class MyCallback(tf.keras.callbacks.Callback):
    def __init__(self, name):
        super().__init__()
        self.name = name
        self.previous_loss = None

    def on_epoch_begin(self, epoch, logs=None):
        print('\nFrom {}: Epoch {} is starting'.format(self.name, epoch + 1))

    def on_epoch_end(self, epoch, logs=None):
        print('\nFrom {}: Epoch {} ended.'.format(self.name, epoch + 1))
        if epoch > 0:
            if logs['loss'] < self.previous_loss:
                print('From {}: loss got better! {:.4f} -> {:.4f}'.format(self.name, self.previous_loss, logs['loss']))
        self.previous_loss = logs['loss']

    def on_train_batch_begin(self, batch, logs=None):
        print('\nFrom {}: Batch {} is starting.'.format(self.name, batch + 1))

    def on_train_batch_end(self, batch, logs=None):
        print('\nFrom {}: Batch {} ended'.format(self.name, batch + 1))

first_callback = MyCallback('1st callback')
history = model.fit(X, Y, callbacks=[first_callback])
## Source : Using TensorFlow callbacks : https://jins-sw.tistory.com/27
- Basic
from tensorflow.keras.layers import Dense
model.add(Dense(4, activation='relu', input_shape=(4,)))
- softmax
model.add(Dense(4, activation='softmax'))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(28, 28, 1), kernel_regularizer='l2'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
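The fragments above assemble into a minimal CNN; a sketch, assuming a 10-class softmax head (the output size is an arbitrary choice):
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

model = Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same',
                        input_shape=(28, 28, 1), kernel_regularizer='l2'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='softmax'))
model.summary()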
- hihello
hihello
- 떳다떳다비행기 (Korean children's song "Airplane")
떳다떳다비행기날아라날아라높이높이날아라우리비행기내가만든비행기날아라날아라높이높이날아라우리비행기
- 미래도래미미 (solfège melody: mi-re-do-re-mi-mi)
미래도래미미래래래미미미미래도래미미미래래미래도미래도래미미래래래미미미미래도래미미미래래미래도
- 주기도문 (the Lord's Prayer, Korean)
하늘에 계신 우리 아버지여,이름이 거룩히 여김을 받으시오며,나라이 임하옵시며,뜻이 하늘에서 이룬 것 같이땅에서도 이루어지이다. 오늘날 우리에게 일용한 양식을 주옵시고,우리가 우리에게 죄 지은자를 사하여 준 것 같이우리 죄를 사하여 주옵시고,우리를 시험에 들게 하지 마옵시고,다만 악에서 구하옵소서. 대개 나라와 권세와 영광이 아버지께영원히 있사옵 나이다. - 아멘 -
- 반야심경 (the Heart Sutra, Korean)
관자재보살 행심반야바라밀다시 조견오온개공 도일체고액 사리자! 색불이공 공불이색 색즉시공 공즉시색 수상행식 역부여시 사리자! 시제법공상 불생불멸 불구부정 부증불감 시고공중무색 무수상행식 무안이비설신의 무색성향미촉법 무안계 내지무의 식계 무무명 역무무명진 내지무로사 역무로사진 무고집멸도 무지역무득. 이무소득고 보리살타의 반야바라밀다고 심무가애 무가애고 무유공포 원리전 도몽상 구경열반. 삼세제불의 반야바라밀다고 득아뇩다라삼먁삼보리. 고지 반야바라밀다 시대신주 시대명주 시무상주 시무등등주 능제일체고 진실불허 고설반야바라밀다주 즉설주왈 아제 아제 바라아제 바라승아제 모지 사바하 아제 아제 바라아제 바라승아제 모지 사바하 아제 아제 바라아제 바라승아제 모지 사바하.
from tensorflow.keras.layers import SimpleRNN
model.add(SimpleRNN(10, activation = 'relu', input_shape=(input_w,1)))
from tensorflow.keras.layers import Dense, LSTM
model = Sequential()
model.add(LSTM(10, activation = 'relu', input_shape=(input_w,1)))
## Usage is the same as Dense, but input_shape=(timesteps, features per step)
model.add(Dense(5))
model.add(Dense(1))
from tensorflow.keras.layers import LSTM
model = Sequential()
model.add(LSTM(input_w, activation='relu',
               batch_input_shape=(1, input_w, dict_size),
               stateful=True,
               return_sequences=True))
model.add(LSTM(input_w, activation='relu',
               stateful=True,
               return_sequences=False))
model.add(Dense(dict_size))
model.summary()
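With stateful=True the hidden state carries over from batch to batch, so it is normally cleared between passes with the documented reset_states() method. A minimal training sketch, assuming x/y prepared as in the one-hot encoding block below; the Dense head has no softmax, so the loss treats outputs as logits:
from tensorflow.keras.losses import CategoricalCrossentropy

model.compile(optimizer='adam', loss=CategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
for epoch in range(100):
    model.fit(x, y, batch_size=1, epochs=1, shuffle=False, verbose=0)
    model.reset_states()  # clear the carried-over state before the next pass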
from tensorflow.keras.layers import Bidirectional, Activation

model = Sequential()
model.add(Bidirectional(LSTM(input_w, activation='relu', return_sequences=True),
                        input_shape=(input_w, dict_size)))
model.add(Bidirectional(LSTM(input_w)))
model.add(Dense(dict_size))
model.add(Activation('softmax'))
from tensorflow.keras.layers import GRU

model = Sequential()
model.add(GRU(input_w, activation='relu',
              batch_input_shape=(1, input_w, dict_size), stateful=True))
model.add(Dense(dict_size))
model.summary()
- yolo_v4 → use Torch
from tensorflow.keras.preprocessing.text import Tokenizer

text = "떳다떳다비행기날아라날아라높이높이날아라우리비행기내가만든비행기날아라날아라높이높이날아라우리비행기"
t = Tokenizer()
t.fit_on_texts(text)  # passing a raw string iterates it character by character
print(t.word_index)
sub_text = "높이높이날아라"
encoded = t.texts_to_sequences(sub_text)
print(encoded)
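The same character-level behavior can be requested explicitly with the documented char_level flag, passing the corpus as a one-element list; a small sketch:
t2 = Tokenizer(char_level=True)
t2.fit_on_texts([text])
print(t2.texts_to_sequences(["높이높이날아라"]))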
- https://medium.com/plotly/nlp-visualisations-for-clear-immediate-insights-into-text-data-and-outputs-9ebfab168d5b
- https://statkclee.github.io/nlp2/nlp-text-viz.html
- https://kanoki.org/2019/03/17/text-data-visualization-in-python/
## hist=model.fit(x_train, y_train, epochs=40, validation_data=(x_test, y_test))
from matplotlib import pyplot as plt

def hist_view(hist):
    print('## training loss and acc ##')
    fig, loss_ax = plt.subplots()
    acc_ax = loss_ax.twinx()
    loss_ax.plot(hist.history['loss'], 'y', label='train loss')
    loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
    loss_ax.set_xlabel('epoch')
    loss_ax.set_ylabel('loss')
    loss_ax.legend(loc='center')
    acc_ax.plot(hist.history['accuracy'], 'b', label='train acc')
    acc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')
    acc_ax.set_ylabel('accuracy')
    acc_ax.legend(loc='center right')
    plt.show()
hist_view(hist)
- For input_shape (stateful=False), with ordinal encoding
input_string="goodMorning".lower()
char_set=sorted(set(input_string))
char_set=[c for c in char_set if c not in (',',' ','!','\n')]
## char_set=[c for c in char_set if c != ' ' and c != ',' and c != '!']
encoder={k:v for v,k in enumerate(char_set)}
decoder={v:k for v,k in enumerate(char_set)}
print('# Encoder')
print(encoder)
encoded_string= [encoder[c] for c in input_string ]
print('# Encoded string')
print(encoded_string)
string_width=len(input_string)
input_w=4
output_w=string_width-input_w
x=[]
y=[]
for i in range(output_w):
x.append( encoded_string[i:i+input_w] )
y.append( encoded_string[input_w+i] )
x=array(x)
y=array(y)
x = x.reshape((x.shape[0], x.shape[1], 1))
print(x,y)
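A minimal model sketch that consumes this (samples, input_w, 1) data as a regression on the next character's code; the layer sizes and epoch count are arbitrary choices, mirroring the SimpleRNN usage earlier:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense

model = Sequential()
model.add(SimpleRNN(10, activation='relu', input_shape=(input_w, 1)))
model.add(Dense(1))  # predicts the ordinal code of the next character
model.compile(optimizer='adam', loss='mse')
model.fit(x, y, epochs=200, verbose=0)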
- For batch_input_shape (stateful=True), with one-hot encoding
input_string="떳다떳다비행기날아라날아라높이높이날아라우리비행기내가만든비행기날아라날아라높이높이날아라우리비행기"
char_set=sorted(set(input_string))
dict_size=len(char_set)
encoder={k:v for v,k in enumerate(char_set)}
one_hot_encoder=eye(dict_size)
decoder={v:k for v,k in enumerate(char_set)}
encoded_string= [encoder[c] for c in input_string ]
one_hot_encoded_string=[one_hot_encoder[i] for i in encoded_string]
string_width=len(input_string)
output_w=string_width-input_w
x=[];y=[]
for i in range(output_w):
x.append( one_hot_encoded_string[i:i+input_w] )
y.append( one_hot_encoded_string[input_w+i] )
x=array(x)
y=array(y)
x.shape
x = x.reshape(( output_w, input_w, dict_size))
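A quick round-trip check that the encoding is consistent; a small sketch using numpy's argmax:
from numpy import argmax

print(x.shape, y.shape)  # (output_w, input_w, dict_size), (output_w, dict_size)
first_window = ''.join(decoder[int(argmax(v))] for v in x[0])
print(first_window, '→', decoder[int(argmax(y[0]))])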
- Check string [before one-hot encoding]
ok = 0
for i in range(output_w):
    test_string = input_string[i:i + input_w]
    x_input = array([encoder[c] for c in test_string])
    x_input = x_input.reshape((1, input_w, 1))
    yhat = model.predict(x_input)
    org = input_string[i + input_w:i + input_w + 1]
    su = round(yhat[0][0])
    su = min(max(su, 0), len(decoder) - 1)  # clamp to the valid code range
    out = decoder[su]
    if org == out:
        ok += 1
    print(f"{org} {out} {org==out}")
pct = int(ok / output_w * 100 * 10000) / 10000
print(f"{ok}/{output_w} acc={pct}%")
- Check string for one hot encoded data
from numpy import array, argmax

def test_it(test_string, y, debug=True):
    x_input = array([encoder[c] for c in test_string])
    x_input = array([one_hot_encoder[i] for i in x_input])
    x_input = x_input.reshape((1, input_w, dict_size))
    yhat = model.predict(x_input)
    out = decoder[int(argmax(yhat))]
    isOk = y == out
    if debug:
        print(f"# {test_string} → {out} {isOk}")
    return isOk

print(f'# InputString : {input_string}')
okCount = 0
for s in range(output_w):
    if test_it(input_string[s:input_w + s], input_string[input_w + s:input_w + s + 1], False):
        okCount += 1
okPct = okCount / output_w * 100
print(f' {okPct}% : {okCount} / {output_w}')