keras_cnn_expert

ro_ot | 2020. 1. 30. 19:15

%tensorflow_version 2.x
import tensorflow as tf
from tensorflow import keras
import numpy as np
from sklearn.datasets import load_digits
x, y = load_digits(return_X_y=True)
x.shape, y.shape, set(y)  # 1797 samples of 64 features, k=10 classes

 ((1797, 64), (1797,), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
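Each row of x is an 8x8 grayscale digit flattened into 64 pixel values (intensities 0 to 16), and y holds the digit labels 0 to 9. A minimal sketch to sanity-check this by restoring the first sample to image form:

# sanity check: reshape the first flat 64-vector back to its 8x8 image
img = x[0].reshape(8, 8)
print(img.shape)  # (8, 8)
print(y[0])       # label of the first sample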

# Simple model (Sequential API)
model = keras.Sequential()
model.add(keras.layers.Input((8,8,1)))
model.add(keras.layers.Conv2D(3, [3,3], padding='same'))
model.add(keras.layers.MaxPool2D((2,2), padding='same'))
model.add(keras.layers.Conv2D(6, [3,3], padding='same'))
model.add(keras.layers.MaxPool2D((2,2), padding='same'))
model.add(keras.layers.Flatten())  # reshape to 1D
model.add(keras.layers.Dense(10, activation=keras.activations.softmax))  # 10 units, one per class
model.compile(loss=keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy'])
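The Sequential model above is only compiled, never trained, and since compile() is given no optimizer, Keras falls back to its default, 'rmsprop'. A minimal training sketch, assuming the flat (1797, 64) inputs are reshaped into the (8, 8, 1) images the Input layer expects:

# sketch: train the Sequential model on the reshaped digits
x_img = x.reshape(-1, 8, 8, 1).astype('float32')  # (1797, 8, 8, 1)
model.fit(x_img, y, epochs=10)
print(model.evaluate(x_img, y))  # [loss, accuracy]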
# Expert model: subclassing keras.Model
class MyModel(keras.Model):
  def __init__(self):
    super(MyModel, self).__init__()  # call the parent class constructor
    self.opt = tf.optimizers.SGD(learning_rate=0.01)  # stochastic gradient descent
    self.conv0 = keras.layers.Conv2D(16, [3,3], padding='same', activation=keras.activations.relu)
    self.conv1 = keras.layers.Conv2D(32, [3,3], padding='same', activation=keras.activations.relu)
    self.pool0 = keras.layers.MaxPool2D([2,2], padding='same')
    self.pool1 = keras.layers.MaxPool2D([2,2], padding='same')
    self.flatten = keras.layers.Flatten()
    self.dense = keras.layers.Dense(units=10, activation=keras.activations.softmax)
  
  def call(self, x):
    # x arrives flat: (m, 64); reshape to NHWC (m, 8, 8, 1) for Conv2D
    x_4d = tf.reshape(x, [-1, 8, 8, 1])
    x_4d = tf.cast(x_4d, tf.float32)
    net = self.conv0(x_4d)
    net = self.pool0(net)
    net = self.conv1(net)
    net = self.pool1(net)
    net = self.flatten(net)    
    h = self.dense(net)
    return h

  def get_loss(self, y, h):
    # clip h to (min, max) to avoid NaN during training
    h = tf.clip_by_value(h, 1e-8, 1 - 1e-8)  # safety guard so h never hits exactly 0 or 1
    cross_entropy = - (y * tf.math.log(h) + (1 - y) * tf.math.log(1 - h))
    loss = tf.reduce_mean(cross_entropy)
    return loss

  def get_accuracy(self, y, h):    
    predict = tf.argmax(h, -1)
    self.acc = tf.reduce_mean(tf.cast(tf.equal(y, predict), tf.float32))  # cast True -> 1, False -> 0

  def fit(self, x, y, epoch=1):
    # x: (m, 64), y: (m,)
    y_hot = tf.one_hot(y, depth=10, axis=-1)  # (m, 10)
    for i in range(epoch):
      with tf.GradientTape() as tape:  # records operations for differentiation
        h = self.call(x)
        loss = self.get_loss(y_hot, h)        
      grads = tape.gradient(loss, self.trainable_variables)  # compute gradients
      self.opt.apply_gradients(zip(grads, self.trainable_variables))  # step the weights along the negative gradient
      self.get_accuracy(y, h)
      if i % 10 == 0:
        print('%d/%d loss:%.3f acc:%.3f' % (i, epoch, loss, self.acc))
model = MyModel()
model.fit(x, y, 100)

0/100 loss:0.574 acc:0.073
10/100 loss:0.363 acc:0.109
...
80/100 loss:0.264 acc:0.441
90/100 loss:0.256 acc:0.489
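Note that get_loss above applies the element-wise binary cross-entropy formula, -(y*log(h) + (1-y)*log(1-h)), to each of the 10 one-hot outputs. The more usual loss for a softmax output is categorical cross-entropy, -sum(y*log(h)). A sketch of the same method written with the Keras built-in instead of the manual formula (my substitution, not what the post trains with):

# alternative get_loss using the built-in categorical cross-entropy
def get_loss_cce(y_hot, h):
  cce = keras.losses.CategoricalCrossentropy()  # expects probabilities; from_logits=False is the default
  return cce(y_hot, h)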

h = model(x[:1])
print(np.array(h))  # class probabilities

(output: a (1, 10) array of class probabilities summing to 1)
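To read off the predicted class, take the argmax of the probability vector and compare it against the true label:

# predicted class vs. ground truth for the first sample
print(np.argmax(np.array(h), axis=-1))  # predicted digit
print(y[:1])                            # true digit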
