Basic TensorFlow softmax MNIST code.
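For reference, the model implemented below is plain softmax (multinomial logistic) regression. With a flattened 28x28 input image x (784 values), a weight matrix W of shape [784, 10], and a bias b, the code computes

H = \mathrm{softmax}(xW + b), \qquad \text{cost} = -\frac{1}{N} \sum_{n=1}^{N} \sum_{k=0}^{9} y_{nk} \log H_{nk}

where y is the one-hot label and the cost is the average cross-entropy over a batch of N examples.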
# -*- coding: utf-8 -*-
# Download the MNIST data.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets( "MNIST_data/", one_hot=True )
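# one_hot=True encodes each digit label as a length-10 vector,
# e.g. the digit 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].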
# Import the TensorFlow library.
import tensorflow as tf
# Libraries added for testing (not used in the code below).
import matplotlib.pyplot as plt
import numpy as np
# Set up the model variables.
num_data = tf.placeholder( tf.float32, [None, 784] )
Weights = tf.Variable( tf.zeros( [784, 10] ) )
Bias = tf.Variable( tf.zeros([10] ) )
Hypothesis = tf.nn.softmax( tf.matmul( num_data, Weights ) + Bias )
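# Hypothesis has shape [None, 10]: one probability distribution over the
# ten digit classes per input image, so each row sums to 1.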
# Set up the cross-entropy cost and optimizer.
learning_rate = 0.25
num_label = tf.placeholder( tf.float32, [None, 10] )
cost = tf.reduce_mean( -tf.reduce_sum( num_label * tf.log( Hypothesis ), axis=1 ) )
optimizer = tf.train.GradientDescentOptimizer( learning_rate ).minimize( cost )
# cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits( logits=Hypothesis, labels=num_label ))
# optimizer = tf.train.AdamOptimizer( learning_rate=learning_rate ).minimize( cost )  # Adam instead of Gradient Descent
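# Note: tf.nn.softmax_cross_entropy_with_logits expects raw logits, not
# softmax output, so passing Hypothesis above would apply softmax twice.
# To use this variant, compute Logits = tf.matmul( num_data, Weights ) + Bias,
# pass logits=Logits, and define Hypothesis = tf.nn.softmax( Logits ) separately.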
# Train the model with gradient descent.
batch_size = 100
with tf.Session( ) as sess :
    sess.run( tf.global_variables_initializer( ) )
    for i in range( 2000 ) :
        batch_xs, batch_ys = mnist.train.next_batch( batch_size )
        sess.run( optimizer, feed_dict={num_data:batch_xs, num_label:batch_ys} )
        if i % 200 == 0 :
            print( "{0} step, Cost {1}".format( i, sess.run( cost, feed_dict={num_data:batch_xs, num_label:batch_ys} ) ) )
    # Print how accurate the trained model is.
    correct_prediction = tf.equal( tf.argmax( Hypothesis, 1 ), tf.argmax( num_label, 1 ) )
    Accuracy = tf.reduce_mean( tf.cast( correct_prediction, tf.float32 ) )
    print( sess.run( Accuracy, feed_dict={num_data:mnist.test.images, num_label:mnist.test.labels} ) )
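With these settings (2,000 steps, batch size 100), this plain softmax model typically reaches roughly 92% accuracy on the MNIST test set, in line with the official TensorFlow tutorial this code follows.

As a sketch of what the matplotlib/numpy imports at the top could be used for (the code above never exercises them), the snippet below displays one test digit together with the model's prediction. This is an illustrative assumption, not part of the original post, and it must run inside the `with tf.Session( ) as sess :` block while the session is still open:

# Sketch: visualize one test image and the model's prediction for it.
img = mnist.test.images[0]
pred = sess.run( tf.argmax( Hypothesis, 1 ), feed_dict={num_data: img.reshape( 1, 784 )} )
plt.imshow( img.reshape( 28, 28 ), cmap='gray' )
plt.title( "Predicted: {0}, Actual: {1}".format( pred[0], np.argmax( mnist.test.labels[0] ) ) )
plt.show( )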