# Author : tmlab / Date : 2017. 2. 17. 14:40 / Category : Analytics
import numpy as np
import tensorflow as tf
xy = np.loadtxt('train.txt', unpack=True)
x_data = xy[0:-1]
y_data = xy[-1]
x_data
y_data
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
W = tf.Variable(tf.random_uniform([1,len(x_data)], -1.0, 1.0)) #초기값 세팅
h = tf.matmul(W, X)
hypothesis = tf.div(1., 1.+tf.exp(-h)) # 시그모이드 함수
cost = -tf.reduce_mean(Y*tf.log(hypothesis)+(1-Y)*tf.log(1-hypothesis)) #코스트(크로스엔트로피)
a = tf.Variable(0.01)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for step in xrange(1000):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step % 200 == 0:
print step, sess.run(cost, feed_dict={X:x_data, Y:y_data}), sess.run(W)
#모델 테스트
correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y) # 계산된 값을 0,1로 변환
#정확도 계산
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict = {X:x_data, Y:y_data})
print "Accuracy:" , accuracy.eval({X:x_data, Y:y_data})
import numpy as np
import tensorflow as tf
xy = np.loadtxt('train.txt', unpack=True)
x_data = np.transpose(xy[0:-1]) #행렬곱을 위해 트랜스포즈
y_data = np.reshape(xy[-1], (4, 1))
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])
W1 = tf.Variable(tf.random_uniform([2,2], -1.0, 1.0))
W2 = tf.Variable(tf.random_uniform([2,1], -1.0, 1.0))
b1 = tf.Variable(tf.zeros([2]), name="Bias1")
b2 = tf.Variable(tf.zeros([1]), name="Bias2")
L2 = tf.sigmoid(tf.matmul(X, W1) + b1) #시그모이드함수로 간단하게 바꿈
hypothesis = tf.sigmoid(tf.matmul(L2, W2) + b2) #L2를 입력으로 받아서 다시곱함
cost = -tf.reduce_mean(Y*tf.log(hypothesis)+(1-Y)*tf.log(1-hypothesis))
a = tf.Variable(0.1)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for step in xrange(10000):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step % 1000 == 0:
print step, sess.run(cost, feed_dict={X:x_data, Y:y_data})
#모델 테스트
correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y) # 계산된 값을 0,1로 변환
#정확도 계산
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict = {X:x_data, Y:y_data})
print "Accuracy:" , accuracy.eval({X:x_data, Y:y_data})
xy = np.loadtxt('train.txt', unpack=True)
x_data = np.transpose(xy[0:-1]) #행렬곱을 위해 트랜스포즈
y_data = np.reshape(xy[-1], (4, 1))
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])
W1 = tf.Variable(tf.random_uniform([2,10], -1.0, 1.0))
W2 = tf.Variable(tf.random_uniform([10,1], -1.0, 1.0))
b1 = tf.Variable(tf.zeros([10]), name="Bias1")
b2 = tf.Variable(tf.zeros([1]), name="Bias2")
L2 = tf.sigmoid(tf.matmul(X, W1) + b1) #시그모이드함수로 간단하게 바꿈
hypothesis = tf.sigmoid(tf.matmul(L2, W2) + b2) #L2를 입력으로 받아서 다시곱함
cost = -tf.reduce_mean(Y*tf.log(hypothesis)+(1-Y)*tf.log(1-hypothesis))
a = tf.Variable(0.1)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for step in xrange(5000):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step % 1000 == 0:
print step, sess.run(cost, feed_dict={X:x_data, Y:y_data})
#모델 테스트
correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y) # 계산된 값을 0,1로 변환
#정확도 계산
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict = {X:x_data, Y:y_data})
print "Accuracy:" , accuracy.eval({X:x_data, Y:y_data})
W1 = tf.Variable(tf.random_uniform([2,5], -1.0, 1.0))
W2 = tf.Variable(tf.random_uniform([5,4], -1.0, 1.0))
W3 = tf.Variable(tf.random_uniform([4,1], -1.0, 1.0))
b1 = tf.Variable(tf.zeros([5]), name="Bias1")
b2 = tf.Variable(tf.zeros([4]), name="Bias2")
b3 = tf.Variable(tf.zeros([1]), name="Bias3")
L2 = tf.sigmoid(tf.matmul(X, W1) + b1) #시그모이드함수로 간단하게 바꿈
L3 = tf.sigmoid(tf.matmul(L2, W2) + b2) #시그모이드함수로 간단하게 바꿈
hypothesis = tf.sigmoid(tf.matmul(L3, W3) + b3) #L2를 입력으로 받아서 다시곱함
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for step in xrange(10000):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step % 1000 == 0:
print step, sess.run(cost, feed_dict={X:x_data, Y:y_data})
#모델 테스트
correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y) # 계산된 값을 0,1로 변환
#정확도 계산
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict = {X:x_data, Y:y_data})
print "Accuracy:" , accuracy.eval({X:x_data, Y:y_data})