It was a problem with the data.
train_subset_mnist_tanh_full_cnn_256_sum_0.297016_imgcls_1.000000
test_subset_mnist_tanh_full_cnn_254_sum_0.988055_imgcls_0.969500
Hmm..
I wonder why "금지된 경호" and "She's Gone" kept looping in my head while I wrote the code below, even though I wasn't listening to any music..;
# how to visualize a 2-D or 3-D scatter plot of weights in TensorFlow (done)
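The notebook below embeds each of the ten one-hot digits into a 2-D vector with a small shared network, and trains it so that adding the vectors of two digits lands on the vector of their sum; scatter-plotting the ten embeddings then shows what structure the weights have learned.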
# coding: utf-8
# In[1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
#######################
# data setting #
#######################
train_input1 = []
train_input2 = []
train_target = []
for i in range(50000):
    x1 = np.random.randint(10)
    x2 = np.random.randint(10)
    if x1 + x2 < 10:
        train_input1.append(np.eye(10)[x1])
        train_input2.append(np.eye(10)[x2])
        train_target.append(np.eye(10)[x1 + x2])
        # print(x1, " + ", x2, " = ", x1 + x2)
    if len(train_input1) == 100:
        break
print("Training Data: ",len(train_input1),",",len(train_input2),"Target",len(train_target));
train_input1 = np.float32(train_input1)
train_input2 = np.float32(train_input2)
train_target = np.float32(train_target)
test_input1 = []
test_input2 = []
test_target = []
check = []
for i in range(10):
    check.append(np.eye(10)[i])
    for j in range(10):
        if i + j < 10:
            test_input1.append(np.eye(10)[i])
            test_input2.append(np.eye(10)[j])
            test_target.append(np.eye(10)[i + j])
print("Test Data: ",len(test_input1),",",len(test_input2),"Target",len(test_target));
test_input1 = np.float32(test_input1)
test_input2 = np.float32(test_input2)
test_target = np.float32(test_target)
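# A quick sanity check on the encoding above: the pair (3, 4) is stored as
#   input1 = np.eye(10)[3], input2 = np.eye(10)[4], target = np.eye(10)[7],
# and any pair whose sum exceeds 9 is skipped, so the test set has 55 pairs.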
# In[2]:
########################
# Graph Building #
########################
#placeholder : Graph Argument
x1 = tf.placeholder(tf.float32, shape=[None, 10])
x2 = tf.placeholder(tf.float32, shape=[None, 10])
y = tf.placeholder(tf.float32, shape=[None, 10])
check_y = tf.placeholder(tf.float32, shape=[None, 10])
# layer
def one_layer(_x, _W, _b):
    return tf.add(tf.matmul(_x, _W), _b)
## Image to Shared Network
w_dev = 0.3
hl_size = 10
learning_rate = 0.05
Image_Vector_Size = 2
# Shared Weights : _W1, _b1, _W2, _b2
# Image(10) to Hidden Layer(hl_size) including shared weights
_W1 = tf.Variable(tf.random_normal([train_input1.shape[1], hl_size], stddev=w_dev))
_b1 = tf.Variable(tf.random_normal([hl_size]))
# Hidden Layer(hl_size) to Image Vector(2)
_W2 = tf.Variable(tf.random_normal([hl_size, Image_Vector_Size], stddev=w_dev))
_b2 = tf.Variable(tf.random_normal([Image_Vector_Size]))
# Layers
# Inputs to Hidden
x1h = one_layer(x1, _W1, _b1)
x2h = one_layer(x2, _W1, _b1)
# Hidden to Input Image Vectors
v1 = one_layer(x1h, _W2, _b2)
v2 = one_layer(x2h, _W2, _b2)
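# x1 and x2 pass through the same _W1/_b1 and _W2/_b2 (and the target y reuses
# them below), so every digit is mapped by one shared embedding function.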
# summation: digit addition is modeled as element-wise addition of the two image vectors
out = tf.add(v1, v2)
# Target to Hidden
yh = one_layer(y, _W1, _b1)
# Hidden to Target Image Vector
vy = one_layer(yh, _W2, _b2)
loss = tf.reduce_mean(tf.square(vy - out))
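# The objective: push v(x1) + v(x2) toward v(y) for y = x1 + x2, i.e. train
# the shared embedding so that digit addition becomes vector addition.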
#Gradient Descent Methods
#optm = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# Get point.x, point.y of each digit embedding via the check_y placeholder
# (note: the plotting code below feeds check through y and reads vy instead,
# so this xy path is effectively unused)
xy = one_layer(one_layer(check_y, _W1, _b1), _W2, _b2)
#Init Graph
init = tf.global_variables_initializer()
print ("Graph build")
# In[3]:
#Start Session
sess = tf.Session()
sess.run(init)
training_epochs = 100
batch_size = 50
display_step = 20
# In[4]:
for epoch in range(training_epochs):
    avg_cost = 0.
    num_batch = int(train_input1.shape[0] / batch_size)
    for i in range(num_batch):
        randidx = np.random.randint(train_input1.shape[0], size=batch_size)
        batch_x1 = train_input1[randidx, :]
        batch_x2 = train_input2[randidx, :]
        batch_ys = train_target[randidx, :]
        sess.run(optm, feed_dict={x1: batch_x1, x2: batch_x2, y: batch_ys})
        avg_cost += sess.run(loss, feed_dict={x1: batch_x1, x2: batch_x2, y: batch_ys}) / num_batch
    if epoch % display_step == 0:
        print("epoch: %03d/%03d , cost: %.6f" % (epoch, training_epochs, avg_cost))
        result_xy = sess.run(vy, feed_dict={y: check})
        point_x = []
        point_y = []
        # drawing a scatter plot of the image vectors (2-D, so no z-coordinate)
        for j in range(len(check)):
            print(j, result_xy[j])
            point_x.append(result_xy[j][0])
            point_y.append(result_xy[j][1])
        plt.plot(point_x, point_y, "o")
        plt.show()
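If training succeeds, the geometry is essentially forced: any exact solution satisfies v(a) + v(b) = v(a+b) for every valid pair, which makes the embedding additive (v(0) = 0 and v(n) = n·v(1)), so the ten points should settle into evenly spaced positions along a line through the origin.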
# In[5]:
avg_loss = 0.
if batch_size > test_input1.shape[0]:
    num_batch_test = 1
else:
    num_batch_test = int(test_input1.shape[0] / batch_size)
for i in range(num_batch_test):
    batch_x1 = test_input1[i * batch_size : (i + 1) * batch_size]
    batch_x2 = test_input2[i * batch_size : (i + 1) * batch_size]
    batch_y = test_target[i * batch_size : (i + 1) * batch_size]
    # accumulate the loss of this batch
    avg_loss += sess.run(loss, feed_dict={x1: batch_x1, x2: batch_x2, y: batch_y})
# note: with batch_size = 50 and 55 test pairs, only the first 50 are evaluated
print("Training complete, Test Average Loss : %.6f" % (avg_loss / num_batch_test,))
result_xy = sess.run(vy, feed_dict={y: check})
point_x = []
point_y = []
# (renamed from x/y/z: reusing "y" here would shadow the tf.placeholder above)
for j in range(len(check)):
    print(j, result_xy[j])
    point_x.append(result_xy[j][0])
    point_y.append(result_xy[j][1])
plt.plot(point_x, point_y, "o")
plt.show()
# In[ ]:
It does seem like it would be good to do these as well.. hmm^^; (a rough sketch follows the list)
how to visualize graph architectures in TensorFlow
how to visualize a weight image in TensorFlow
how to visualize a 2-D or 3-D scatter plot of weights in TensorFlow
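Minimal sketches for these items, assuming the TF1 session above is still open; the "./logs" directory name is my own choice, and the 3-D variant assumes Image_Vector_Size is changed to 3 before training:
# 1) Graph architecture: write the graph definition for TensorBoard,
#    then run `tensorboard --logdir=./logs` and open the Graphs tab.
writer = tf.summary.FileWriter("./logs", sess.graph)
writer.close()
# 2) Weight image: render the learned input-to-hidden matrix as a picture.
plt.imshow(sess.run(_W1), cmap="gray")
plt.colorbar()
plt.show()
# 3) 3-D scatter: with Image_Vector_Size = 3, plot the embeddings in 3-D.
from mpl_toolkits.mplot3d import Axes3D  # registers the "3d" projection
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.scatter(result_xy[:, 0], result_xy[:, 1], result_xy[:, 2])
plt.show()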
#Tags TensorFlow, 텐저플로우, 텐서플로우, 텐져플로우, tensor