NeuroWhAI의 잡블로그
[TensorFlow] Failing to Make MNIST Images with DCGAN
As expected, this seems like too much for a beginner to tackle. ㅠㅠ
The generator just isn't training well...
Maybe it's because I put an FC layer in the discriminator, or because I cut two conv layers to speed things up.
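For comparison, a discriminator closer to the original DCGAN design drops the FC hidden layer and relies on strided convolutions all the way down to a single logit. Here is a rough sketch of that idea; the layer widths and kernel sizes are my own guesses, and it borrows the leaky_relu helper and is_training placeholder defined in the code below:

import tensorflow as tf

# Sketch of an all-convolutional, DCGAN-style discriminator (no FC hidden layer).
def dcgan_discriminator(inputs, reuse=None):
    with tf.variable_scope('discriminator', reuse=reuse):
        # 28x28x1 -> 14x14x64; DCGAN skips batch norm on the first layer.
        output = tf.layers.conv2d(inputs, 64, [5, 5], strides=(2, 2), padding='SAME')
        output = leaky_relu(output)
        # 14x14x64 -> 7x7x128
        output = tf.layers.conv2d(output, 128, [5, 5], strides=(2, 2), padding='SAME')
        output = leaky_relu(tf.layers.batch_normalization(output, training=is_training))
        # 7x7x128 -> 4x4x256
        output = tf.layers.conv2d(output, 256, [5, 5], strides=(2, 2), padding='SAME')
        output = leaky_relu(tf.layers.batch_normalization(output, training=is_training))
        # Flatten and map directly to one real/fake logit, no hidden FC layer.
        flat = tf.contrib.layers.flatten(output)
        output = tf.layers.dense(flat, 1, activation=None)
    return output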
Code:
#-*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)

total_epoch = 70
batch_size = 100
n_noise = 100

D_global_step = tf.Variable(0, trainable=False, name='D_global_step')
G_global_step = tf.Variable(0, trainable=False, name='G_global_step')

X = tf.placeholder(tf.float32, [None, 28, 28, 1])
Z = tf.placeholder(tf.float32, [None, n_noise])
is_training = tf.placeholder(tf.bool)

def leaky_relu(x, leak=0.2):
    return tf.maximum(x, x * leak)

def generator(noise):
    with tf.variable_scope('generator'):
        # Project the noise vector into a 7x7x128 feature map.
        output = tf.layers.dense(noise, 128*7*7)
        output = tf.reshape(output, [-1, 7, 7, 128])
        output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
        # Two transposed convolutions upsample 7x7 -> 14x14 -> 28x28.
        output = tf.layers.conv2d_transpose(output, 64, [5, 5], strides=(2, 2), padding='SAME')
        output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
        output = tf.layers.conv2d_transpose(output, 1, [5, 5], strides=(2, 2), padding='SAME')
        # tanh puts the samples in [-1, 1].
        output = tf.tanh(output)
    return output

def discriminator(inputs, reuse=None):
    with tf.variable_scope('discriminator') as scope:
        if reuse:
            scope.reuse_variables()
        output = tf.layers.conv2d(inputs, 32, [5, 5], strides=(2, 2), padding='SAME')
        output = leaky_relu(tf.layers.batch_normalization(output, training=is_training))
        # The second conv layer takes the first layer's output, not the raw inputs.
        output = tf.layers.conv2d(output, 64, [5, 5], strides=(2, 2), padding='SAME')
        output = leaky_relu(tf.layers.batch_normalization(output, training=is_training))
        flat = tf.contrib.layers.flatten(output)
        hidden = tf.layers.dense(flat, 256, activation=tf.nn.relu)
        output = tf.layers.dense(hidden, 1, activation=None)
    return output

def get_noise(batch_size, n_noise):
    return np.random.uniform(-1., 1., size=[batch_size, n_noise])

G = generator(Z)
D_real = discriminator(X)
D_gene = discriminator(G, True)

loss_D_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_real, labels=tf.ones_like(D_real)))
loss_D_gene = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_gene, labels=tf.zeros_like(D_gene)))
loss_D = loss_D_real + loss_D_gene
loss_G = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_gene, labels=tf.ones_like(D_gene)))

vars_D = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                           scope='discriminator')
vars_G = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                           scope='generator')

# batch_normalization registers its moving-average updates in UPDATE_OPS;
# without this dependency they never run, and inference with
# is_training=False uses stale statistics.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_D = tf.train.AdamOptimizer().minimize(loss_D,
        var_list=vars_D, global_step=D_global_step)
    train_G = tf.train.AdamOptimizer().minimize(loss_G,
        var_list=vars_G, global_step=G_global_step)

tf.summary.scalar('loss_D', loss_D)
tf.summary.scalar('loss_G', loss_G)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter('./logs', sess.graph)

    total_batch = int(mnist.train.num_examples / batch_size)

    for epoch in range(total_epoch):
        loss_val_D, loss_val_G = 0, 0
        batch_xs, batch_ys = None, None
        noise = None

        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Rescale the pixels from [0, 1] to [-1, 1] to match
            # the generator's tanh output range.
            batch_xs = batch_xs.reshape(-1, 28, 28, 1) * 2.0 - 1.0
            noise = get_noise(batch_size, n_noise)

            _, loss_val_D = sess.run([train_D, loss_D],
                feed_dict={X: batch_xs, Z: noise, is_training: True})
            _, loss_val_G = sess.run([train_G, loss_G],
                feed_dict={Z: noise, is_training: True})

        summary = sess.run(merged,
            feed_dict={X: batch_xs, Z: noise, is_training: False})
        writer.add_summary(summary, global_step=sess.run(G_global_step))

        if epoch == 0 or (epoch + 1) % 10 == 0:
            print('Epoch:', '%04d' % epoch,
                  'D loss: {:.4}'.format(loss_val_D),
                  'G loss: {:.4}'.format(loss_val_G))

            sample_size = 10
            noise = get_noise(sample_size, n_noise)
            samples = sess.run(G, feed_dict={Z: noise, is_training: False})

            fig, ax = plt.subplots(1, sample_size, figsize=(sample_size, 1))
            for i in range(sample_size):
                ax[i].set_axis_off()
                ax[i].imshow(np.reshape(samples[i], (28, 28)))

            plt.savefig('{}.png'.format(str(epoch).zfill(3)),
                        bbox_inches='tight')
            plt.close(fig)
Results:
Epoch: 0000 D loss: 0.04285 G loss: 12.52
Epoch: 0009 D loss: 0.07978 G loss: 7.917
Epoch: 0019 D loss: 0.07283 G loss: 6.954
Epoch: 0029 D loss: 0.3557 G loss: 3.424
Epoch: 0039 D loss: 0.3438 G loss: 4.134
Epoch: 0049 D loss: 0.2844 G loss: 4.72
Epoch: 0059 D loss: 0.3627 G loss: 4.096
Epoch: 0069 D loss: 0.1933 G loss: 5.506
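The pattern in these numbers, with D loss staying low while G loss stays high, usually suggests the discriminator is winning too easily. One way to check would be to also log the discriminator's mean sigmoid output on real and generated batches; these summary names are hypothetical additions, not part of the code above:

# Mean discriminator probability on real vs. generated batches.
# Values pinned near 1.0 and 0.0 would mean D is overpowering G.
tf.summary.scalar('D_real_prob', tf.reduce_mean(tf.sigmoid(D_real)))
tf.summary.scalar('D_gene_prob', tf.reduce_mean(tf.sigmoid(D_gene)))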
Would you believe a single run took a whole 20 hours? Sob.
It's not that nothing is being learned, so what exactly is the problem?
Too few layers?
Too few epochs?
The FC layer?
Argh.
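One more thing worth trying before adding layers or epochs: the code above calls AdamOptimizer() with its defaults (learning rate 0.001, beta1 0.9), while the DCGAN paper (Radford et al., 2015) trains both networks with a smaller learning rate and reduced momentum. A sketch of that change, untested on this particular model:

# DCGAN paper settings: learning rate 0.0002, beta1 0.5.
train_D = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(
    loss_D, var_list=vars_D, global_step=D_global_step)
train_G = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(
    loss_G, var_list=vars_G, global_step=G_global_step)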