train.py
import os
from datetime import datetime
import time
import tensorflow as tf
from meta import Meta
from donkey import Donkey
from model import Model
from evaluator import Evaluator
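
# Command-line flags: dataset and log locations, an optional checkpoint to resume from,
# and optimizer hyperparameters (batch size, initial learning rate, early-stopping
# patience, and the exponential learning-rate decay schedule).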
tf.app.flags.DEFINE_string('data_dir', './data', 'Directory to read TFRecords files')
tf.app.flags.DEFINE_string('train_logdir', './logs/train', 'Directory to write training logs')
tf.app.flags.DEFINE_string('restore_checkpoint', None,
                           'Path to restore checkpoint (without postfix), e.g. ./logs/train/model.ckpt-100')
tf.app.flags.DEFINE_integer('batch_size', 32, 'Default 32')
tf.app.flags.DEFINE_float('learning_rate', 1e-2, 'Default 1e-2')
tf.app.flags.DEFINE_integer('patience', 100, 'Default 100, set -1 to train infinitely')
tf.app.flags.DEFINE_integer('decay_steps', 10000, 'Default 10000')
tf.app.flags.DEFINE_float('decay_rate', 0.9, 'Default 0.9')
FLAGS = tf.app.flags.FLAGS
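

# _train builds the input pipeline, model, and optimizer in a fresh graph, then runs an
# open-ended loop: it reports the training loss every 100 steps and, every 1000 steps,
# writes summaries, evaluates on the validation set, and checkpoints the model whenever
# validation accuracy improves. Training stops once accuracy has failed to improve for
# `patience` consecutive evaluations.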
def _train(path_to_train_tfrecords_file, num_train_examples, path_to_val_tfrecords_file, num_val_examples,
           path_to_train_log_dir, path_to_restore_checkpoint_file, training_options):
    batch_size = training_options['batch_size']
    initial_patience = training_options['patience']
    num_steps_to_show_loss = 100
    num_steps_to_check = 1000

    with tf.Graph().as_default():
        # Input pipeline: shuffled batches of images with digit-length and per-digit labels.
        image_batch, length_batch, digits_batch = Donkey.build_batch(path_to_train_tfrecords_file,
                                                                     num_examples=num_train_examples,
                                                                     batch_size=batch_size,
                                                                     shuffled=True)
        length_logits, digits_logits = Model.inference(image_batch, drop_rate=0.2)
        loss = Model.loss(length_logits, digits_logits, length_batch, digits_batch)

        # SGD with an exponentially decaying learning rate (staircase decay every `decay_steps`).
        global_step = tf.Variable(0, name='global_step', trainable=False)
        learning_rate = tf.train.exponential_decay(training_options['learning_rate'], global_step=global_step,
                                                   decay_steps=training_options['decay_steps'],
                                                   decay_rate=training_options['decay_rate'], staircase=True)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step)

        # Summaries for TensorBoard.
        tf.summary.image('image', image_batch)
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('learning_rate', learning_rate)
        summary = tf.summary.merge_all()
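
        # Run training in a session: optionally restore a previous checkpoint, start the
        # input queue runners, then loop until early stopping triggers.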
        with tf.Session() as sess:
            summary_writer = tf.summary.FileWriter(path_to_train_log_dir, sess.graph)
            evaluator = Evaluator(os.path.join(path_to_train_log_dir, 'eval/val'))

            sess.run(tf.global_variables_initializer())
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            saver = tf.train.Saver()
            if path_to_restore_checkpoint_file is not None:
                assert tf.train.checkpoint_exists(path_to_restore_checkpoint_file), \
                    '%s not found' % path_to_restore_checkpoint_file
                saver.restore(sess, path_to_restore_checkpoint_file)
                print('Model restored from file: %s' % path_to_restore_checkpoint_file)

            print('Start training')
            patience = initial_patience
            best_accuracy = 0.0
            duration = 0.0

            while True:
                start_time = time.time()
                _, loss_val, summary_val, global_step_val, learning_rate_val = sess.run(
                    [train_op, loss, summary, global_step, learning_rate])
                duration += time.time() - start_time

                if global_step_val % num_steps_to_show_loss == 0:
                    examples_per_sec = batch_size * num_steps_to_show_loss / duration
                    duration = 0.0
                    print('=> %s: step %d, loss = %f (%.1f examples/sec)' % (
                        datetime.now(), global_step_val, loss_val, examples_per_sec))

                if global_step_val % num_steps_to_check != 0:
                    continue

                summary_writer.add_summary(summary_val, global_step=global_step_val)

                # Evaluate on the validation set and keep only the best-performing checkpoint.
                print('=> Evaluating on validation dataset...')
                path_to_latest_checkpoint_file = saver.save(sess, os.path.join(path_to_train_log_dir, 'latest.ckpt'))
                accuracy = evaluator.evaluate(path_to_latest_checkpoint_file, path_to_val_tfrecords_file,
                                              num_val_examples, global_step_val)
                print('==> accuracy = %f, best accuracy %f' % (accuracy, best_accuracy))

                if accuracy > best_accuracy:
                    path_to_checkpoint_file = saver.save(sess, os.path.join(path_to_train_log_dir, 'model.ckpt'),
                                                         global_step=global_step_val)
                    print('=> Model saved to file: %s' % path_to_checkpoint_file)
                    patience = initial_patience
                    best_accuracy = accuracy
                else:
                    # Early stopping: a negative initial patience (e.g. -1) never reaches 0,
                    # so training continues indefinitely.
                    patience -= 1

                print('=> patience = %d' % patience)

                if patience == 0:
                    break

            coord.request_stop()
            coord.join(threads)
            print('Finished')
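

# main resolves data and log paths from the flags, reads the train/validation example
# counts from meta.json, and hands everything to _train.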
def main(_):
    path_to_train_tfrecords_file = os.path.join(FLAGS.data_dir, 'train.tfrecords')
    path_to_val_tfrecords_file = os.path.join(FLAGS.data_dir, 'val.tfrecords')
    path_to_tfrecords_meta_file = os.path.join(FLAGS.data_dir, 'meta.json')
    path_to_train_log_dir = FLAGS.train_logdir
    path_to_restore_checkpoint_file = FLAGS.restore_checkpoint
    training_options = {
        'batch_size': FLAGS.batch_size,
        'learning_rate': FLAGS.learning_rate,
        'patience': FLAGS.patience,
        'decay_steps': FLAGS.decay_steps,
        'decay_rate': FLAGS.decay_rate
    }

    meta = Meta()
    meta.load(path_to_tfrecords_meta_file)

    _train(path_to_train_tfrecords_file, meta.num_train_examples,
           path_to_val_tfrecords_file, meta.num_val_examples,
           path_to_train_log_dir, path_to_restore_checkpoint_file,
           training_options)


if __name__ == '__main__':
    tf.app.run(main=main)
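
# Example usage (illustrative invocations; they assume train.tfrecords, val.tfrecords and
# meta.json already exist under --data_dir):
#
#   python train.py --data_dir ./data --train_logdir ./logs/train
#   python train.py --restore_checkpoint ./logs/train/model.ckpt-100 --learning_rate 1e-3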