python - OutOfRangeError: FIFOQueue 'batch/fifo_queue' in tf.train.batch
I want to solve this problem. I made sure that num_epochs=None is passed to tf.train.slice_input_producer and reduced the batch_size from 64 to 16. However, the OutOfRangeError does not disappear. I have tried everything I could find on Stack Overflow ('OutOfRange', 'FIFOQueue', 'insufficient elements', 'train.batch', 'slice_input_producer', ...). If you know how to solve this problem, please let me know.
    import matplotlib.pyplot as plt
    import tensorflow as tf
    import numpy as np
    import os
    import time
    import re
    from datetime import datetime
    from datetime import timedelta
    from nets import pam_cnn, fd_cnn
    import load_jpeg_with_tensorflow

    flags = tf.app.flags
    FLAGS = flags.FLAGS
    FLAGS.height = 250
    FLAGS.width = 250
    FLAGS.num_classes = 2
    FLAGS.batch_size = 16

    ########################################################
    # Load data
    ########################################################
    main_dir = './data/lfwdata/lfw_train/'
    log_dir = 'tmp/pam/'
    num_classes = 2  # number of bubbles x 2
    # batch_img, batch_label = load_jpeg_with_tensorflow.read_data_batch(train_dir, 'trainimagelist.csv',
    #                                                                    height, width, num_channels, batch_size=batch_size)

    ########################################################
    # Placeholder variables
    ########################################################
    x = tf.placeholder(tf.float32, shape=[None, FLAGS.height, FLAGS.width, 3], name='input')
    y = tf.placeholder(tf.float32, shape=[None, num_classes], name='label')
    y_cls = tf.argmax(y, dimension=1)

    image_batch, label_batch, file_batch = load_jpeg_with_tensorflow.read_data_batch(
        main_dir + 'trainimagelist.csv', FLAGS.height, FLAGS.width, FLAGS.batch_size)

    ########################################################
    # Training process
    ########################################################
    keep_prob = tf.placeholder(tf.float32)
    prediction = fd_cnn.build_model(x, keep_prob)

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    tf.summary.scalar('loss', loss)
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)

    validate_image_batch, validate_label_batch, validate_file_batch = load_jpeg_with_tensorflow.read_data_batch(
        main_dir + 'testimagelist.csv', FLAGS.height, FLAGS.width, FLAGS.batch_size)

    label_max = tf.argmax(y, 1)
    pre_max = tf.argmax(prediction, 1)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

    start_time = datetime.now()
    iteration = 20
    summary = tf.summary.merge_all()

    ########################################################
    # TensorFlow run
    ########################################################
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
        saver = tf.train.Saver()
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        sess.run(tf.initialize_all_variables())

        for i in range(iteration):
            images_, labels_ = sess.run([image_batch, label_batch])
            # images_ = image_batch
            # labels_ = label_batch
            sess.run(optimizer, feed_dict={x: images_, y: labels_, keep_prob: 0.5})

            if i % 10 == 0:
                now = datetime.now() - start_time
                print('## time:', now, ' steps:', i)
                rt = sess.run([label_max, pre_max, loss, accuracy],
                              feed_dict={x: images_, y: labels_, keep_prob: 1.0})
                print('Prediction loss:', rt[2], ' accuracy:', rt[3])

                # validation steps
                validate_images_, validate_labels_ = sess.run([validate_image_batch, validate_label_batch])
                rv = sess.run([label_max, pre_max, loss, accuracy],
                              feed_dict={x: validate_images_, y: validate_labels_, keep_prob: 1.0})
                print('Validation loss:', rv[2], ' accuracy:', rv[3])

                if rv[3] > 0.9:
                    break

                # validation accuracy summary
                summary_str = sess.run(summary, feed_dict={x: validate_images_, y: validate_labels_, keep_prob: 1.0})
                summary_writer.add_summary(summary_str, i)
                summary_writer.flush()

        saver.save(sess, 'face_recog')  # save session
        coord.request_stop()
        coord.join(threads)
        print('finish')
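For reference only (this is not part of the code above): the usual TF 1.x queue-runner loop wraps the training step in a try/except for tf.errors.OutOfRangeError, the exception named in the title, which is raised when the batch queue is closed and holds fewer than batch_size elements. The toy, self-contained sketch below deliberately exhausts a producer by using a finite num_epochs (the value 2 is arbitrary) to show where the exception surfaces:

    import tensorflow as tf

    # A producer with a finite num_epochs eventually closes its queue, which is
    # what makes the downstream tf.train.batch raise OutOfRangeError.
    data = list(range(100))
    item = tf.train.slice_input_producer([data], num_epochs=2, shuffle=False)[0]
    batch = tf.train.batch([item], batch_size=16)

    with tf.Session() as sess:
        # num_epochs creates a local (not global) counter variable, so local
        # variables must be initialized as well.
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            while not coord.should_stop():
                print(sess.run(batch))
        except tf.errors.OutOfRangeError:
            # Raised once fewer than batch_size elements remain and the producer is done.
            print('Queue exhausted: OutOfRangeError')
        finally:
            coord.request_stop()
            coord.join(threads)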
The contents of 'load_jpeg_with_tensorflow.py' are below:
    import re
    import tensorflow as tf


    def get_input_queue(csv_file_name, num_epochs=None):
        train_images = []
        train_labels = []
        for line in open(csv_file_name, 'r'):
            cols = re.split(',|\n', line)
            train_images.append(cols[0])
            train_labels.append([float(cols[1])])
            # train_labels.append([float(cols[1]), float(cols[2])])
        print([train_images, train_labels])
        print("Number of images :", len(train_images))
        input_queue = tf.train.slice_input_producer([train_images, train_labels],
                                                    num_epochs=num_epochs)  # should be a small num_epochs  # batch_size x
        return input_queue


    def read_data(input_queue):
        image_file = input_queue[0]
        label = input_queue[1]
        image = tf.image.decode_jpeg(tf.read_file(image_file), channels=3)
        return image, label, image_file


    def read_data_batch(csv_file_name, height, width, batch_size):
        input_queue = get_input_queue(csv_file_name)
        image, label, file_name = read_data(input_queue)
        image = tf.reshape(image, [height, width, 3])
        batch_image, batch_label, batch_file = tf.train.batch([image, label, file_name],
                                                              batch_size=batch_size)  # add allow_smaller_final_batch=True
        batch_file = tf.reshape(batch_file, [batch_size, 1])
        return batch_image, batch_label, batch_file
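As an aside, here is a hypothetical example of the CSV line format that get_input_queue() appears to expect, one "<image path>,<label>" pair per line (the file name below is made up; only the directory prefix comes from the training script):

    import re

    sample = "./data/lfwdata/lfw_train/img_0001.jpg,1\n"

    cols = re.split(',|\n', sample)
    print(cols[0])         # ./data/lfwdata/lfw_train/img_0001.jpg  -> image file path
    print(float(cols[1]))  # 1.0                                    -> label value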