I am a beginner. While working through TensorFlow's Programmer's Guide, I tried to define a dataset_input_fn function to use with an Estimator. I got an error that looks weird to me:
INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_model_dir': '/model', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
2018-03-12 10:22:14.699465: I C:\tf_jenkins\workspace\rel-win\M\windows\PY\36\tensorflow\core\platform\cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
2018-03-12 10:22:15.913858: W C:\tf_jenkins\workspace\rel-win\M\windows\PY\36\tensorflow\core\framework\op_kernel.cc:1202] OP_REQUIRES failed at iterator_ops.cc:870 : Invalid argument: Expected image (JPEG, PNG, or GIF), got empty file [[Node: DecodeJpeg = DecodeJpeg[acceptable_fraction=1, channels=0, dct_method="", fancy_upscaling=true, ratio=1, try_recover_truncated=false]]]
Traceback (most recent call last):
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1361, in _do_call
    return fn(*args)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1340, in _run_fn
    target_list, status, run_metadata)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 516, in __exit__
    c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: Expected image (JPEG, PNG, or GIF), got empty file
  [[Node: DecodeJpeg = DecodeJpeg[acceptable_fraction=1, channels=0, dct_method="", fancy_upscaling=true, ratio=1, try_recover_truncated=false]]]
  [[Node: IteratorGetNext = IteratorGetNext[output_shapes=[[?,28,28,1], [?]], output_types=[DT_FLOAT, DT_INT32], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]]

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "F:\Program Files\JetBrains\PyCharm 2017.3.3\helpers\pydev\pydev_run_in_console.py", line 53, in run_file
    pydev_imports.execfile(file, globals, locals)  # execute the script
  File "F:\Program Files\JetBrains\PyCharm 2017.3.3\helpers\pydev_pydev_imps_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "E:/Learning_process/semester2018_spring/deep_learning/meituan/MNIST/demo_cnn_mnist_meituan.py", line 201, in <module>
    tf.app.run(main)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\platform\app.py", line 126, in run
    _sys.exit(main(argv))
  File "E:/Learning_process/semester2018_spring/deep_learning/meituan/MNIST/demo_cnn_mnist_meituan.py", line 195, in main
    steps=50)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\estimator\estimator.py", line 352, in train
    loss = self._train_model(input_fn, hooks, saving_listeners)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\estimator\estimator.py", line 891, in _train_model
    _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\training\monitored_session.py", line 546, in run
    run_metadata=run_metadata)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\training\monitored_session.py", line 1022, in run
    run_metadata=run_metadata)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\training\monitored_session.py", line 1113, in run
    raise six.reraise(*original_exc_info)
  File "F:\Anaconda3\lib\site-packages\six.py", line 693, in reraise
    raise value
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\training\monitored_session.py", line 1098, in run
    return self._sess.run(*args, **kwargs)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\training\monitored_session.py", line 1170, in run
    run_metadata=run_metadata)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\training\monitored_session.py", line 950, in run
    return self._sess.run(*args, **kwargs)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 905, in run
    run_metadata_ptr)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1137, in _run
    feed_dict_tensor, options, run_metadata)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1355, in _do_run
    options, run_metadata)
  File "F:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1374, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Expected image (JPEG, PNG, or GIF), got empty file
  [[Node: DecodeJpeg = DecodeJpeg[acceptable_fraction=1, channels=0, dct_method="", fancy_upscaling=true, ratio=1, try_recover_truncated=false]]]
  [[Node: IteratorGetNext = IteratorGetNext[output_shapes=[[?,28,28,1], [?]], output_types=[DT_FLOAT, DT_INT32], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]]
PyDev console: using IPython 6.1.0
The code is as follows:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import numpy as np
import os
import tensorflow as tf
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument("--batch_size", default=100, type=int, help='batch_size')
# parser.add_argument("--train_steps", default=1000, type=int, help="train_steps")
parser.add_argument("--model_dir", default='/model', type=str, help='model_dir')
parser.add_argument("--data_dir", default='', type=str, help="data_dir")
def cnn_model(features, labels, mode):
"""
:param features:
:param labels:
:param mode:
:return:
"""
# input
input_layer = tf.reshape(features['image'], [-1, 28, 28, 1])
conv1 = tf.layers.conv2d(inputs=input_layer,
filters = 32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1,
pool_size=[2, 2],
strides=2)
conv2 = tf.layers.conv2d(inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2,
pool_size=[2, 2],
strides=2)
pool_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
dense = tf.layers.dense(inputs=pool_flat,
units=1024,
activation=tf.nn.relu)
dropout = tf.layers.dropout(inputs=dense,
rate=0.4,
training=mode == tf.estimator.ModeKeys.TRAIN)
logits = tf.layers.dense(inputs=dropout,
units=10,
activation=None)
predictions = {
'class_ids': tf.argmax(logits, 1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode,
predictions=predictions)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels,
predictions=tf.argmax(logits, 1))
}
return tf.estimator.EstimatorSpec(mode,
loss=loss,
eval_metric_ops=eval_metric_ops)
# train
assert mode == tf.estimator.ModeKeys.TRAIN
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode,
loss=loss,
train_op=train_op)
def dataset_input_fn(filenames):
"""
:param filenames: tfrecord file's path
:return:
"""
# filenames = ['train.tfrecords', 'test.tfrecords']
dataset = tf.data.TFRecordDataset(filenames)
def _parse(record):
features = {"image": tf.FixedLenFeature((), tf.string, default_value=""),
"label": tf.FixedLenFeature((), tf.int64, default_value=0)}
parsed = tf.parse_single_example(record, features)
image = tf.image.decode_jpeg(parsed["image"])
image = tf.cast(image, tf.float32)
# image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.reshape(image, [28, 28, 1])
# image = tf.cast(image, tf.float32)
# image = tf.decode_raw(features['image'], tf.float64)
label = tf.cast(parsed['label'], tf.int32)
return {'image': image}, label
dataset = dataset.map(_parse)
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(100)
dataset = dataset.repeat(1)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
# features = tf.cast(features, tf.float32)
return features, labels
def main(argv):
"""
:param argv:
:return:
"""
args = parser.parse_args(argv[1:])
train_path = ['train.tfrecords']
test_path = ['test.tfrecords']
print("\ndata has been loaded as 'train_x' and 'train_y'\n")
classifier = tf.estimator.Estimator(model_fn=cnn_model,
model_dir=args.model_dir)
classifier.train(
input_fn=lambda: dataset_input_fn(train_path),
steps=50)
print("\ntraining process is done\n")
if __name__ == '__main__':
    tf.app.run(main)
At least one of your examples isn't an image.
You can check the file type before feeding the images to the neural network.
I do it with the imghdr library:
import imghdr
import os

images_dir = "images_path"
# imghdr.what() needs a path it can open, so join the directory with each file name.
# Building a new list (instead of removing while iterating) avoids skipping entries.
l_FileNames = [f for f in os.listdir(images_dir)
               if imghdr.what(os.path.join(images_dir, f)) == "png"]
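If the bad bytes are already baked into train.tfrecords, you can apply the same check while writing the file. Here is a minimal sketch, assuming your source images sit in some folder image_dir and you have an illustrative labels dict mapping file name to an integer class label; it reuses the 'image'/'label' feature keys your parser expects:
import imghdr
import os
import tensorflow as tf

def write_clean_tfrecord(image_dir, labels, output_path):
    # labels: illustrative dict mapping file name -> integer class label.
    with tf.python_io.TFRecordWriter(output_path) as writer:
        for name in sorted(os.listdir(image_dir)):
            path = os.path.join(image_dir, name)
            # Skip empty or non-JPEG files; imghdr.what() returns None for them.
            if imghdr.what(path) != 'jpeg':
                continue
            with open(path, 'rb') as f:
                image_bytes = f.read()
            example = tf.train.Example(features=tf.train.Features(feature={
                'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
                'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[labels[name]])),
            }))
            writer.write(example.SerializeToString())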
The error seems to be that some of your examples contain no actual image data. Basically, when you call image = tf.image.decode_jpeg(parsed["image"]), the parsed["image"] string is empty for at least one record, so DecodeJpeg has nothing to decode.
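To confirm which records are affected, here is a minimal sketch (assuming the train.tfrecords/test.tfrecords files from your question and the same "image" feature key) that walks the files and reports every record whose image bytes are empty:
import tensorflow as tf

for path in ['train.tfrecords', 'test.tfrecords']:
    for i, record in enumerate(tf.python_io.tf_record_iterator(path)):
        example = tf.train.Example()
        example.ParseFromString(record)
        value = example.features.feature['image'].bytes_list.value
        if not value or len(value[0]) == 0:
            print('%s: record %d has empty image bytes' % (path, i))
Any record flagged here will trigger exactly the "got empty file" error when DecodeJpeg reaches it; regenerating the TFRecord without those records (or fixing how the file was written) resolves the problem.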