
tensorboard can't find event files

I tried to use TensorBoard to visualize an image classifier built with a DNN. I'm sure the directory path is correct, but no data is shown. Running tensorboard --inspect --logdir='PATH/' returns: No event files found within logdir 'PATH/'

I suspect there must be something wrong with my code.

Graph

batch_size = 500

graph = tf.Graph()
with graph.as_default():

  # Input data. For the training data, we use a placeholder that will be fed
  # at run time with a training minibatch.
  with tf.name_scope('train_input'):
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size),
                                      name = 'train_x_input')

    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels),
                                     name = 'train_y_input')
  with tf.name_scope('validation_input'):
    tf_valid_dataset = tf.constant(valid_dataset, name = 'valid_x_input')
    tf_test_dataset = tf.constant(test_dataset, name = 'valid_y_input')

  # Variables.
  with tf.name_scope('layer'):
    with tf.name_scope('weights'):
        weights = tf.Variable(
            tf.truncated_normal([image_size * image_size, num_labels]),
            name = 'W')
        variable_summaries(weights)
    with tf.name_scope('biases'):
        biases = tf.Variable(tf.zeros([num_labels]), name = 'B')
        variable_summaries(biases)
  # Training computation.
  with tf.name_scope('Wx_plus_b'):
    logits = tf.matmul(tf_train_dataset, weights) + biases
    tf.summary.histogram('logits', logits)
  with tf.name_scope('loss'):
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits),
        name = 'loss')
    tf.summary.histogram('loss', loss)
    tf.summary.scalar('loss_scalar', loss)

  # Optimizer.
  with tf.name_scope('optimizer'):
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)
  test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
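
The graph code calls variable_summaries, which isn't shown in the question; presumably it is the helper from the TensorFlow 1.x summaries tutorial. A minimal sketch of such a helper, included here only as an assumption about what it does:

def variable_summaries(var):
  """Attach mean/stddev/min/max/histogram summaries to a tensor (TF 1.x)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)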

Run

num_steps = 1001
t1 = time.time()
with tf.Session(graph=graph) as session:
  merged = tf.summary.merge_all()
  writer = tf.summary.FileWriter('C:/Users/Dr_Chenxy/Documents/pylogs', session.graph)
  tf.global_variables_initializer().run()
  print("Initialized")
  for step in range(num_steps):
    # Pick an offset within the training data, which has been randomized.
    # Note: we could use better randomization across epochs.
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)  # e.g. (step * 500) % (200000 - 500)
    # Generate a minibatch.
    batch_data = train_dataset[offset:(offset + batch_size), :]   # choose training set for this iteration
    batch_labels = train_labels[offset:(offset + batch_size), :]  # choose labels for this iteration
    # Prepare a dictionary telling the session where to feed the minibatch.
    # The key of the dictionary is the placeholder node of the graph to be fed,
    # and the value is the numpy array to feed to it.
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
    _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 100 == 0):
      print("Minibatch loss at step %d: %f" % (step, l))
      print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
      print("Validation accuracy: %.1f%%" % accuracy(
        valid_prediction.eval(), valid_labels))
  print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
t2 = time.time()
print('Running time', t2-t1, 'seconds')
asked Mar 13 '17 by Xiuyuanc


1 Answer

Solved. For anyone who is as bad at the command line as I am: the problem is that on the Windows command line you should not wrap the log directory in quotes. Say your data is at X:\X\file.x. Go to X:\ in the command prompt first, then type:

tensorboard --logdir=X/

NOT:

tensorboard --logdir='X/'
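
Note also that, even once TensorBoard can see the log directory, the scalar and histogram summaries defined in the graph will only appear if the merged summary op is actually evaluated and written. The code in the question creates merged = tf.summary.merge_all() but never runs it or calls writer.add_summary, so only the graph definition is recorded. A minimal sketch of how the training loop could be changed, reusing the names from the question (TF 1.x API):

    # inside the existing `for step in range(num_steps):` loop, also evaluate the merged summary op
    _, l, predictions, summary = session.run(
        [optimizer, loss, train_prediction, merged], feed_dict=feed_dict)
    writer.add_summary(summary, step)  # append this step's summaries to the event file

  # after the loop, make sure everything is flushed to disk
  writer.flush()
  writer.close()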

answered Oct 03 '22 by Xiuyuanc