Tensorflow dimensions are not compatible in CNN

This is main.py:

# pylint: disable=missing-docstring 
from __future__ import absolute_import 
from __future__ import division 
from __future__ import print_function 

import time 

from six.moves import xrange # pylint: disable=redefined-builtin 
import tensorflow as tf 
from pylab import * 

import cnn 

# Basic model parameters as external flags. 
flags = tf.app.flags 
FLAGS = flags.FLAGS 
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.') 
flags.DEFINE_integer('max_steps', 2000, 'Number of steps to run trainer.') 
flags.DEFINE_integer('batch_size', 1000, 'Batch size. Must divide evenly into the dataset sizes.') 


def placeholder_inputs(batch_size): 

    images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, cnn.IMAGE_WIDTH, cnn.IMAGE_HEIGHT, 1)) 
    labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size)) 
    return images_placeholder, labels_placeholder 


def fill_feed_dict(data_set, images_pl, labels_pl): 

    data_set = loadtxt("../dataset/images") 
    images = data_set[:,:115*25] 
    labels_feed = data_set[:,115*25:] 
    images_feed = tf.reshape(images, [batch_size, cnn.IMAGE_WIDTH, cnn.IMAGE_HEIGHT, 1]) 

    feed_dict = { 
     images_pl: images_feed, 
     labels_pl: labels_feed, 
    } 
    return feed_dict 

def run_training(): 

    with tf.Graph().as_default(): 

        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        logits = cnn.inference(images_placeholder)
        loss = cnn.loss(logits, labels_placeholder)
        train_op = cnn.training(loss, FLAGS.learning_rate)
        eval_correct = cnn.evaluation(logits, labels_placeholder)
        summary_op = tf.merge_all_summaries()
        init = tf.initialize_all_variables()

        saver = tf.train.Saver()
        sess = tf.Session()
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

        sess.run(init)
        feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder)
        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            duration = time.time() - start_time

            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                # Print status to stdout.
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

        predictions = sess.run(logits, feed_dict=feed_dict)
        savetxt("predictions", predictions)

def main(_): 
    run_training() 

if __name__ == '__main__': 
    tf.app.run() 

Then, cnn.py:

from __future__ import absolute_import 
from __future__ import division 
from __future__ import print_function 

import math 

import tensorflow as tf 

NUM_OUTPUT = 4 

IMAGE_WIDTH = 115 
IMAGE_HEIGHT = 25 
IMAGE_PIXELS = IMAGE_WIDTH * IMAGE_HEIGHT 


def inference(images): 
    # Conv 1 
    with tf.name_scope('conv1'):
        kernel = tf.Variable(tf.truncated_normal(stddev = 1.0/math.sqrt(float(IMAGE_PIXELS)), name='weights', shape=[5, 5, 1, 10]))
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='VALID')
        biases = tf.Variable(tf.constant(0.0, name='biases', shape=[10]))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name='conv1')

    # Pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding='VALID', name='pool1')

    # Conv 2
    with tf.name_scope('conv2'):
        kernel = tf.Variable(tf.truncated_normal(stddev = 1.0/math.sqrt(float(IMAGE_PIXELS)), name='weights', shape=[5, 5, 10, 20]))
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='VALID')
        biases = tf.Variable(tf.constant(0.1, name='biases', shape=[20]))
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name='conv2')

    # Pool2
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding='VALID', name='pool2')

    # Identity
    with tf.name_scope('identity'):
        weights = tf.Variable(tf.truncated_normal([11, NUM_OUTPUT], stddev=1.0/math.sqrt(float(11))), name='weights')
        biases = tf.Variable(tf.zeros([NUM_OUTPUT], name='biases'))
        logits = tf.matmul(pool2, weights) + biases

    return output 


def loss(outputs, labels): 

    rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(targets, outputs))), name="rmse") 
    return rmse 


def training(loss, learning_rate): 

    tf.scalar_summary(loss.op.name, loss) 
    optimizer = tf.train.GradientDescentOptimizer(learning_rate) 
    global_step = tf.Variable(0, name='global_step', trainable=False) 
    train_op = optimizer.minimize(loss, global_step=global_step) 
    return train_op 

And I get this error message:

Traceback (most recent call last): 
    File "main.py", line 84, in <module> 
    tf.app.run() 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 30, in run 
    sys.exit(main(sys.argv)) 
    File "main.py", line 81, in main 
    run_training() 
    File "main.py", line 47, in run_training 
    logits = cnn.inference(images_placeholder) 
    File "/home/andrea/test/python/cnn.py", line 31, in inference 
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='VALID') 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_nn_ops.py", line 394, in conv2d 
    data_format=data_format, name=name) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 704, in apply_op 
    op_def=op_def) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2262, in create_op 
    set_shapes_for_outputs(ret) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1702, in set_shapes_for_outputs 
    shapes = shape_func(op) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/common_shapes.py", line 230, in conv2d_shape 
    input_shape[3].assert_is_compatible_with(filter_shape[2]) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_shape.py", line 108, in assert_is_compatible_with 
    % (self, other)) 
ValueError: Dimensions 1 and 10 are not compatible 

I don't understand why; the dimensions look fine to me. The input is 1000 samples of 115 (width) x 25 (height) x 1 (color). I am using 'VALID' padding and have checked the computation by hand. I am not sure where the mismatch comes from. Can anyone help? TensorFlow rc0.9 on Ubuntu 14.04. (Note: there may be other errors in the code that I am not yet aware of; please ignore them.)
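For reference, the by-hand shape calculation looks roughly like this (a small sketch; the helper functions are only illustrative and assume stride-1 'VALID' convolutions and 3x3, stride-3 'VALID' pooling, with conv2 fed from pool1 as intended):

def valid_conv(size, k):
    # output length of a stride-1 'VALID' convolution with kernel size k
    return size - k + 1

def valid_pool(size, k, s):
    # output length of a 'VALID' pooling with window k and stride s
    return (size - k) // s + 1

w, h = 115, 25
w, h = valid_conv(w, 5), valid_conv(h, 5)          # conv1: 111 x 21 x 10
w, h = valid_pool(w, 3, 3), valid_pool(h, 3, 3)    # pool1:  37 x  7 x 10
w, h = valid_conv(w, 5), valid_conv(h, 5)          # conv2:  33 x  3 x 20
w, h = valid_pool(w, 3, 3), valid_pool(h, 3, 3)    # pool2:  11 x  1 x 20
print(w, h)                                        # 11 1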

Answer


Slight typo:

In your second convolution:

conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='VALID') 

change images to pool1 (the second convolution is being fed the raw images, which have only 1 channel, while its kernel expects 10 input channels, and that is where "Dimensions 1 and 10 are not compatible" comes from):

conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='VALID') 
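A quick way to confirm the shapes line up after this change is to print the statically inferred shapes while building the graph. A minimal sketch using the same r0.9-era calls as the question (the stddev values are arbitrary, for illustration only):

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=(1000, 115, 25, 1))
kernel1 = tf.Variable(tf.truncated_normal([5, 5, 1, 10], stddev=0.1))
conv1 = tf.nn.conv2d(images, kernel1, [1, 1, 1, 1], padding='VALID')
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding='VALID')

kernel2 = tf.Variable(tf.truncated_normal([5, 5, 10, 20], stddev=0.1))
# Feeding pool1 (10 channels) matches the [5, 5, 10, 20] kernel; feeding images (1 channel) does not.
conv2 = tf.nn.conv2d(pool1, kernel2, [1, 1, 1, 1], padding='VALID')

print(pool1.get_shape())   # (1000, 37, 7, 10)
print(conv2.get_shape())   # (1000, 33, 3, 20)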

Oh my god, I spent almost the whole day looking for the error ... and that was it. Thank you very much –