
TypeError: ('Keyword argument not understood:', 'inputs')

The following code is for disease detection with a CNN model using TensorFlow and Keras. For some reason I keep getting a TypeError complaining about the keyword argument 'inputs', and I don't understand why it is being raised. Here is my code:

from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np # linear algebra
import pandas as pd # data processing CSV file
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
import cv2
import matplotlib.pyplot as plt
import seaborn as sns # seaborn is a data visualization library for python graphs
from PIL import Image
import os #file path interacting with operating system

thisFolder = os.path.dirname(os.path.realpath(__file__))

print(thisFolder)
print(tf.__version__)

infected = os.listdir(thisFolder + '/cell_images/cell_images/Parasitized/')
uninfected = os.listdir(thisFolder +'/cell_images/cell_images/Uninfected/')

data = []
labels = []

for i in infected:
    try: 
        image = cv2.imread(thisFolder + "/cell_images/cell_images/Parasitized/"+i)
        image_array = Image.fromarray(image , 'RGB')
        resize_img = image_array.resize((50 , 50))
        rotated45 = resize_img.rotate(45)
        rotated75 = resize_img.rotate(75)
        blur = cv2.blur(np.array(resize_img) ,(10, 10))
        data.append(np.array(resize_img)) 
        data.append(np.array(rotated45))
        data.append(np.array(rotated75))
        data.append(np.array(blur))
        labels.append(1)
        labels.append(1)
        labels.append(1)
        labels.append(1)

    except AttributeError:
        print('')

for u in uninfected:
    try:
        image = cv2.imread("../input/cell_images/cell_images/Uninfected/"+u)
        image_array = Image.fromarray(image , 'RGB')
        resize_img = image_array.resize((50 , 50))
        rotated45 = resize_img.rotate(45)
        rotated75 = resize_img.rotate(75)
        data.append(np.array(resize_img))
        data.append(np.array(rotated45))
        data.append(np.array(rotated75))
        labels.append(0)
        labels.append(0)
        labels.append(0)

    except AttributeError:
        print('')

cells = np.array(data)
labels = np.array(labels)

np.save('Cells' , cells)
np.save('Labels' , labels)

print('Cells : {} | labels : {}'.format(cells.shape , labels.shape))

# plt.figure(1 , figsize = (15, 9)) # all graphs and displays
n = 0
for i in range(49):
    n += 1
    r = np.random.randint(0 , cells.shape[0] , 1)
    plt.subplot(7 , 7, n)
    plt.subplots_adjust(hspace = 0.5 , wspace = 0.5)
    plt.imshow(cells[r[0]])
    plt.title('{} : {}'.format('Infected' if labels[r[0]] == 1 else 'Uninfected', labels[r[0]]))
    plt.xticks([]) , plt.yticks([])

plt.figure(1, figsize = (15 , 7))
plt.subplot(1 , 2 , 1)
plt.imshow(cells[0])
plt.title('Infected Cell')
plt.xticks([]) , plt.yticks([])

n = np.arange(cells.shape[0])
np.random.shuffle(n)
cells = cells[n]
labels = labels[n]

cells = cells.astype(np.float32)
labels = labels.astype(np.int32)
cells = cells/255

from sklearn.model_selection import train_test_split

train_x , x , train_y , y = train_test_split(cells , labels ,
                                            test_size = 0.2 ,
                                            random_state = 111)

eval_x , test_x , eval_y , test_y = train_test_split(x , y ,
                                                    test_size = 0.5 ,
                                                    random_state = 111)
plt.figure(1 , figsize = (15 ,5))
n = 0
for z , j in zip([train_y , eval_y , test_y] , ['train labels','eval labels','test labels']):
    n += 1
    plt.subplot(1 , 3  , n)
    sns.countplot(x = z )
    plt.title(j)
# plt.show()


print('train data shape {} ,eval data shape {} , test data shape {}'.format(train_x.shape,
                                                                           eval_x.shape ,
                                                                           test_x.shape))
from tensorflow.python.framework import ops
ops.reset_default_graph()

def cnn_model_fn(features , labels , mode):
    input_layers = tf.reshape(features['x'] , [-1 , 50 , 50 ,3])
    conv1 = tf.compat.v1.layers.Conv2D(
        inputs = input_layers ,
        filters = 50 ,
        kernel_size = [7 , 7],
        padding = 'same',
        activation = tf.nn.relu
        )


    conv2 = tf.layers.conv2d(
        inputs = conv1,
        filters = 90,
        kernel_size = [3 , 3],
        padding = 'valid',
        activation = tf.nn.relu
        )


    conv3 = tf.layers.conv2d(
        inputs = conv2 ,
        filters = 10,
        kernel_size = [5 , 5],
        padding = 'same',
        activation = tf.nn.relu
        )

    pool1 = tf.layers.max_pooling2d(inputs = conv3 , pool_size = [2 , 2] ,
                                    strides = 2 )
    conv4 = tf.layers.conv2d(
        inputs = pool1 ,
        filters = 5,
        kernel_size = [3 , 3],
        padding = 'same',
        activation = tf.nn.relu
        )

    pool2 = tf.layers.max_pooling2d(inputs = conv4 , pool_size = [2 , 2] ,
                                    strides = 2 , padding = 'same')

    pool2_flatten = tf.layers.flatten(pool2)
    fc1 = tf.layers.dense(
        inputs = pool2_flatten,
        units = 2000,
        activation = tf.nn.relu
        )
    fc2 = tf.layers.dense(
        inputs = fc1,
        units = 1000,
        activation = tf.nn.relu
        )
    fc3 = tf.layers.dense(
        inputs = fc2 ,
        units = 500 ,
        activation = tf.nn.relu
        )
    logits = tf.layers.dense(
        inputs = fc3 ,
        units = 2
        )

    predictions = {
        'classes': tf.argmax(input = logits , axis = 1),
        'probabilities': tf.nn.softmax(logits , name = 'softmax_tensor')
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode = mode ,
                                          predictions = predictions)

    loss = tf.losses.sparse_softmax_cross_entropy(labels = labels ,
                                                 logits = logits)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.001)
        train_op = optimizer.minimize(loss = loss ,
                                      global_step = tf.train.get_global_step())

        return tf.estimator.EstimatorSpec(mode = mode ,
                                            loss = loss ,
                                            train_op = train_op
                                           )
    eval_metric_op = {'accuracy' : tf.metrics.accuracy(labels = labels ,
                                         predictions =  predictions['classes'])}

    logging_hook = tf.train.LoggingTensorHook(
        tensors = tensors_to_log , every_n_iter = 50
        )

    return tf.estimator.EstimatorSpec(mode = mode ,
                                      loss = loss ,
                                      eval_metric_ops = eval_metric_op)

# Checkpoint saving training values
malaria_detector = tf.estimator.Estimator(model_fn = cnn_model_fn ,
                                         model_dir = '/tmp/modelchkpt')

tensors_to_log = {'probabilities':'softmax_tensor'}
logging_hook = tf.estimator.LoggingTensorHook(
    tensors = tensors_to_log , every_n_iter = 50
    )

train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x = {'x': train_x},
    y = train_y,
    batch_size = 100 ,
    num_epochs = None ,
    shuffle = True
    )
malaria_detector.train(input_fn = train_input_fn , steps = 1 , hooks = [logging_hook])

malaria_detector.train(input_fn = train_input_fn , steps = 10000)

eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x = {'x': eval_x},
    y = eval_y ,
    num_epochs = 1 ,
    shuffle = False
    )
eval_results = malaria_detector.evaluate(input_fn = eval_input_fn)
print(eval_results)

pred_input_fn = tf.estimator.inputs.numpy_input_fn(
    x = {'x' : test_x},
    y = test_y,
    num_epochs = 1,
    shuffle = False
    )

y_pred = malaria_detector.predict(input_fn = pred_input_fn)
classes = [p['classes'] for p in y_pred]

from sklearn.metrics import confusion_matrix , classification_report , accuracy_score
print('{} \n{} \n{}'.format(confusion_matrix(test_y , classes) ,
                           classification_report(test_y , classes) ,
                           accuracy_score(test_y , classes)))

plt.figure(1 , figsize = (15 , 9))
n = 0
for i in range(49):
    n += 1
    r = np.random.randint( 0  , test_x.shape[0] , 1)
    plt.subplot(7 , 7 , n)
    plt.subplots_adjust(hspace = 0.5 , wspace = 0.5)
    plt.imshow(test_x[r[0]])
    plt.title('true {} : pred {}'.format(test_y[r[0]] , classes[r[0]]) )
    plt.xticks([]) , plt.yticks([])

plt.show()
print("done")

And here is the error:

File "CNN.py", line 240, in <module>
    malaria_detector.train(input_fn = train_input_fn , steps = 1 , hooks = [logging_hook])
  File "/usr/local/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 374, in train
    loss = self._train_model(input_fn, hooks, saving_listeners)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1164, in _train_model
    return self._train_model_default(input_fn, hooks, saving_listeners)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1194, in _train_model_default
    features, labels, ModeKeys.TRAIN, self.config)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1152, in _call_model_fn
    model_fn_results = self._model_fn(features=features, **kwargs)
  File "CNN.py", line 136, in cnn_model_fn
    activation = tf.nn.relu
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/layers/convolutional.py", line 314, in __init__
    name=name, **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/keras/layers/convolutional.py", line 527, in __init__
    **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/keras/layers/convolutional.py", line 122, in __init__
    **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/layers/base.py", line 213, in __init__
    **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
    result = method(self, *args, **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 186, in __init__
    generic_utils.validate_kwargs(kwargs, allowed_kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/keras/utils/generic_utils.py", line 718, in validate_kwargs
    raise TypeError(error_message, kwarg)
TypeError: ('Keyword argument not understood:', 'inputs')

How can I fix this TypeError? I have installed TensorFlow 2.1 and upgraded Keras; I'm not sure whether that is related to this error, since it looks to me like a syntax issue.

Thanks! - Satya

asked Mar 15 '20 by Satya Vejus

2 Answers

Just remove the inputs = (and outputs =) keyword arguments. In the newer TensorFlow/Keras API you don't pass the input tensor to the layer's constructor; you configure the layer and then call it on the tensor.
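For context, here is a minimal, hypothetical sketch of that layer-call pattern with the same shapes as in the question (this is plain Keras, not the asker's Estimator setup, and the architecture shown is only illustrative):

import tensorflow as tf

# Keras style: layers are configured in the constructor and applied
# to a tensor by calling them, not via an inputs= keyword argument.
inputs = tf.keras.Input(shape=(50, 50, 3))
x = tf.keras.layers.Conv2D(filters=50, kernel_size=(7, 7),
                           padding='same', activation='relu')(inputs)
x = tf.keras.layers.Conv2D(filters=90, kernel_size=(3, 3),
                           padding='valid', activation='relu')(x)
x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
x = tf.keras.layers.Flatten()(x)
outputs = tf.keras.layers.Dense(2)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)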

answered Sep 25 '22 by shubendu biswas


The error you are asking about (TypeError: ('Keyword argument not understood:', 'inputs')) is raised because you are calling the capitalized Conv2D class instead of the lowercase conv2d function in your first convolutional layer; the class constructor does not accept an inputs keyword. Change the following:

conv1 = tf.compat.v1.layers.Conv2D(
        inputs = input_layers ,
        filters = 50 ,
        kernel_size = [7 , 7],
        padding = 'same',
        activation = tf.nn.relu
        )

to:

conv1 = tf.compat.v1.layers.conv2d(
    inputs = input_layers ,
    filters = 50 ,
    kernel_size = [7 , 7],
    padding = 'same',
    activation = tf.nn.relu
    )

and the error will go away.
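If you prefer to keep the capitalized class, note that tf.compat.v1.layers.Conv2D is a layer object: you construct it with the layer hyperparameters and then call it on the tensor. A rough sketch under that assumption (variable names taken from the question, not tested against the full script):

conv1 = tf.compat.v1.layers.Conv2D(
    filters = 50,
    kernel_size = [7, 7],
    padding = 'same',
    activation = tf.nn.relu
    )(input_layers)  # build the layer, then apply it to the input tensor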

answered Sep 22 '22 by Bashir Kazimi