TypeError: Expected float32 passed to parameter 'y' of op 'Equal', got 'auto' of type 'str' instead

I am building a neural network to predict audio data (to learn more about how neural networks work and how to use TensorFlow), and so far everything has gone fairly smoothly, with one exception. I've searched around quite a bit and haven't found anything specific enough to help. The dataset and model build without problems, but when I try to train the model I get a TypeError, even though every value in the dataset is a 32-bit float. I'd appreciate an answer, or at least a push in the right direction. Code and console output are below. (All values in the dataset are between 0 and 1; I don't know if that's relevant, but I thought I'd mention it.)

EDIT: I've included the AudioHandler class as well, so you can reproduce the error. get_audio_array and get_audio_arrays convert a single mp3 or a directory of mp3s into array(s) of audio data, and dataset_from_arrays builds a dataset from the audio arrays created with get_audio_arrays.

# RNN.py
import tensorflow as tf
from AudioHandler import AudioHandler
import os

seq_length = 22050
BATCH_SIZE = 64
BUFFER_SIZE = 10000

audio_arrays = AudioHandler.get_audio_arrays("AudioDataset", normalized=True)

dataset = AudioHandler.dataset_from_arrays(audio_arrays, seq_length, BATCH_SIZE, buffer_size=BUFFER_SIZE)

print(dataset)

rnn_units = 256

def build_model(rnn_units, batch_size):
    model = tf.keras.Sequential([
        tf.keras.layers.InputLayer(batch_input_shape=(batch_size, None, 2)),

        tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True),

        tf.keras.layers.Dense(2)
    ])
    return model


model = build_model(rnn_units, BATCH_SIZE)

model.summary()

model.compile(optimizer='adam', loss=tf.keras.losses.MeanSquaredError)

EPOCHS = 10

history = model.fit(dataset, epochs=EPOCHS)

# AudioHandler.py
from pydub import AudioSegment
import numpy as np
from pathlib import Path
from tensorflow import data
import os


class AudioHandler:

    @staticmethod
    def print_audio_info(file_name):
        audio_segment = AudioSegment.from_file(file_name)
        print("Information of '" + file_name + "':")
        print("Sample rate: " + str(audio_segment.frame_rate) + "kHz")
        # Multiply frame_width by 8 to get bits, since it is given in bytes
        print("Sample width: " + str(audio_segment.frame_width * 8) + " bits per sample (" + str(
            int(audio_segment.frame_width * 8 / audio_segment.channels)) + " bits per channel)")
        print("Channels: " + str(audio_segment.channels))

    @staticmethod
    def get_audio_array(file_name, normalized=True):
        audio_segment = AudioSegment.from_file(file_name)
        # Get bytestring of raw audio data
        raw_audio_bytestring = audio_segment.raw_data
        # Adjust sample width to accommodate multiple channels in each sample
        sample_width = audio_segment.frame_width / audio_segment.channels
        # Convert bytestring to numpy array
        if sample_width == 1:
            raw_audio = np.array(np.frombuffer(raw_audio_bytestring, dtype=np.int8))
        elif sample_width == 2:
            raw_audio = np.array(np.frombuffer(raw_audio_bytestring, dtype=np.int16))
        elif sample_width == 4:
            raw_audio = np.array(np.frombuffer(raw_audio_bytestring, dtype=np.int32))
        else:
            raw_audio = np.array(np.frombuffer(raw_audio_bytestring, dtype=np.int16))
        # Normalize the audio data
        if normalized:
            # Cast the audio data as 32 bit floats
            raw_audio = raw_audio.astype(dtype=np.float32)
            # Make all values positive
            raw_audio += np.power(2, 8*sample_width)/2
            # Normalize all values between 0 and 1
            raw_audio *= 1/np.power(2, 8*sample_width)
        # Reshape the array to accommodate multiple channels
        if audio_segment.channels > 1:
            raw_audio = raw_audio.reshape((-1, audio_segment.channels))

        return raw_audio

    @staticmethod
    # Return an array of all audio files in directory, as arrays of audio data
    def get_audio_arrays(directory, filetype='mp3', normalized=True):

        file_count_total = len([name for name in os.listdir(directory) if os.path.isfile(os.path.join(directory, name))]) - 1

        audio_arrays = []
        # Iterate through all audio files
        pathlist = Path(directory).glob('**/*.' + filetype)
        # Keep track of progress
        file_count = 0
        print("Loading audio files... 0%")
        for path in pathlist:
            path_string = str(path)
            audio_array = AudioHandler.get_audio_array(path_string, normalized=normalized)
            audio_arrays.append(audio_array)
            # Update Progress
            file_count += 1
            print('Loading audio files... ' + str(int(file_count/file_count_total*100)) + '%')

        return audio_arrays

    @staticmethod
    def export_to_file(audio_data_array, file_name, normalized=True, file_type="mp3", bitrate="256k"):
        if normalized:
            audio_data_array *= np.power(2, 16)
            audio_data_array -= np.power(2, 16)/2
        audio_data_array = audio_data_array.astype(np.int16)
        audio_data_array = audio_data_array.reshape((1, -1))[0]
        raw_audio = audio_data_array.tostring()
        audio_segment = AudioSegment(data=raw_audio, sample_width=2, frame_rate=44100, channels=2)
        audio_segment.export(file_name, format=file_type, bitrate=bitrate)

    # Splits a sequence into input values and target values
    @staticmethod
    def __split_input_target(chunk):
        input_audio = chunk[:-1]
        target_audio = chunk[1:]
        return input_audio, target_audio

    @staticmethod
    def dataset_from_arrays(audio_arrays, sequence_length, batch_size, buffer_size=10000):
        # Create main data set, starting with first audio array
        dataset = data.Dataset.from_tensor_slices(audio_arrays[0])
        dataset = dataset.batch(sequence_length + 1, drop_remainder=True)
        # Split each audio array into sequences individually,
        # then concatenate each individual data set with the main data set
        for i in range(1, len(audio_arrays)):
            audio_data = audio_arrays[i]
            tensor_slices = data.Dataset.from_tensor_slices(audio_data)
            audio_dataset = tensor_slices.batch(sequence_length + 1, drop_remainder=True)
            dataset.concatenate(audio_dataset)

        dataset = dataset.map(AudioHandler.__split_input_target)

        dataset = dataset.shuffle(buffer_size).batch(batch_size, drop_remainder=True)

        return dataset
Loading audio files... 0%
Loading audio files... 25%
Loading audio files... 50%
Loading audio files... 75%
Loading audio files... 100%
2020-06-21 00:20:10.796993: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2020-06-21 00:20:10.811357: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x7fddb7b23fd0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2020-06-21 00:20:10.811368: I tensorflow/compiler/xla/service/service.cc:176]   StreamExecutor device (0): Host, Default Version
<BatchDataset shapes: ((64, 22050, 2), (64, 22050, 2)), types: (tf.float32, tf.float32)>
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
gru (GRU)                    (64, None, 256)           199680    
_________________________________________________________________
dense (Dense)                (64, None, 2)             514       
=================================================================
Total params: 200,194
Trainable params: 200,194
Non-trainable params: 0
_________________________________________________________________
Epoch 1/10
Traceback (most recent call last):
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/RNN.py", line 57, in <module>
    history = model.fit(dataset, epochs=EPOCHS)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py", line 66, in _method_wrapper
    return method(self, *args, **kwargs)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py", line 848, in fit
    tmp_logs = train_function(iterator)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 580, in __call__
    result = self._call(*args, **kwds)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 627, in _call
    self._initialize(args, kwds, add_initializers_to=initializers)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 506, in _initialize
    *args, **kwds))
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2446, in _get_concrete_function_internal_garbage_collected
    graph_function, _, _ = self._maybe_define_function(args, kwargs)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2777, in _maybe_define_function
    graph_function = self._create_graph_function(args, kwargs)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2667, in _create_graph_function
    capture_by_value=self._capture_by_value),
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 981, in func_graph_from_py_func
    func_outputs = python_func(*func_args, **func_kwargs)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 441, in wrapped_fn
    return weak_wrapped_fn().__wrapped__(*args, **kwds)
  File "/Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 968, in wrapper
    raise e.ag_error_metadata.to_exception(e)
TypeError: in user code:

    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:571 train_function  *
        outputs = self.distribute_strategy.run(
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:951 run  **
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
        return fn(*args, **kwargs)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:533 train_step  **
        y, y_pred, sample_weight, regularization_losses=self.losses)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/compile_utils.py:205 __call__
        loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/losses.py:143 __call__
        losses = self.call(y_true, y_pred)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/losses.py:246 call
        return self.fn(y_true, y_pred, **self._fn_kwargs)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/losses.py:313 __init__
        mean_squared_error, name=name, reduction=reduction)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/losses.py:229 __init__
        super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/keras/losses.py:94 __init__
        losses_utils.ReductionV2.validate(reduction)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/ops/losses/loss_reduction.py:67 validate
        if key not in cls.all():
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/ops/math_ops.py:1491 tensor_equals
        return gen_math_ops.equal(self, other, incompatible_shape_error=False)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/ops/gen_math_ops.py:3224 equal
        name=name)
    /Users/anonteau/Desktop/Development/Python/Lo-FiGenerator/venv/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:479 _apply_op_helper
        repr(values), type(values).__name__, err))

    TypeError: Expected float32 passed to parameter 'y' of op 'Equal', got 'auto' of type 'str' instead. Error: Expected float32, got 'auto' of type 'str' instead.
asked Jun 21 '20 by notpublic


3 Answers

Try changing

model.compile(optimizer='adam', loss=tf.keras.losses.MeanSquaredError)

to

model.compile(optimizer='adam', loss=tf.keras.losses.MeanSquaredError())

You were passing the loss class itself rather than an instance of it. As your traceback shows, Keras then calls that class like a loss function with (y_true, y_pred), so the target tensor lands in the constructor's reduction argument, and validating it against the allowed reduction strings ('auto', ...) is what raises the "Expected float32 ... got 'auto'" TypeError.
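
For reference, Keras accepts a loss given as a string name, as a loss instance, or as a plain loss function, just not as the bare class object. A minimal sketch of the equivalent valid forms (the tiny Dense model here is only a stand-in for illustration):

import tensorflow as tf

# Stand-in model, only so compile() has something to attach to
model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])

# Any of these is a valid way to specify mean squared error:
model.compile(optimizer='adam', loss='mse')                               # string shortcut
model.compile(optimizer='adam', loss=tf.keras.losses.MeanSquaredError())  # loss instance
model.compile(optimizer='adam', loss=tf.keras.losses.mean_squared_error)  # loss function

# Passing the class object itself (no parentheses) is what triggers the
# "Expected float32 ... got 'auto'" TypeError once fit() runs.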
answered Oct 20 '22 by Sourcerer


I was compiling my model with:

model.compile(optimizer='Adam', loss=tf.losses.CosineSimilarity,
              metrics=['accuracy'])

and got the same error. I changed my code as follows, adding the parentheses to instantiate the loss:

model.compile(optimizer='Adam', loss=tf.losses.CosineSimilarity(),
              metrics=['accuracy'])

It worked.

answered Oct 20 '22 by Faeze Zps


I had a similar issue while I was compiling my model with the following code:

model.compile(loss=tf.keras.losses.MeanSquaredError, optimizer='Adam')

I changed it to the following and it worked. Thanks to https://stackoverflow.com/users/7349864/sourcerer

model.compile(loss=tf.keras.losses.MeanSquaredError(), optimizer='Adam')
answered Oct 20 '22 by Parvez Nadvi