I am using TensorFlow to build a simple feed-forward neural network with variable-size batches. I am not using the GPU, I have 8 GB of RAM, and I am running Python 3.5.2.
My problem is that some batches are too big and generate the typical out-of-memory error. I understand why that happens; that in itself is not the problem. However, if I use Keras with the TensorFlow backend, I don't have this issue. I have built an example (with fixed-size batches) below that illustrates this.
Is there a problem with my implementation? How should I handle batches that are too big?
import numpy as np
import tensorflow as tf
n_observations = 100000
n_input = 6
batch_size = 20000
X = np.random.rand(n_observations, n_input)
Y = X[:,0] ** 3 + X[:,1] ** 2 + X[:,2] + X[:,3] + X[:,4] + X[:,5]+ np.random.rand(n_observations)
n_hidden = 16
n_output = 1
def generatebatch(n_observations, batch_size):
    for batch_i in range(n_observations // batch_size):
        start = batch_i * batch_size
        end = start + batch_size
        batch_xs = X[start:end, :]
        batch_ys = Y[start:end]
        yield batch_xs, batch_ys
with tf.Session() as sess:
    # placeholders for input and target
    net_input = tf.placeholder(tf.float32, [None, n_input])
    y_true = tf.placeholder(tf.float32)
    # Hidden Layer
    W1 = tf.Variable(tf.random_normal([n_input, n_hidden]))
    b1 = tf.Variable(tf.random_normal([n_hidden]))
    net_output1 = tf.nn.relu(tf.matmul(net_input, W1) + b1)
    # Yet another Hidden Layer
    yaW1 = tf.Variable(tf.random_normal([n_hidden, n_hidden]))
    yab1 = tf.Variable(tf.random_normal([n_hidden]))
    yanet_output1 = tf.nn.relu(tf.matmul(net_output1, yaW1) + yab1)
    # Output Layer
    W2 = tf.Variable(tf.random_normal([n_hidden, n_output]))
    b2 = tf.Variable(tf.random_normal([n_output]))
    net_output2 = tf.nn.relu(tf.matmul(yanet_output1, W2) + b2)
    # The loss function
    cost = tf.reduce_mean(tf.pow(y_true - net_output2, 2))
    # Configure the optimizer
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    n_epochs = 100
    for epoch_i in range(n_epochs):
        batchloss = []
        for batch_xs, batch_ys in generatebatch(n_observations, batch_size):
            _, loss = sess.run(
                [optimizer, cost],
                feed_dict={
                    net_input: batch_xs,
                    y_true: batch_ys
                })
            batchloss.append(loss)
        print(np.mean(batchloss))
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
import logging
#just to hide the deprecation warnings
logging.basicConfig(level=logging.CRITICAL)
n_input = 6
n_observations = 100000
n_hidden = 16
n_epochs = 10
batch_size = 35000
# input data
X = np.random.rand(n_observations, n_input)
Y = X[:,0] ** 3 + X[:,1] ** 2 + X[:,2] + X[:,3] + X[:,4] + X[:,5]+ np.random.rand(n_observations)
# create and fit Multilayer Perceptron model
model = Sequential()
model.add(Dense(n_hidden, input_dim=n_input, activation='relu'))
model.add(Dense(n_hidden, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mse', optimizer='adam')
model.fit(X, Y, nb_epoch=n_epochs, batch_size=batch_size, verbose=1)
Your Y has an incorrect shape, which may cause TensorFlow to infer tensor shapes incorrectly (for example, (20000, 20000) instead of (20000, 1)), consuming a lot of memory. Reshape Y into a column vector:
Y = np.reshape(Y, [n_observations, 1])
Thus your placeholders should declare the SAME shapes:
net_input = tf.placeholder(tf.float32, shape=[None, n_input])
y_true = tf.placeholder(tf.float32, shape=[None, 1])
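To see where the memory goes, here is a minimal NumPy sketch (with a small batch purely for illustration) of the broadcasting that happens when a target of shape (batch,) is subtracted from a prediction of shape (batch, 1), which is exactly what y_true - net_output2 does without the reshape:
import numpy as np

# Small batch just for illustration; the same broadcasting happens at batch_size = 20000.
batch = 5
pred = np.zeros((batch, 1), dtype=np.float32)   # network output, shape (batch, 1)
target = np.zeros((batch,), dtype=np.float32)   # un-reshaped Y slice, shape (batch,)

diff = pred - target
print(diff.shape)  # (5, 5) -- broadcast to (batch, batch), not element-wise

# At batch_size = 20000 this one intermediate is 20000 * 20000 * 4 bytes, roughly 1.6 GB,
# and the squared error and its gradients need comparable buffers on top of that.
target_col = target.reshape(batch, 1)
print((pred - target_col).shape)  # (5, 1) -- the intended element-wise difference
Keras does not hit this because model.fit checks that the target's shape matches the model's output shape, so no such broadcast ever occurs.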