I am trying to learn the LSTM model for sentiment analysis using TensorFlow, and I have gone through how the LSTM model works.
The following code (create_sentiment_featuresets.py) generates the lexicon from 5000 positive sentences and 5000 negative sentences.
import nltk
from nltk.tokenize import word_tokenize
import numpy as np
import random
from collections import Counter
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()

def create_lexicon(pos, neg):
    lexicon = []
    with open(pos, 'r') as f:
        contents = f.readlines()
        for l in contents:
            l = l.decode('utf-8')
            all_words = word_tokenize(l)
            lexicon += list(all_words)
    with open(neg, 'r') as f:
        contents = f.readlines()
        for l in contents:
            l = l.decode('utf-8')
            all_words = word_tokenize(l)
            lexicon += list(all_words)

    lexicon = [lemmatizer.lemmatize(i) for i in lexicon]
    w_counts = Counter(lexicon)
    l2 = []
    for w in w_counts:
        if 1000 > w_counts[w] > 50:
            l2.append(w)
    print("Lexicon length create_lexicon: ", len(lexicon))
    return l2
def sample_handling(sample, lexicon, classification):
    featureset = []
    print("Lexicon length Sample handling: ", len(lexicon))
    with open(sample, 'r') as f:
        contents = f.readlines()
        for l in contents:
            l = l.decode('utf-8')
            current_words = word_tokenize(l.lower())
            current_words = [lemmatizer.lemmatize(i) for i in current_words]
            features = np.zeros(len(lexicon))
            for word in current_words:
                if word.lower() in lexicon:
                    index_value = lexicon.index(word.lower())
                    features[index_value] += 1
            features = list(features)
            featureset.append([features, classification])
    print("Feature SET------")
    print(len(featureset))
    return featureset
def create_feature_sets_and_labels(pos, neg, test_size=0.1):
    global m_lexicon
    m_lexicon = create_lexicon(pos, neg)
    features = []
    features += sample_handling(pos, m_lexicon, [1, 0])
    features += sample_handling(neg, m_lexicon, [0, 1])
    random.shuffle(features)
    features = np.array(features)

    testing_size = int(test_size * len(features))

    train_x = list(features[:, 0][:-testing_size])
    train_y = list(features[:, 1][:-testing_size])
    test_x = list(features[:, 0][-testing_size:])
    test_y = list(features[:, 1][-testing_size:])
    return train_x, train_y, test_x, test_y

def get_lexicon():
    global m_lexicon
    return m_lexicon
The following code (sentiment_analysis.py) performs sentiment analysis using a simple neural network model and is working fine:
from create_sentiment_featuresets import create_feature_sets_and_labels
from create_sentiment_featuresets import get_lexicon
import tensorflow as tf
import numpy as np
# extras for testing
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# - end extras

train_x, train_y, test_x, test_y = create_feature_sets_and_labels('pos.txt', 'neg.txt')

# pt A-------------
n_nodes_hl1 = 1500
n_nodes_hl2 = 1500
n_nodes_hl3 = 1500

n_classes = 2
batch_size = 100
hm_epochs = 10

x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)

hidden_1_layer = {'f_fum': n_nodes_hl1,
                  'weight': tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),
                  'bias': tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'f_fum': n_nodes_hl2,
                  'weight': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                  'bias': tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'f_fum': n_nodes_hl3,
                  'weight': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                  'bias': tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'f_fum': None,
                'weight': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                'bias': tf.Variable(tf.random_normal([n_classes]))}

def neural_network_model(data):
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weight']), hidden_1_layer['bias'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weight']), hidden_3_layer['bias'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weight']) + output_layer['bias']
    return output

# pt B--------------
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start: end])
                batch_y = np.array(train_y[start: end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: test_x, y: test_y}))

        # testing --------------
        m_lexicon = get_lexicon()
        print('Lexicon length: ', len(m_lexicon))
        input_data = "David likes to go out with Kary"
        current_words = word_tokenize(input_data.lower())
        current_words = [lemmatizer.lemmatize(i) for i in current_words]
        features = np.zeros(len(m_lexicon))
        for word in current_words:
            if word.lower() in m_lexicon:
                index_value = m_lexicon.index(word.lower())
                features[index_value] += 1

        features = np.array(list(features)).reshape(1, -1)
        print('features length: ', len(features))
        result = sess.run(tf.argmax(prediction.eval(feed_dict={x: features}), 1))
        print(prediction.eval(feed_dict={x: features}))
        if result[0] == 0:
            print('Positive: ', input_data)
        elif result[0] == 1:
            print('Negative: ', input_data)

train_neural_network(x)
I am trying to modify the above (sentiment_analysis.py) into an LSTM model, after reading the RNN w/ LSTM cell example in TensorFlow and Python, which applies an LSTM to the MNIST image dataset.
Somehow, through many hit-and-trial attempts, I was able to get the below running code (sentiment_demo_lstm.py):
import tensorflow as tf
from tensorflow.contrib import rnn
from create_sentiment_featuresets import create_feature_sets_and_labels
from create_sentiment_featuresets import get_lexicon
import numpy as np

# extras for testing
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# - end extras

train_x, train_y, test_x, test_y = create_feature_sets_and_labels('pos.txt', 'neg.txt')

n_steps = 100
input_vec_size = len(train_x[0])
hm_epochs = 8
n_classes = 2
batch_size = 128
n_hidden = 128

x = tf.placeholder('float', [None, input_vec_size, 1])
y = tf.placeholder('float')

def recurrent_neural_network(x):
    layer = {'weights': tf.Variable(tf.random_normal([n_hidden, n_classes])),  # n_hidden x n_classes
             'biases': tf.Variable(tf.random_normal([n_classes]))}
    h_layer = {'weights': tf.Variable(tf.random_normal([1, n_hidden])),        # 1 x n_hidden
               'biases': tf.Variable(tf.random_normal([n_hidden], mean=1.0))}

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, 1])
    x = tf.nn.relu(tf.matmul(x, h_layer['weights']) + h_layer['biases'])
    x = tf.split(x, input_vec_size, 0)

    lstm_cell = rnn.BasicLSTMCell(n_hidden, state_is_tuple=True)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']
    return output

def train_neural_network(x):
    prediction = recurrent_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while (i + batch_size) < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start: end])
                batch_y = np.array(train_y[start: end])
                batch_x = batch_x.reshape(batch_size, input_vec_size, 1)
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            print('--------Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: np.array(test_x).reshape(-1, input_vec_size, 1), y: test_y}))

        # testing --------------
        m_lexicon = get_lexicon()
        print('Lexicon length: ', len(m_lexicon))
        input_data = "Mary does not like pizza"  # "he seems to to be healthy today" # "David likes to go out with Kary"
        current_words = word_tokenize(input_data.lower())
        current_words = [lemmatizer.lemmatize(i) for i in current_words]
        features = np.zeros(len(m_lexicon))
        for word in current_words:
            if word.lower() in m_lexicon:
                index_value = m_lexicon.index(word.lower())
                features[index_value] += 1

        features = np.array(list(features)).reshape(-1, input_vec_size, 1)
        print('features length: ', len(features))
        result = sess.run(tf.argmax(prediction.eval(feed_dict={x: features}), 1))
        print('RESULT: ', result)
        print(prediction.eval(feed_dict={x: features}))
        if result[0] == 0:
            print('Positive: ', input_data)
        elif result[0] == 1:
            print('Negative: ', input_data)

train_neural_network(x)
Output of print(train_x[0]):
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Output of print(train_y[0]):
[0, 1]
len(train_x) = 9596 and len(train_x[0]) = 423, meaning train_x is a list of 9596 x 423?
Though I have running code now, I still have lots of doubts.
In sentiment_demo_lstm.py, I am not able to understand the following part:
x = tf.transpose(x, [1,0,2])
x = tf.reshape(x, [-1, 1])
x = tf.split(x, input_vec_size, 0)
I have printed the following shapes:
x = tf.placeholder('float', [None, input_vec_size, 1]) ==> TensorShape([Dimension(None), Dimension(423), Dimension(1)]))
x = tf.transpose(x, [1,0,2]) ==> TensorShape([Dimension(423), Dimension(None), Dimension(1)]))
x = tf.reshape(x, [-1, 1]) ==> TensorShape([Dimension(None), Dimension(1)]))
x = tf.split(x, input_vec_size, 0) ==> ?
Here I took the size of the hidden layer as 128; does it need to be the same as the number of inputs, i.e. len(train_x) = 9596?
The value 1 in
x = tf.placeholder('float', [None, input_vec_size, 1])
and
x = tf.reshape(x, [-1, 1])
is because train_x[0] is 423 x 1?
The following reshapes are in order to match the placeholder's dimensions, right?
batch_x = np.array(train_x[start: end]) ==> (128, 423)
batch_x = batch_x.reshape(batch_size, input_vec_size, 1) ==> (128, 423, 1)
x = tf.placeholder('float', [None, input_vec_size, 1])
If I modify the line
while (i + batch_size) < len(train_x):
to
while i < len(train_x):
I get the following error:
Traceback (most recent call last):
File "sentiment_demo_lstm.py", line 131, in <module>
train_neural_network(x)
File "sentiment_demo_lstm.py", line 86, in train_neural_network
batch_x = batch_x.reshape(batch_size ,input_vec_size, 1)
ValueError: cannot reshape array of size 52452 into shape (128,423,1)
=> So I can't include the last 124 records/feature sets while training?
This is a loaded question. Let me try to put it in simple English, hiding all the complicated inner details:
A simple unrolled LSTM model with 3 steps is shown below. Each LSTM cell takes an input vector and the hidden output vector of the previous LSTM cell, and produces an output vector and the hidden output for the next LSTM cell.
A concise representation of the same model is shown below.
LSTM models are sequence-to-sequence models, i.e., they are used for problems where a sequence has to be labelled with another sequence, like POS tagging or NER tagging of each word in a sentence.
You seem to be using it for a classification problem. There are two possible ways to use an LSTM model for classification:
1) Take the outputs of all the states (O1, O2 and O3 in our example) and apply a softmax layer whose output size equals the number of classes (2 in your case).
2) Take the output of the last state (O3) and apply a softmax layer to it. (This is what you are doing in your code; outputs[-1] returns the last row of outputs.)
We then backpropagate (Backpropagation Through Time, BPTT) on the error of the softmax output.
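As a sketch, continuing from your recurrent_neural_network (so outputs and layer are the ones defined there), the two options could look like the following; the pooled variant and its names (all_outputs, mean_output) are mine, and averaging is just one common way of combining the state outputs:

    # Option 2 (what your code does): softmax layer on the last state only.
    logits_last = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    # Option 1: combine the outputs of all the states -- here by averaging --
    # and feed the combined vector through the same softmax layer.
    all_outputs = tf.stack(outputs, axis=0)             # (n_steps, batch, n_hidden)
    mean_output = tf.reduce_mean(all_outputs, axis=0)   # (batch, n_hidden)
    logits_all = tf.matmul(mean_output, layer['weights']) + layer['biases']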
Coming to the implementation using TensorFlow, let's see what the input and the output of the LSTM model are.
Each LSTM cell takes an input, but we have 3 such LSTM cells, so the input (the X placeholder) should be of size (input size * time steps). But we don't calculate the error and run BPTT for a single input; instead we do it on a batch of input-output combinations. So the input of the LSTM will be (batch size * input size * time steps).
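For instance, a batch for a 3-step model with input size 1 would be shaped like this (the toy numbers are mine):

    import numpy as np

    # Toy numbers (my assumption): batch of 2, 3 time steps, input size 1.
    batch = np.zeros((2, 3, 1), dtype=np.float32)
    print(batch.shape)   # (2, 3, 1) -- what a placeholder [None, 3, 1] expects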
An LSTM cell is defined by the size of its hidden state. The size of the output and of the hidden output vector of the LSTM cell is the same as the size of the hidden state (check the LSTM internal calculations for why!). We then define an LSTM model using a list of these LSTM cells, where the size of the list equals the number of unrollings of the model. So we define the number of unrollings to be done and the size of the input during each unrolling.
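A minimal sketch with the same contrib API as your script; the toy sizes (3 unrollings, input size 1, hidden size 128) are my assumption:

    import tensorflow as tf
    from tensorflow.contrib import rnn

    time_steps, input_size, n_hidden = 3, 1, 128

    x = tf.placeholder(tf.float32, [None, time_steps, input_size])
    # static_rnn expects a Python list with one (batch, input_size) tensor
    # per unrolling; tf.unstack along the time axis produces exactly that.
    x_steps = tf.unstack(x, time_steps, axis=1)

    cell = rnn.BasicLSTMCell(n_hidden, state_is_tuple=True)
    outputs, state = rnn.static_rnn(cell, x_steps, dtype=tf.float32)

    # Each output has the size of the hidden state, not of the input:
    print(outputs[-1].get_shape())   # (?, 128)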
I have skipped lots of things, like how to handle variable-length sequences, sequence-to-sequence error calculations, how an LSTM calculates its output and hidden output, etc.
Coming to your implementation, you are applying a relu layer before the input of each LSTM cell. I don't understand why you are doing that, but I guess you are doing it to map your input size to the LSTM's input size.
Coming to your questions:
Let's say input_vec_size = 3. You are passing an ndarray of size [128 * 3 * 1].
x = tf.transpose(x, [1,0,2]) --> [3 * 128 * 1]
x = tf.reshape(x, [-1, 1]) --> [384 * 1]
h_layer['weights'] --> [1, 128]
x = tf.nn.relu(tf.matmul(x, h_layer['weights']) + h_layer['biases']) --> [384 * 128]
x = tf.split(x, input_vec_size, 0) --> a list of 3 tensors, each of size [128 * 128]
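A small runnable check of those shapes, using the same toy sizes (I fixed the batch at 128 so the static shapes are fully known):

    import tensorflow as tf

    input_vec_size, n_hidden = 3, 128

    x = tf.placeholder(tf.float32, [128, input_vec_size, 1])
    W = tf.Variable(tf.random_normal([1, n_hidden]))
    b = tf.Variable(tf.random_normal([n_hidden], mean=1.0))

    x = tf.transpose(x, [1, 0, 2])        # (3, 128, 1)
    x = tf.reshape(x, [-1, 1])            # (384, 1)
    x = tf.nn.relu(tf.matmul(x, W) + b)   # (384, 128)
    x = tf.split(x, input_vec_size, 0)    # list of 3 tensors, each (128, 128)

    print(len(x), x[0].get_shape())       # 3 (128, 128)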
No, input size and hidden size are different. The LSTM does a set of operations on the input and the previous hidden output, and gives an output and the next hidden output, both of which are of size hidden size.
x = tf.placeholder('float', [None, input_vec_size, 1])
It defines a tensor or ndarray with a variable number of rows; each row has input_vec_size columns, and each value is a single-value vector.
x = tf.reshape(x, [-1, 1]) --> reshapes the input x into a matrix fixed to 1 column and any number of rows (the -1 dimension is inferred).
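The -1 convention works the same way in plain numpy, as a quick illustration:

    import numpy as np

    a = np.arange(6).reshape(3, 2, 1)   # like (input_vec_size, batch, 1)
    print(a.reshape(-1, 1).shape)       # (6, 1): 1 column, rows inferred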
batch_x.reshape will fail if the number of values in batch_x != batch_size * input_vec_size * 1. This can be the case for the last batch, because len(train_x) might not be a multiple of batch_size, resulting in a not fully filled last batch.
You can avoid this problem by using
batch_x = batch_x.reshape(-1 ,input_vec_size, 1)
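A sketch of the adjusted inner loop (same variable names as your script) that then also trains on the final, smaller batch; the placeholder's leading None dimension accepts a short batch:

    i = 0
    while i < len(train_x):
        batch_x = np.array(train_x[i: i + batch_size])
        batch_y = np.array(train_y[i: i + batch_size])
        # -1 lets numpy infer the batch dimension, so the last batch of
        # 124 rows becomes (124, 423, 1) instead of raising a ValueError.
        batch_x = batch_x.reshape(-1, input_vec_size, 1)
        _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
        epoch_loss += c
        i += batch_size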
But I am still not sure why you are using Relu in front of the input layer.
You are applying logistic regression at the output of the last cell, which is fine.
You can look at my toy example: a classifier using a bidirectional LSTM to classify whether a sequence is increasing, decreasing, or mixed.
Toy sequence_classifier using LSTM in Tensorflow