# -*- coding: utf-8 -*-
"""1.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1YjGx0dVQR6OdF7ZxfiLHKO2Zu9e9pcfM
"""
from google.colab import drive
drive.mount('/content/drive')
# %cd and !ls are IPython magics, so these lines run as written only in Colab/Jupyter
%cd /content/drive/MyDrive/100_days_of_Deep_Learning/DL-PROJECTS-MISCLINOUS/Sentiment-analysis-IMDB
!ls
!pip install kaggle
import os
import json
from zipfile import ZipFile
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
"""## **Setting the Enviourment variables**"""
kaggle_dictionary = json.load(open("kaggle.json"))
kaggle_dictionary.keys()
# setup kaggle credentials as environment variables
os.environ["KAGGLE_USERNAME"] = kaggle_dictionary["username"]
os.environ["KAGGLE_KEY"] = kaggle_dictionary["key"]
!kaggle datasets download -d lakshmi25npathi/imdb-dataset-of-50k-movie-reviews
!ls
# unzip the dataset file
with ZipFile("imdb-dataset-of-50k-movie-reviews.zip", "r") as zip_ref:
    zip_ref.extractall()
!ls
"""## **Load the dataset**"""
data = pd.read_csv("IMDB Dataset.csv")
data.shape
data.head()
data.tail()
"""## **Data analysis & Pre-processing**"""
data["sentiment"].value_counts()
# getting some information about the data
data.info()
data.isnull().sum()
data.replace({"sentiment": {"positive": 1, "negative": 0}}, inplace=True)
data.head()
data["sentiment"].value_counts()
# Step 1: Calculate the number of words in each review and store in a NumPy array
review_word_counts = data['review'].apply(lambda x: len(x.split())).to_numpy()
# Step 2: Find the maximum number of words
max_word_count = review_word_counts.max()
print(f"The maximum number of words in a review is: {max_word_count}")
review_word_counts.shape
review_word_counts  # the number of words in each review
from collections import Counter
# Step 3: Count the frequency of each number in the array
frequency_counter = Counter(review_word_counts)
print(frequency_counter)
# Step 4: Get the number of unique keys (unique word counts)
num_unique_word_counts = len(frequency_counter.keys())
print(f"The number of unique word counts is: {num_unique_word_counts}")
# Step 5: Verify the highest key in the frequency counter
max_key_in_counter = max(frequency_counter.keys())
print(f"The highest word count in the frequency counter is: {max_key_in_counter}")
# Step 6: Verify the lowest key in the frequency counter
min_key_in_counter = min(frequency_counter.keys())
print(f"The lowest word count in the frequency counter is: {min_key_in_counter}")
# Step 7: Find the key with the highest frequency
most_frequent_key = max(frequency_counter, key=frequency_counter.get)
most_frequent_key_count = frequency_counter[most_frequent_key]
print(f"The word count that appears most frequently is: {most_frequent_key} with a frequency of: {most_frequent_key_count}")
sns.set()
# Convert the frequency counter to a DataFrame for easier plotting
frequency_df = pd.DataFrame(frequency_counter.items(), columns=['word_count', 'frequency'])
# Plot the count plot
plt.figure(figsize=(34, 7))
sns.barplot(x='word_count', y='frequency', data=frequency_df)
plt.title('Frequency of Word Counts in Reviews')
plt.xlabel('Number of Words')
plt.ylabel('Frequency')
plt.show()
# split data into training data and test data
train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)
print(train_data.shape)
print(test_data.shape)
# Tokenize text data
tokenizer = Tokenizer(num_words=5000)  # keep only the 5000 most frequent words
tokenizer.fit_on_texts(train_data["review"])  # fit on the training data only, to avoid leakage
X_train = pad_sequences(tokenizer.texts_to_sequences(train_data["review"]), maxlen=200)  # pad/truncate to 200 tokens
X_test = pad_sequences(tokenizer.texts_to_sequences(test_data["review"]), maxlen=200)
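# Quick inspection (not in the original notebook): confirm the fitted vocabulary
# and the padded shapes, which should be (40000, 200) and (10000, 200) here.
print(f"Words seen by the tokenizer: {len(tokenizer.word_index)}")
print(f"X_train shape: {X_train.shape}, X_test shape: {X_test.shape}")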
print(X_train)
print(X_test)
Y_train = train_data["sentiment"]
Y_test = test_data["sentiment"]
print(Y_train)
print(Y_test)
"""## **Model-Architecture & Model Evaluation**"""
# build the model
model = Sequential()
model.add(Embedding(input_dim=5000, output_dim=128, input_length=200))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation="sigmoid"))
model.summary()
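# Parameter count worked out by hand (should match model.summary() above):
#   Embedding: 5000 vocab * 128 dims                          = 640,000
#   LSTM: 4 * ((128 inputs + 128 hidden) * 128 + 128 biases)  = 131,584
#   Dense: 128 weights + 1 bias                               = 129
#   Total                                                     = 771,713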
# compile the model
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
history = model.fit(X_train, Y_train, epochs=5, batch_size=64, validation_split=0.2)
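# A possible refinement (not used in the original run): early stopping on the
# validation loss, since this model can begin overfitting within a few epochs.
# Minimal sketch, left commented out so the run above is unchanged:
#
# early_stop = keras.callbacks.EarlyStopping(
#     monitor="val_loss", patience=2, restore_best_weights=True
# )
# history = model.fit(X_train, Y_train, epochs=10, batch_size=64,
#                     validation_split=0.2, callbacks=[early_stop])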
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['training data', 'validation data'], loc='upper right')
plt.show()  # close this figure so the accuracy curves get their own axes
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['training data', 'validation data'], loc='lower right')
plt.show()
# evaluate on the held-out test data
loss, accuracy = model.evaluate(X_test, Y_test)
print(f"Test Loss: {loss}")
print(f"Test Accuracy: {accuracy}")
"""# **Accuracy Score**"""
# make predictions ...
y_log = model.predict(X_test)
# the sigmoid output is a probability between 0 and 1, not a hard 0/1 label,
# so convert it to class predictions with a 0.5 threshold
y_pred = np.where(y_log>0.5,1,0)
from sklearn.metrics import accuracy_score
accuracy_score(Y_test, y_pred)
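# Optional, finer-grained evaluation (not in the original notebook): per-class
# precision/recall and the confusion matrix, reusing y_pred from above.
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(Y_test, y_pred.ravel(), target_names=["negative", "positive"]))
print(confusion_matrix(Y_test, y_pred.ravel()))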
"""## **Building a Predictive System**"""
def predict_sentiment(review):
    # tokenize and pad the review the same way as the training data
    sequence = tokenizer.texts_to_sequences([review])
    padded_sequence = pad_sequences(sequence, maxlen=200)
    prediction = model.predict(padded_sequence)
    sentiment = "positive" if prediction[0][0] > 0.5 else "negative"
    return sentiment
# example usage
new_review = "This movie was fantastic. I loved it."
sentiment = predict_sentiment(new_review)
print(f"The sentiment of the review is: {sentiment}")
# example usage
new_review = "This movie was not that good"
sentiment = predict_sentiment(new_review)
print(f"The sentiment of the review is: {sentiment}")
# example usage
new_review = "This movie was ok but not that good."
sentiment = predict_sentiment(new_review)
print(f"The sentiment of the review is: {sentiment}")
# example usage
new_review = "This movie sound was not good, but the way of represent the story was better,but cast not that good, so overall it is good."
sentiment = predict_sentiment(new_review)
print(f"The sentiment of the review is: {sentiment}")