Commit c0774f4f authored by Ankush

DLNLP assignment addition

parent 8a7e507f
{
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# reading dataset\n",
"train_data = pd.read_csv(\"train.csv\", index_col=0)#.iloc[:100,:]\n",
"test_data = pd.read_csv(\"test.csv\", index_col=0)#.iloc[:1000,:]\n",
"\n",
"# separate reviews, ratings\n",
"train_reviews = train_data.iloc[:, :-1]\n",
"train_ratings = train_data.iloc[:, -1]\n",
"train_ratings = pd.get_dummies(train_ratings)\n",
"\n",
"\n",
"batch_size, epochs = 32, 10 \n",
"tokenizer = Tokenizer(oov_token = '<oov>')\n",
"\n",
"# pre-process data\n",
"train_reviews, tokenizer = preprocess_data(tokenizer, train_data, preprocessing_training_data=True)\n",
"test_reviews, tokenizer = preprocess_data(tokenizer, test_data, maxlen=train_reviews.shape[1])\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Reviews Shape: (50000, 31)\n",
"Ratings Shape: (50000, 5)\n",
"Epoch 1/10\n",
"1563/1563 [==============================] - 8s 5ms/step - loss: 317.8222 - accuracy: 0.4365\n",
"Epoch 2/10\n",
"1563/1563 [==============================] - 10s 6ms/step - loss: 9.7619 - accuracy: 0.5061\n",
"Epoch 3/10\n",
"1563/1563 [==============================] - 9s 6ms/step - loss: 6.6692 - accuracy: 0.5140\n",
"Epoch 4/10\n",
"1563/1563 [==============================] - 10s 6ms/step - loss: 6.4623 - accuracy: 0.5139\n",
"Epoch 5/10\n",
"1563/1563 [==============================] - 9s 6ms/step - loss: 6.3933 - accuracy: 0.5150\n",
"Epoch 6/10\n",
"1563/1563 [==============================] - 9s 6ms/step - loss: 6.2997 - accuracy: 0.5213\n",
"Epoch 7/10\n",
"1563/1563 [==============================] - 8s 5ms/step - loss: 6.2729 - accuracy: 0.5209\n",
"Epoch 8/10\n",
"1563/1563 [==============================] - 7s 4ms/step - loss: 6.3800 - accuracy: 0.5188\n",
"Epoch 9/10\n",
"1563/1563 [==============================] - 8s 5ms/step - loss: 6.3796 - accuracy: 0.5210\n",
"Epoch 10/10\n",
"1563/1563 [==============================] - 9s 6ms/step - loss: 6.3761 - accuracy: 0.5205\n"
]
}
],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import nltk\n",
"from nltk.corpus import stopwords\n",
"import tensorflow as tf\n",
"from tensorflow.keras.preprocessing.text import Tokenizer\n",
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
"from tensorflow.keras import Sequential\n",
"from tensorflow.keras.layers import Dense, Dropout, Activation\n",
"from sklearn import datasets, model_selection, metrics\n",
"import keras\n",
"stopword = stopwords.words('english')\n",
"\n",
"'''\n",
"About the task:\n",
"\n",
"You are provided with a codeflow- which consists of functions to be implemented(MANDATORY).\n",
"\n",
"You need to implement each of the functions mentioned below, you may add your own function parameters if needed(not to main).\n",
"Execute your code using the provided auto.py script(NO EDITS PERMITTED) as your code will be evaluated using an auto-grader.\n",
"'''\n",
"\n",
"def encode_data(tokenizer, text, tokens, preprocessing_training_data = False):\n",
" # This function will be used to encode the reviews using a dictionary (created using corpus vocabulary) \n",
"\n",
" # Example of encoding :\"The food was fabulous but pricey\" has a vocabulary of 4 words, each one has to be mapped to an integer like: \n",
" # {'The':1,'food':2,'was':3 'fabulous':4 'but':5 'pricey':6} this vocabulary has to be created for the entire corpus and then be used to \n",
" # encode the words into integers \n",
"\n",
" # return encoded examples\n",
" if preprocessing_training_data:\n",
" tokenizer = Tokenizer(oov_token = '<oov>')\n",
" tokenizer.fit_on_texts(tokens)\n",
"\n",
" sequences = tokenizer.texts_to_sequences(text)\n",
" return sequences, tokenizer\n",
"\n",
"def convert_to_lower(text):\n",
" # return the reviews after convering then to lowercase\n",
" lower_text = text.lower()\n",
" return lower_text\n",
"\n",
"def perform_tokenization(text):\n",
" # return the reviews after performing tokenization\n",
" token=nltk.word_tokenize(text)\n",
" return token\n",
"\n",
"def remove_stopwords(text):\n",
"\t# return the reviews after removing the stopwords\n",
"\tremoving_stopwords=[word for word in text if word not in stopword]\n",
"\treturn removing_stopwords\n",
"\t#print(removing_stopwords)\n",
"\n",
"def remove_punctuation(text):\n",
" # return the reviews after removing punctuations\n",
" removing_punctuation = [word for word in text if word.isalpha()]\n",
" return removing_punctuation\n",
"\n",
"def perform_padding(data, maxlen):\n",
" # return the reviews after padding the reviews to maximum length\n",
"\tpadded_data = pad_sequences(data, maxlen=maxlen, padding='post')\n",
"\treturn padded_data\n",
"\n",
"def preprocess_data(tokenizer, data, preprocessing_training_data=False, maxlen=None):\n",
" # make all the following function calls on your data\n",
" # EXAMPLE:->\n",
" '''\n",
" review = data[\"reviews\"]\n",
" review = convert_to_lower(review)\n",
" review = remove_punctuation(review)\n",
" review = remove_stopwords(review)\n",
" review = perform_tokenization(review)\n",
" review = encode_data(review)\n",
" review = perform_padding(review)\n",
" '''\n",
" # return processed data\n",
"\n",
" reviews = data[\"reviews\"]\n",
" list_of_reviews = list(reviews)\n",
" string_of_reviews = ' '.join(str(e) for e in list_of_reviews)\n",
"\n",
" lower_text = convert_to_lower(string_of_reviews)\n",
" # print(lower_text, end='\\n\\n\\n\\n')\n",
" tokens = perform_tokenization(lower_text)\n",
"# print(tokens, end='\\n\\n\\n\\n')\n",
" tokens = remove_stopwords(tokens)\n",
"# print(tokens, end='\\n\\n\\n\\n')\n",
" tokens = remove_punctuation(tokens)\n",
"# print(tokens, end='\\n\\n\\n\\n')\n",
" encoded_data, tokenizer = encode_data(tokenizer, reviews, tokens, preprocessing_training_data)\n",
"# print(encoded_data, end='\\n\\n\\n\\n')\n",
" reviews = perform_padding(encoded_data, maxlen)\n",
"# print(review, end='\\n\\n\\n\\n')\n",
"\n",
" # TODO: Use word embedding for better results \n",
" return reviews, tokenizer\n",
"\n",
"\n",
"# TODO: Change this function to support tensors\n",
"def softmax_activation(x):\n",
" # write your own implementation from scratch and return softmax values(using predefined softmax is prohibited)\n",
"# print(type(x))\n",
"# print(x)\n",
"# print(type(x))\n",
"# Exponent_calculation = tf.exp(x)\n",
"# print(type(Exponent_calculation))\n",
"# Normalization = tf.reduce_sum(Exponent_calculation)\n",
"# print(type(Normalization))\n",
"\n",
"\n",
" \n",
"# return tf.math.divide_no_nan(z,z2)\n",
"\n",
" from tensorflow.python.ops import math_ops\n",
"# tf.print(\"x\")\n",
"# tf.print(x)\n",
" e = math_ops.exp(x - math_ops.reduce_max(x, keepdims=True))\n",
"# tf.print(\"e\")\n",
"# tf.print(e)\n",
" s = math_ops.reduce_sum(e, keepdims=True)\n",
"# tf.print(\"s\")\n",
"# tf.print(s)\n",
" output = tf.math.divide_no_nan(e, s)\n",
"# tf.print(\"output\")\n",
"# tf.print(output)\n",
" epsilon = tf.constant(1e-16)\n",
" output = output + epsilon\n",
"# tf.print(\"output total\")\n",
"# tf.print(math_ops.reduce_sum(output))\n",
" \n",
" \n",
"# if rank == 2:\n",
"# output = tf.nn.softmax(x)\n",
"# elif rank > 2:\n",
"# e = math_ops.exp(x - math_ops.reduce_max(x, axis=axis, keepdims=True))\n",
"# s = math_ops.reduce_sum(e, axis=axis, keepdims=True)\n",
"# output = e / s\n",
"# else:\n",
"# raise ValueError('Cannot apply softmax to a tensor that is 1D. '\n",
"# 'Received input: %s' % (x,))\n",
"\n",
" # Cache the logits to use for crossentropy loss.\n",
" output._keras_logits = x # pylint: disable=protected-access\n",
"# return output\n",
" \n",
"# y1 = keras.backend.sigmoid(x)\n",
"# return exp\n",
" return output\n",
"# return Exponent_calculation/Normalization\n",
"\n",
"\n",
"class NeuralNet:\n",
"\n",
" def __init__(self, reviews, ratings):\n",
" self.reviews = reviews\n",
" self.ratings = ratings\n",
"\n",
" def build_nn(self):\n",
" #add the input and output layer here; you can use either tensorflow or pytorch\n",
" self.model = Sequential()\n",
" print(\"Reviews Shape: \", self.reviews.shape)\n",
" print(\"Ratings Shape: \", self.ratings.shape)\n",
" self.model.add(Dense(5, input_shape=(self.reviews.shape[1], )))\n",
" self.model.add(Activation('softmax'))\n",
" # TODO: Add our softmax function \n",
"# self.model.add(Activation(softmax_activation, name='Softmax'))\n",
" self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
" \n",
" def train_nn(self,batch_size,epochs):\n",
" # write the training loop here; you can use either tensorflow or pytorch\n",
" # print validation accuracy\n",
" self.model.fit(self.reviews, self.ratings, epochs=epochs, batch_size=batch_size, verbose=1)\n",
"\n",
" def predict(self, reviews):\n",
" # return a list containing all the ratings predicted by the trained model\n",
" predicted = self.model.predict(reviews)\n",
" return predicted\n",
"\n",
"\n",
"# TODO: Cross-validate\n",
"\n",
"# Build model\n",
"model = NeuralNet(train_reviews, train_ratings)\n",
"model.build_nn()\n",
"model.train_nn(batch_size, epochs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"# predict\n",
"# test_predictions = model.predict(test_reviews)\n",
"# train_predictions = model.predict(train_reviews)\n",
"# \n",
"# # get ratings from probabilities\n",
"# train_predictions = np.argmax(train_predictions, axis=1)\n",
"# test_predictions = np.argmax(test_predictions, axis=1)\n",
"# train_ratings = np.argmax(np.array(train_ratings), axis=1)\n",
"\n",
"# # report generation on training data\n",
"# print(f\"Classification report:\\n{metrics.classification_report(train_ratings, train_predictions)}\\n\")\n",
"# metrics.confusion_matrix(train_ratings, train_predictions)\n",
"\n",
"# # reading gold-test data\n",
"# test_data = pd.read_csv(\"gold_test.csv\", index_col=0)\n",
"\n",
"# # separate reviews, ratings\n",
"# test_reviews = test_data.iloc[:, :-1]\n",
"# test_ratings = test_data.iloc[:, -1]\n",
"# test_ratings = pd.get_dummies(test_ratings)\n",
"\n",
"# # pre-process data\n",
"# test_reviews, tokenizer = preprocess_data(tokenizer, test_reviews, maxlen=train_reviews.shape[1])\n",
"\n",
"# # predict\n",
"# test_predictions = model.predict(test_reviews)\n",
"\n",
"# # get ratings from probabilities\n",
"# test_predictions = np.argmax(test_predictions, axis=1) + 1\n",
"# test_ratings = np.argmax(np.array(test_ratings), axis=1) + 1\n",
"\n",
"# # report generation on training data\n",
"# print(f\"Classification report:\\n{metrics.classification_report(test_ratings, test_predictions)}\\n\")\n",
"# metrics.confusion_matrix(test_ratings, test_predictions)\n",
"\n",
"# # Test input\n",
"# Test = ['this is bad', 'wow nice!', 'this is a great product']\n",
"# test_reviews = pd.DataFrame(Test, columns=['reviews'])\n",
"\n",
"# # pre-process data\n",
"# test_reviews, tokenizer = preprocess_data(tokenizer, test_reviews, maxlen=train_reviews.shape[1])\n",
"\n",
"# # predict\n",
"# test_predictions = model.predict(test_reviews)\n",
"# test_ratings = np.argmax(np.array(test_predictions), axis=1) + 1\n",
"\t\n",
"# # show probabilities and ratings\n",
"# print(test_predictions)\n",
"# print(test_ratings)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From /home/rohit/.local/lib/python3.8/site-packages/tensorflow/python/ops/resource_variable_ops.py:1813: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"If using Keras pass *_constraint arguments to layers.\n",
"INFO:tensorflow:Assets written to: mymodel/assets\n"
]
}
],
"source": [
"model.model.save(\"mymodel\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"model = tf.keras.models.load_model(\"mymodel\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'tokenizer' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-4-5b8d9f44ad52>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mtest_ratings\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_dummies\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_ratings\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mtest_reviews\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtokenizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpreprocess_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtokenizer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_reviews\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmaxlen\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrain_reviews\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mtest_predictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_reviews\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'tokenizer' is not defined"
]
}
],
"source": [
"test_data = pd.read_csv(\"gold_test.csv\", index_col=0)\n",
"\n",
"test_reviews = test_data.iloc[:, :-1]\n",
"test_ratings = test_data.iloc[:, -1]\n",
"test_ratings = pd.get_dummies(test_ratings)\n",
"\n",
"test_reviews, tokenizer = preprocess_data(tokenizer, test_reviews, maxlen=train_reviews.shape[1])\n",
"\n",
"test_predictions = model.predict(test_reviews)\n",
"\n",
"test_predictions = np.argmax(test_predictions, axis=1) + 1\n",
"test_ratings = np.argmax(np.array(test_ratings), axis=1) + 1\n",
"\n",
"print(f\"Classification report:\\n{metrics.classification_report(test_ratings, test_predictions)}\\n\")\n",
"metrics.confusion_matrix(test_ratings, test_predictions)"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Matplotlib is building the font cache; this may take a moment.\n"
]
},
{
"data": {
"text/plain": [
"<AxesSubplot:>"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAiwAAAGbCAYAAADnUMu5AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAABBc0lEQVR4nO3dd3gUVdvH8e9JofceCIgKKqLSmxTpvYogioAIIggKiCCKgoAKKEUUFWnSpPcS6R2lCqggzQLSe1chyXn/yJoXHlqUhJnZ/X2ea69n98zszj1r2Ny573NmjbUWERERETcLcjoAERERkdtRwiIiIiKup4RFREREXE8Ji4iIiLieEhYRERFxvZCEPkCKZPdqGVIC+zvystMh+D1jjNMhBIRorVpMcHXDCjkdQkCYtm/OXf3QuHLi13j7xxOa4T5XfuCpwiIiIiKul+AVFhEREUlg0VFOR5DgVGERERER11OFRURExOtstNMRJDglLCIiIl4X7f8Ji1pCIiIi4nqqsIiIiHicVUtIREREXE8tIRERERHnqcIiIiLidWoJiYiIiOvpwnEiIiIizlOFRURExOvUEhIRERHX0yohEREREeepwiIiIuJxunCciIiIuJ9aQiIiIiLOU4VFRETE69QSEhEREdfTheNEREREnKcKi4iIiNepJSQiIiKup1VCIiIiIs5ThUVERMTr1BISERER11NLSERERMR5qrCIiIh4nLX+fx0WJSwiIiJeFwBzWNQSEhEREddThUVERMTrAmDSrRIWERERrwuAlpASFhEREa/Tlx+63+dD+/Hb7xvZsHHBLfcrWOgxzpzbQ9261e74mGnTpmbO3HFs/WEZc+aOI02aVAA0fLoO69Z/w/oN37Bk2TQeeTTPHR/LH4SHZ2Xxoqls27acrVuX8Uq7FgC8+25nvt+8mE0bFxExfwJhYZkdjtTbdu/6ju83L2HjhoV89+18ANKmTUNExAS2b19NRMQE0qRJ7XCU3jV82AAOHdjG1i1LY8fq16/Jtq3LuPzXHxQq+JiD0bnbyx+9ysjNYxm46NN4eb0n6pfn0xVD+XTFUJ6oXz52vNuYd+n/zWAGLR5Cq/fbEBTk+V9xchXP/9f8etx06tZ9/pb7BAUF0bv3GyxduvpfvXbp0sUY+uVH142/1qkNK1asJf9j5VmxYi2vdWoDwL7f/6BqlacpVrQa/fp+yqdDPvhXx/NXkZGRdOnSk3z5ylGqVC1at3mePHlyM2DAFxQsVInCRSoTEbGEt7t1dDpUz6tUuQFFilahxOM1AOjSuS3Ll60lb97SLF+2li6d2zocoXeNHTuFGjUbXzO2fftOGjR8kdWr1zkUlTcsn7qU95q9+6+f13PS+2QMz3TNWIrUKWjYoRFv1nmdrrU70bBDI5KnSg7AwLb9eL1aezpWakeq9KkpUaNkfITvDTY6/m4u5fmEZe3aDZw+deaW+7Ru04zZsxdw/NjJa8bbd2jFytWzWLf+G7q93SHOx6xRsxJffz0dgK+/nk7NWpUBWL/+e86cOQfAxg1byJYtS9xPxI8dOXKMLVt/AuDChYvs3LmHrFmzcP78hdh9kiVPhrXWqRD9Vq1alRk3fioA48ZPpXbtKg5H5F2r16zn1Okz14zt3LmX3bt/cSYgD/l5w3YunLlwzVjmHFnoNuZd+s0bSO+pfch6f7Y4vVa+JwqybfVWLpy9wMVzF9m2eiv5yxYC4M8LfwIQHBJMSGhIYH2mREfH382lPJ+w3E5Y1szUrl2F4cPGXzNevkJpcuXKyROl61KieHXyF3iEkiWLxuk1M2XKwNEjxwE4euQ4mTJluG6fps2eZtGilXd+An7mnnvCyZ/vETZs2AJAr15v8OsvG3nmmXq82/P6apbEncUSMX8C676LoEWLmEpApkwZOHLkGBCTON7oZ1XECa37tmVkjy95o+ZrjH3/K158r02cnpc+SzpOHj4R+/jUkZOkz5Iu9vHbY99l5Pfj+PPin6yL+Da+wxYH/edJt8aY5tbar26yrRXQCiBRaHpCQ1L+18PcsQ8/7M47b/e9LtOuUKE05SuU5tt1Mb3+5MmTcX+unKxdu4HlK2eSOHEikidPRtq0aWL3eeftfixdsuq6Y/zva5cpU5xmzRpSqWKDBDorb0qePBlTJg+n0+s9Yqsr3bv3o3v3fnTp0o6XX25Or14DHI7Su8qVe5JDh46QMWN6vomYyK5de6/bJ6D+4hTXSpIsCQ8UeohOn78ROxaaOBSAcg0qUL15LQCy5AzjrdHdibwcybE/jvLRS31u+9rvNX2X0MShtB/ciUcef4wf1mxNkHNwHRe3cuLLnawS6gncMGGx1g4DhgGkSHavo5+QBQo+yuixMRO90qdPS5UqZYmMisQYw4D+nzNq5MTrnlPuiXpAzByWxs89ReuXOl+z/dixE2TOkpGjR46TOUtGjh///1ZT3kceYsjnfXmybnNO3aZVFUhCQkKYMnk4EyfOZNasb67bPnHiDObMGaeE5Q4cOnQEgOPHTzJ79gKKFMnPsWMnyJIlE0eOHCNLlkzX/KyKOMUEGS6du0jn6h2u27Z86lKWT42Z2Nxz0vsMeX0wxw8ci91+8sgp8hZ/JPZxuizp2b7up2te48rfV9i4aD1FKhcLnITFxa2c+HLLlpAx5oeb3H4EPLGk45GHy5A3T2ny5inNrJnf0LFDd+bNXcySJato0rQhyZMnA2JaRxkzpo/Ta0bMX0LjxvUBaNy4PvPnLQZiVsNMmPgFL7Z4jb17f0uYE/Ko4cMGsHPnXj4ePCx2LFeue2Pv165VhV27NBfgv0qWLCkpUiSPvV+xYhm2b9/F3HmLafJcTKWvyXMNmDt3kZNhigAxc02O/XGUEtX/f1LsPXlyxum521Z+T74yBUieKjnJUyUnX5kCbFv5PUmSJSFNprQABAUHUbB8YQ7+ciAhwheH3K7CkhmoApz+n3EDuKI5+NXowZQuU5z06dOya8+3vP/ex4SGxpzWyBETbvq8ZUtX89CD97Nseczk2QsXL9HyhY5x+gt04IAvGDtuCE2bNeSP/Qdp2qQdAF3fepV06dIyaHBvIGZ1TJlSde70FD2v5ONFeO65p/jxxx1s2hjzC/Ptd/rSvHkjHnjgfmx0NPv2H6Rt264OR+pdmTNnZOqUEQCEhAQzadIsFi1awaZNW5kwYSjPN2/E/v0HePbZuM0TkOuNH/cZT5QpQYYM6fj910307NWfU6fPMHjQe2TMmI45s8eybdt2qv/PSiKBDp+8Tt4Sj5AybSq+XDeKyYMmMrj9QF58rw31X2lIcGgwa+esZt/Pv9/2tS6cvcD0TybTd+5AAKYNnsSFsxdInSENXUe8TWiiUEyQ4afvfmTR+OuruX4rACos5lY9bWPMSOAra+2aG2ybYK199nYHcLolFAj+jrzsdAh+zxjjdAgBIVpzbBJc3bBCTocQEKbtm3NXPzT+XDU63v7xJC3zvCs/8G5ZYbHWtrjFttsmKyIiIiLxQZfmFxER8boAaAkpYREREfG6AFjW7PcXjhMRERHvU
4VFRETE69QSEhEREddTS0hERETEeaqwiIiIeJ1aQiIiIuJ6agmJiIiIOE8VFhEREa9TS0hERERcLwASFrWERERExPVUYREREfG6AJh0q4RFRETE69QSEhEREXGeKiwiIiJep5aQiIiIuJ5aQiIiIiLXMsYEG2O2GGPm+R7fa4xZb4zZa4yZbIxJ5BtP7Hu817c951Wv8aZvfJcxpsrtjqmERURExOtsdPzd4qY98PNVj/sBg6y1uYDTQAvfeAvgtG98kG8/jDEPA42AvEBV4HNjTPCtDqiERURExOuio+PvdhvGmHCgBjDC99gA5YFpvl3GAHV99+v4HuPbXsG3fx1gkrX2b2vtb8BeoOitjquERURERGIZY1oZYzZddWv1P7t8DHQB/slu0gNnrLWRvscHgGy++9mAPwB828/69o8dv8FzbkiTbkVERLwuHifdWmuHAcNutM0YUxM4Zq3dbIwpG28HjQMlLCIiIl5n7d06UkmgtjGmOpAESAUMBtIYY0J8VZRw4KBv/4NAduCAMSYESA2cvGr8H1c/54bUEhIREZE4sda+aa0Nt9bmJGbS7DJrbWNgOfCUb7dmwGzf/Tm+x/i2L7PWWt94I98qonuB3MCGWx1bFRYRERGvc/46LG8Ak4wx7wFbgJG+8ZHAOGPMXuAUMUkO1trtxpgpwA4gEmhrrY261QGUsIiIiHidAwmLtXYFsMJ3/1dusMrHWvsX0OAmz38feD+ux1NLSERERFxPFRYRERGv03cJiYiIiOs5P4clwaklJCIiIq6nCouIiIjX3b3rsDhGCYuIiIjXBUBLKMETlivRkbffSe6I/+fVLhAAf71IYNj+5xGnQxD5T1RhERER8TpVWERERMT1AmBZs1YJiYiIiOupwiIiIuJxNtr/59kpYREREfG6AJjDopaQiIiIuJ4qLCIiIl4XAJNulbCIiIh4XQDMYVFLSERERFxPFRYRERGvC4BJt0pYREREvE4Ji4iIiLheAHzfmeawiIiIiOupwiIiIuJ1agmJiIiI62lZs4iIiIjzVGERERHxOl3pVkRERFxPLSERERER56nCIiIi4nFWq4RERETE9dQSEhEREXGeKiwiIiJep1VCIiIi4npqCYmIiIg4TxUWERERr9MqIREREXE9tYREREREnKcKi4iIiNdplZCIiIi4nlpCIiIiIs5ThUVERMTj9F1CIiIi4n5qCfm3B3Lfx4b1C2Jvx4/t4JV2LWK3d2jfir//+oP06dM6GKX3DR82gEMHtrF1y9LYsX593uanH1fy/ebFTJs6gtSpUzkYofeFh2dl8aKpbNu2nK1bl8X+HOfLl5c1q+eyaeMi1n0XQZHC+Z0N1E8kTpyY79bOY/OmxWzbuowe3Ts5HZKrLNk0i9krJjBj2XimLhpz3fZ7c93DxIiRbPtjDc1fbhwvxwxNFMrAYe+zYP10Jn0ziqzZwwB4/ImiTFs8htkrJjBt8RiKlSocL8eTuy+gE5bde36laLGqFC1WleIlqnPp0p/MnrMAgPDwMCpWLMO+/QccjtL7xo6dQo2a134oLVm6inz5y1OwUCX27PmVrm+0cyg6/xAZGUmXLj3Jl68cpUrVonWb58mTJzd9PuhG7/cGUrhIZd7t2Z8+fbo5Hapf+Pvvv6lYuSGFCleiUOHKVKlclmJFCzodlqs0e7INT5Z/jgaVm1237eyZc7z/Vn9Gff71v37drNnDGDPzi+vGn2pcm7Nnz1O1WH3GfjmR19+J+Uw5ffIMbZ7rRJ2yz/LmKz3p99m7//qYnhBt4+/mUgGdsFytfPlS/PrbPvbvPwjARx/24M233sda9/7H84rVa9Zz6vSZa8YWL1lFVFQUAOvWf0+2bGEOROY/jhw5xpatPwFw4cJFdu7cQ9asWbDWkipVSgBSp07JocNHnQzTr1y8eAmA0NAQQkJD9VnxL5w6cZqftv5MZGTkddtqPVWVyQu+Ysay8bzbvytBQXH7NVW+6hPMnjwfgIVzl1G8dBEAfv5pN8ePngBgz85fSZwkMaGJQuPpTFzERsffzaVu+5NgjHnIGFPBGJPif8arJlxYd1+DBrWZMnk2ALVqVubQoSP8+OPPDkcVGJo/34gFC5c7HYbfuOeecPLne4QNG7bQ6fUe9O3zNr/+spF+fd/h7bf7OB2e3wgKCmLTxkUcPvgDS5euYsPGLU6H5BrWwsgpnzJt8RgaNKkb5+fdlzsn1epUonHNljxZ/jmio6Kp9VTcftVkzpKRwwdjEvKoqCjOn79AmnSpr9mncs3y/PzjLq5cvhLnmMQ9bjnp1hjzKtAW+BkYaYxpb62d7dv8AbDgJs9rBbQCCA5JQ3Bwihvt5hqhoaHUrFGJd97pS9KkSejSpd11LQxJGG92fZXIyEgmTJjhdCh+IXnyZEyZPJxOr/fg/PkLvNSqKa93fpeZMyN46qlaDPtyAFWrNXI6TL8QHR1N4SKVSZ06FdOnjiRv3gfZvn2X02G5QuNaL3LsyHHSZUjLyKlD+G3PPjatu31CV7x0EfLme4gpvnkvSZIk5uSJ0wB8OvpDsuXISmhoCGHhWZixbDwA44ZNYuakebd97VwP3ken7u1o2fCVOzgzF3NxKye+3G6V0ItAIWvtBWNMTmCaMSantXYwYG72JGvtMGAYQOIk2V3/LlatUo6tW3/i2LET5M37EDlzZmfjxoUAhGcLY926byhVqhZHjx53OFL/0rRJQ2pUr0ilKg2dDsUvhISEMGXycCZOnMmsWd8A0KRJAzq+1h2AadPm8uXQj5wM0S+dPXuOFSvXUqVyWSUsPseOxHxWnjpxmiURK3i04MNxSliMMcyaPJ9B739+3bZXnu8CxMxh6fNJd5rVa3PN9qNHjhOWLTNHDx8jODiYlClTcObUWQAyh2Xi09Ef0rXdu/zx+8E7PT1XsgGQsNyuJRRkrb0AYK39HSgLVDPGDOQWCYvXNGxYh8lTYgpH27fvJHuOAjz44OM8+ODjHDh4mOLFqylZiWdVKpfl9dfbUPfJ5/nzz7+cDscvDB82gJ079/Lx4GGxY4cOH6VMmRIAlCtXir17f3MqPL+SIUO62JVtSZIkoWKFMuza9YvDUblD0mRJSJY8Wez9kmWLsefnuL0361ZvpEqt8qTLELMyM3WaVGQNzxKn5y5fuIo6T9cAoEqt8qxbswmAlKlSMHTCIAa+N4QtG374t6cjLnK7CstRY0x+a+1WAF+lpSYwCng0oYO7G5IlS0qFCqVp266r06H4rfHjPuOJMiXIkCEdv/+6iZ69+vNGl3YkTpyYBd9MAmD9+u/13+AOlHy8CM899xQ//riDTRsXAfD2O31p07ozAwf2IiQkhL/++os2bbo4HKl/CAvLzKiRHxMcHERQUBDTps1lfsQSp8NyhfQZ0/Hp6JhKXkhwMPNmLGTN8nU83exJACaPmUGGTOmZumg0KVImJzra0rRVI2qWasQvu39jcJ+hjJjyKUFBhsgrkfTu+hGHDhy57XGnfT2Hfp/1ZMH66Zw9fY5OL8WsiGvcoiE5cobTplNL2nRqCUDLhq9wytdq8hsB
UGExt5rZbowJByKttdf9tBhjSlpr197uAF5oCXldVABc4dBpflNOdDl9WCS83GmyOR1CQPj52Ia7+rFxvl31ePvnk3JIhCs/8m5ZYbHW3vQiJHFJVkRERETigy7NLyIi4nUB0BJSwiIiIuJ1AZCw6Eq3IiIi4nqqsIiIiHhcIHw1hBIWERERr1NLSERERMR5qrCIiIh4XQBUWJSwiIiIeJy+S0hERETEBVRhERER8boAqLAoYREREfG6APhKObWERERExPVUYREREfG4QJh0q4RFRETE6wIgYVFLSERERFxPFRYRERGvC4BJt0pYREREPC4Q5rCoJSQiIiKupwqLiIiI16klJCIiIm6nlpCIiIiIjzEmiTFmgzFmmzFmuzGmp2/8XmPMemPMXmPMZGNMIt94Yt/jvb7tOa96rTd947uMMVVud2wlLCIiIl4XHY+3W/sbKG+tzQfkB6oaY4oD/YBB1tpcwGmghW//FsBp3/gg334YYx4GGgF5garA58aY4FsdWAmLiIiIx9no+Lvd8jgxLvgehvpuFigPTPONjwHq+u7X8T3Gt72CMcb4xidZa/+21v4G7AWK3urYSlhERES8Lh4rLMaYVsaYTVfdWl19KGNMsDFmK3AMWAz8Apyx1kb6djkAZPPdzwb8AeDbfhZIf/X4DZ5zQ5p0KyIiIrGstcOAYbfYHgXkN8akAWYCD92NuJSwiIiIeNztWjkJckxrzxhjlgMlgDTGmBBfFSUcOOjb7SCQHThgjAkBUgMnrxr/x9XPuSG1hERERLzuLk26NcZk9FVWMMYkBSoBPwPLgad8uzUDZvvuz/E9xrd9mbXW+sYb+VYR3QvkBjbc6tiqsIiIiEhchQFjfCt6goAp1tp5xpgdwCRjzHvAFmCkb/+RwDhjzF7gFDErg7DWbjfGTAF2AJFAW1+r6aaUsIiIiHjc3WoJWWt/AArcYPxXbrDKx1r7F9DgJq/1PvB+XI+thEVERMTjnJjDcrdpDouIiIi4niosIiIiHhcIFZYET1iCjIo4CS0qEL6mU0TiRbrQFE6HIAnBGqcjSHDKJkRERMT11BISERHxOLWERERExPVstFpCIiIiIo5ThUVERMTj1BISERER17NaJSQiIiLiPFVYREREPE4tIREREXE9rRISERERcQFVWERERDzOWqcjSHhKWERERDxOLSERERERF1CFRURExOMCocKihEVERMTjAmEOi1pCIiIi4nqqsIiIiHicWkIiIiLievouIREREREXUIVFRETE4/RdQiIiIuJ60WoJiYiIiDhPFRYRERGPC4RJt0pYREREPC4QljWrJSQiIiKupwqLiIiIxwXCpfmVsIiIiHicWkIiIiIiLqAKi4iIiMcFwnVYlLCIiIh4XCAsa1ZLSERERFxPFRYRERGP0yohERERcb1AmMPi+ZbQ0KEfsW/fZjZtWnTD7Q88cD8rVszkzJnddOjQKl6OmShRIsaNG8JPP61k1apZ5MgRDkD58qVYu3YeGzcuZO3aeTzxxOPxcjyvGz5sAIcObGPrlqWxYz3f7cz3mxezaeMivpk/gbCwzA5G6H2JEyfm27Xz2LxpMVu3LqN7904AlCtXig3rF7Bp4yJWLJ/J/ffndDZQP/HAA/ezaeOi2NupEzt59ZWWToflCjnuz87oRcNib4t2zqVhy/rX7FOgRD4W/jwndp/mHZrc8XFDE4XS64t3mLxmHMPmfkaW8JjPlCKlCzHym6GMXTKCkd8MpWDJAnd8LHGGsQlcR0qa9J4EPUDJkkW5ePESI0YMpHDhytdtz5gxPTlyZKNWrSqcOXOWjz8eFufXzpEjnOHD+1OlSqNrxlu1asIjjzzEq692o0GDWtSuXYUmTdqRL19ejh07zuHDx3j44QeYO3cc999f7I7P8XauREUm+DHuROlSxbhw4SJffTWY/AUqAJAyZQrOn78AQLu2L5AnzwO0bdfVyTBvyQt/uyRPnoyLFy8REhLCyhUzee21Hoz6ajD16zdn5869tH6pGUWK5KdFy45Oh3pTXqxqBwUFsf/3zTxeqib79x90OpzbKpbxwbt2rKCgIGZtnsKLNdty9ODR2PECJfLxTOuGdGnW7V+/ZpbwzHQb9AavNHjtmvF6zWqTK899fNT1YyrULscT1UrRvU1vcufNxekTpzlx9CT3PpiTQV9/SN3CDe/43G5n7cFld/VjY0uOOvH2z6fA/tmu/MjzfIVl7doNnDp15qbbjx8/yebNP3DlypXrtjVqVI/Vq2ezbl0En376AUFBcXs7atasxNdfTwdgxowIypYtCcC2bds5fPgYADt27CZJkiQkSpToX56R/1m9Zj2nTp+5ZuyfZAViftEmdOIcCC5evARAaGgIoaGhWGux1pIqZUoAUqVOyaHDR2/1EvIfVChfil9/3eeJZOVuK1yqIAf3HbomWbmdyk9WZPi8zxm9aBid+3WM8+dy6coliZgaU2lfMX8lhUoVBGDP9r2cOHoSgN92/U7iJIkITRT6L8/E/ayNv5tb3fYnwRhT1BhTxHf/YWPMa8aY6gkfWsJ68MFcPPVUTcqVq0/x4tWJioqmUaO6cXpu1qxZOHDgEABRUVGcO3ee9OnTXrNPvXrV2br1Jy5fvhzfofuN3r3e4LdfNvLMM/V4t+dHTofjeUFBQWzauIhDB39gydJVbNi4hZdeep05c8bx26+baNy4Ph9+OMTpMP1Ow4Z1mDR5ltNhuFKFOuVYMmvZDbc9UuhhRi8eTv9xfbj3gZwA3JMrBxVql6N13Vd4vnIroqOiqfxkhTgdK2OWDBw7FPMHY1RUNBfPXSR12lTX7FO2Rhl2/bSHK5ev/wNW3O+Wk26NMT2AakCIMWYxUAxYDnQ1xhSw1r5/F2JMEOXKlaRgwUdZs2YOAEmTJuH48RMATJ78Jffck51EiRKRPXtW1q2LAOCzz75i3Lipt33tPHly8957XalZ87mEOwE/8E73frzTvR9vdGlH25eb07PXAKdD8rTo6GgKF6lM6tSpmDZ1JHnzPkj79i9Su3YTNmzcwmuvtab/Rz14qXVnp0P1G6GhodSqWZlub/dxOhTXCQkNoVTlxxnaZ8R123b9uIf6RZ/hz0t/UaJ8MfqM6kWjUk0pXKogDz2am5ERXwCQOEliTp84A8AHI3qRNUcWQkJDyJwtM6MXxbT3p4yYQcSUBbeN594HcvLyW63o+GyX+DtJFwmESbe3WyX0FJAfSAwcAcKtteeMMf2B9cANExZjTCugFUBISDpCQlLEW8DxxRjD+PHT6N79w+u2Pf30S8DN57AcOnSE8PCsHDx4hODgYFKlSsnJk6cByJYtC5MnD6Nly9f47bf9CX8ifmDCxBnMnTNOCUs8OXv2HCtWrqVKlXI89ujDbNi4BYCpU+cwb97XDkfnX6pWLceWLT9y7NgJp0NxneLlirL7xz2cPnH6um2XLlyKvf/dsvV0+qA9qdOmwhjDN1MXMbTv9UnOWy27Azefw3L8yAkyZc3E8cMnCA4OInmq5Jw9fQ6AjGEZ+GBkT3q378PBfYfi8zRdQxeOg0hrbZS
19hLwi7X2HIC19k8g+mZPstYOs9YWttYWdmOyArB8+Vrq1atOxozpAUibNjU5cmSL03Pnz19C48Yxs96ffLI6K1d+C0Dq1KmYMeMr3nmnH999tylhAvcTuXLdG3u/dq0q7Nr1i4PReF+GDOlInTqm/J0kSRIqVijDzp17SZ06Fblz3wfgG9vjZJh+p9HTddUOuolKdcuz+CbtoHQZ/7+Fnif/Q5ggw9nT59i05nvK1ixDmvRpAEiZJiWZs8VtBeGaRd9SvUHMwouyNZ5g89qYRD1FquR8NLYPQz8YwY+btt/BGYnTbldhuWyMSeZLWAr9M2iMSc0tEpa7acyYTyhdugQZMqRl79519O49iNDQmNMaMeJrMmfOyNq1c0mZMgXR0dG0a/cCBQpUZOfOPfTs2Z+5c8cRFBTElSuRdOz4Tpwmzo0ePZlRowbx008rOX36DE2atAOgdetm3H9/Tt5881XefPNVAGrVasLx4ycT7g3wgPHjPuOJMiXIkCEdv/+6iZ69+lOtWnkeeOB+oqOj2b//IC+3de8KIS8IC8vMqJEfExwchAkKYtq0uURELKF1m85MmTyM6GjL6dNneLFVJ6dD9RvJkiWlYoUytHn5DadDcZ0kSZNQpEwhPnxjUOxY3Sa1AJg1bi7lajxBvaa1iYyK4vJff9Pj5fcA+H3PPoZ/OIqPJ36IMYbIyCgGdhscp0m78yZF8M4nbzF5zTjOnTlPj5d7A1C/eT3Cc2aleccmNO8Ys3y6wzNdOHPyTDyftbMCoSV0y2XNxpjE1tq/bzCeAQiz1v54uwMk9LJmcf+yZn/g/x8F7qAPi4R3N5c1B7K7vax5XdYn4+2fT/FDM1z5kXfLCsuNkhXf+AlATVsREREXCIQKi+evwyIiIiL+T98lJCIi4nGBsEpICYuIiIjHuWIVTAJTS0hERERcTxUWERERj7MBsJZRCYuIiIjHRQfANQHUEhIRERHXU4VFRETE46LVEhIRERG3C4Q5LGoJiYiIiOupwiIiIuJxgXAdFiUsIiIiHqeWkIiIiIgLqMIiIiLicWoJiYiIiOsFQsKilpCIiIi4niosIiIiHhcIk26VsIiIiHhctP/nK2oJiYiIiPupwiIiIuJx+i4hERERcT3rdAB3gVpCIiIi4nqqsIiIiHicrsMiIiIirhdtTLzdbsUYk90Ys9wYs8MYs90Y0943ns4Ys9gYs8f3/2l948YY84kxZq8x5gdjTMGrXquZb/89xphmtztHJSwiIiISV5FAJ2vtw0BxoK0x5mGgK7DUWpsbWOp7DFANyO27tQK+gJgEB+gBFAOKAj3+SXJuRgmLiIiIx9l4vN3yONYettZ+77t/HvgZyAbUAcb4dhsD1PXdrwOMtTHWAWmMMWFAFWCxtfaUtfY0sBioeqtjaw6LiIiIxzkxh8UYkxMoAKwHMltrD/s2HQEy++5nA/646mkHfGM3G78pVVhEREQkljGmlTFm01W3VjfYJwUwHehgrT139TZrbVyKNf+aKiwiIiIeF5+X5rfWDgOG3Wy7MSaUmGTla2vtDN/wUWNMmLX2sK/lc8w3fhDIftXTw31jB4Gy/zO+4lZxqcIiIiLicdGYeLvdijHGACOBn621A6/aNAf4Z6VPM2D2VeNNfauFigNnfa2jhUBlY0xa32Tbyr6xm1KFRUREROKqJNAE+NEYs9U39hbQF5hijGkB7AMa+rZFANWBvcAloDmAtfaUMaY3sNG3Xy9r7albHVgJi4iIiMfdrUvzW2vXwE3LMBVusL8F2t7ktUYBo+J67ARPWIKNuk4JLdLpAAKAuc3FlCR+xHy2SUK6LySN0yFIAojPOSxupWxCREREXE8tIREREY8LhO8SUsIiIiLicYHQTFVLSERERFxPFRYRERGPC4RJt0pYREREPC4Q5rCoJSQiIiKupwqLiIiIxwVChUUJi4iIiMfZAJjDopaQiIiIuJ4qLCIiIh6nlpCIiIi4XiAkLGoJiYiIiOupwiIiIuJxgXBpfiUsIiIiHhcIV7pVS0hERERcTxUWERERjwuESbdKWERERDwuEBIWtYRERETE9VRhERER8TitEhIRERHXC4RVQkpYREREPE5zWERERERcQBUWERERj9McFhEREXG96ABIWdQSEhEREddThUVERMTjAmHSrRIWERERj/P/hpBaQiIiIuIBqrCIiIh4nFpCIiIi4nqBcKVbtYRERETE9VRhERER8bhAuA6LEhYRERGP8/90xQ8Sls+H9qNa1fIcP36SokWqXre9dOliTJoyjH37DgAwZ/YC+vb59I6OmShRIoaPGED+Ao9w6tQZmjVpx/79BylXvhS9enchUWgol69c4e23+rBy5Xd3dCx/kDhxYpYvm07ixIkJDglmxoz59Oo1gLFjPqVgoXxcuXKFTRu30ublN4iMjHQ6XM/aves7Lly4SFRUFJGRkZR4vAbvvP0aL7zwLCdOnATgne79WLBgmcORetPwYQOoUb0ix46fIH+BCgA89tjDfD6kL8lTJGPfvgM0adqO8+cvOByp81p+1JYC5Qtz7uRZ3qzc4brtj9ctQ43WdTHG8NfFPxndbRj7f/79jo4ZkiiElwa2595H7+PC6fMMaTeAEweOc1++XLzQpw0AxhhmfDyZzQvX39GxxBmen8Py9bjp1K37/C33+fbbjTxevAaPF6/xr5KVHDmy8c2CideNN3u+IWfOnCXfo+X47NOR9H6vKwAnT56iwVMtKVa0Gi+9+DrDRw78V+fir/7++28qVW5IocKVKFy4MlUql6VY0YJMmDiTRx4pQ4ECFUiSNAktXnjW6VA9r1LlBhQpWoUSj9eIHfvk0+EUKVqFIkWrKFm5A2PHTqFGzcbXjH059CPe6vYBBQpWZNasb3i9UxuHonOX1VOX82Gz3jfdfvyPo7zf8B3eqtKRWZ9M5YU+reP82hnCM/LWpF7XjT/xdEUunr3A60+0ZcHIuTzdtSkAB3btp3utzrxdvRMfNuvNCx+0JijY87/6rhMdjze3+tf/1YwxYxMikP9q7doNnD515j899+lGdVmxahbfrpvPJ5++T1BQ3N6OGjUq8fX46QDMnPkNZcs+DsAP23Zw5PAxAHbs2E2SJElIlCjRf4rN31y8eAmA0NAQQkNDsdZe88tz08atZAsPcyo8kdtavWY9p06fuWbsgdz3sWr1OgCWLF1NvXrVHYjMfXZt2MHFM+dvun3P5l1cOncRgL3f7yZtWPrYbY/XK8O7s/vxXsQAmn/QGhPHz+WClYqwZvpyADZEfEfeko8CcPmvy0RHxfwaTpQ45rPHH0Vj4+3mVrf8STDGzPmf21zgyX8e36UY71jRogX5bl0EM2Z9RZ48uQF48MH7qf9UTSqWf4rHi9cgKiqKpxvVjdPrZc2amQMHDwMQFRXF2XPnSZ8+7TX71K1bjW1bf+Ly5cvxei5eFRQUxKaNizh08AeWLF3Fho1bYreFhITQuHF9Fi5c7mCE3mexRMyfwLrvImjR4v8rAW1aP8/mTYsZ9mV/0qRJ7WCE/mfHjt3Url0FgKfq1yR7eFaHI/
Keso0q8sOKmM+DrLmyUbxmSXrXf4u3q3ciOjqax+uWidPrpMuSnpOHYlqf0VHRXDp/iRRpUwJwf/7c9Fn8MR8sHMRX3b6MTWDEW243hyUc2AGMIGZOjwEKAwNu9SRjTCugFUCi0PSEhqS880j/o61bt/PwQ6W4ePESlauUZeLkL8n/WHnKlitJgQKPsGrNbACSJEnC8eMxP+wTJw3lnpzZSRQaSnj2rHy7bj4An3/2FePHTbvtMfPkyU2v996gTq2mCXdiHhMdHU3hIpVJnToV06aOJG/eB9m+fRcAQz79gNWr17N27QaHo/S2cuWe5NChI2TMmJ5vIiaya9devhw2lvc/+BhrLT3f7cyH/d6h1UuvOx2q32jZ6jU+Htibbm91YN68RVy+fMXpkDwlT4lHKPN0Bd6r/xYAD5d8jJyP3k/POR8CkChJIs6dOAtA+y/fIGP2TIQkCiF91gy8FxHza2jhV/NZPfXWrc5ftu7hzUodyJorG60GvMoPK77nyt/+9d/KvXWR+HO7hKUw0B7oBnS21m41xvxprV15qydZa4cBwwBSJLvX0ffx6glwixauYNDHvUmfPi0Gw9fjp/Nuj4+ue84zjWL6qTlyZOPLYf2pVvWZa7YfOnSU8GxhHDp4hODgYFKnSsnJk6cByJotCxMmfUmrlp347bf9CXhm3nT27DlWrFxL5cpl2b59F2+/3ZEMGdPT5uWWTofmeYcOHQHg+PGTzJ69gCJF8rNmzf9PLhw5agKzZo52KDr/tGvXL1SrETP3Knfu+6herYLDEXlH9ofuoUW/l+nfrDcXzsR8ThtjWDNtOVM+/Pq6/Qe/1A+ImcPSqv8rfNCo+zXbTx05Sfqs6Tl95CRBwUEkS5mMC6evbUsd2nuQvy/9RfgDOfjtx18S6MycEQg1o1u2hKy10dbaQUBzoJsxZggeW1mUKXOG2PuFCucjKMhw8uRpVqxYS9161ciYMaZ3mjZtarJnzxan14yIWELj5+oDUK9etdiVQKlTp2T69FH06N6Pdes2x/OZeFeGDOlInToVEFPJqlihDLt2/cILzZ+hcqWyPPdcW7/tK98tyZIlJUWK5LH3K1Ysw/btu8iSJVPsPnXqVI2takn8+OfzwxjDW2+258th4xyOyBvSZ81A+y+78GXHwRz57XDs+Pa1P1CkeglSpY9pXSZPnYL02TLG6TW3LNlIqfrlAChavQQ7vv0RgIzZM8VOsk2fLSNh92fj+IFj8Xk6cpfEKfmw1h4AGhhjagDnEjakf+er0YMpXaY46dOnZdeeb3n/vY8JDY05rZEjJlCvXnVatmxMZGQUf/71F883fRWAnTv30rvnAGbPHUuQCeJK5BVe69CdP/44eNtjjhk9mREjB7Htx+WcPn2W55u+AsBLrZtx3/330PXNV+n6Zsxx6tRqGttqClRhYZkZNfJjgoODMEFBTJs2l4iIJfx5aR/79h1gzeqY6VAzZ0Xw/vsfOxusR2XOnJGpU0YAEBISzKRJs1i0aAVfjRpMvnx5sdayb98fvNy2q8ORetf4cZ/xRJkSZMiQjt9/3UTPXv1JkSI5bdo8D8CsWRGMHjPZ2SBd4uVPOpKnxCOkSJuSweuGM2PQJIJDggFY9vUi6rZvSIq0KWnWuxUQMxewR60uHNpzgGn9J9JlXHdMkCEqMoox7wzn5MHjtz3myslLaT2oPf1XfsaFMxf4rF3MKs0HCueh5sv1iLoShbWWMW8Pu67y4g/cPFk2vpiE/svW6ZZQIPg7UhN7E5oxAfBFHS4QrUpbgnsmrJjTIQSEcftm3NUPjY45G8XbP55Bv09y5Qee/y1GFxEREb/jqfkoIiIicr1AmHSrhEVERMTjbADMYVFLSERERFxPFRYRERGPU0tIREREXC8QljWrJSQiIiKupwqLiIiIx/l/fUUJi4iIiOepJSQiIiLiAqqwiIiIeJxWCYmIiIjr6cJxIiIiIi6gCouIiIjHqSUkIiIirqeWkIiIiIgLqMIiIiLicWoJiYiIiOtFW7WERERERBynCouIiIjH+X99RQmLiIiI5+m7hERERERcQBUWERERjwuE67AoYREREfG4QFjWrJaQiIiIuJ4qLCIiIh4XCJNulbCIiIh4XCDMYVFLSEREROLMGDPKGHPMGPPTVWPpjDGLjTF7fP+f1jdujDGfGGP2GmN+MMYUvOo5zXz77zHGNLvdcZWwiIiIeFx0PN7iYDRQ9X/GugJLrbW5gaW+xwDVgNy+WyvgC4hJcIAeQDGgKNDjnyTnZpSwiIiIeJy1Nt5ucTjWKuDU/wzXAcb47o8B6l41PtbGWAekMcaEAVWAxdbaU9ba08Birk+CrqGERURERGIZY1oZYzZddWsVh6dlttYe9t0/AmT23c8G/HHVfgd8YzcbvylNuhUREfG4+FwlZK0dBgy7g+dbY0y8zwJO8ITl/lRhCX2IgLf77EGnQ/B7SUMSOR1CQLhw+U+nQ/B7ozb3dzoESQAuuHDcUWNMmLX2sK/lc8w3fhDIftV+4b6xg0DZ/xlfcasDqCUkIiLicTYe//cfzQH+WenTDJh91XhT32qh4sBZX+toIVDZGJPWN9m2sm/sptQSEhERkTgzxkwkpjqSwRhzgJjVPn2BKcaYFsA+oKFv9wigOrAXuAQ0B7DWnjLG9AY2+vbrZa3934m811DCIiIi4nF380q31tpnbrKpwg32tUDbm7zOKGBUXI+rhEVERMTj4rIc2es0h0VERERcTxUWERERj3PBKqEEp4RFRETE4/TlhyIiIiIuoAqLiIiIx93NVUJOUcIiIiLicVolJCIiIuICqrCIiIh4nFpCIiIi4npaJSQiIiLiAqqwiIiIeFx0AEy6VcIiIiLicf6frqglJCIiIh6gCouIiIjHaZWQiIiIuF4gJCxqCYmIiIjrqcIiIiLicYFwaX4lLCIiIh6nlpCIiIiIC6jCIiIi4nGBcGl+JSwiIiIeFwhzWNQSEhEREddThUVERMTjAmHSrRIWERERj1NLSERERMQFVGERERHxOLWERERExPUCYVmzWkIiIiLieqqwiIiIeFx0AEy6VcIiIiLicWoJeVjKVCnoP+J9Zq2eyMxVE3is0CN39Hq1GlZjzreTmfPtZGo1rAZAkqSJ+XR8f2atnsiMleNp361NfITud9q2bc6mTYvYvHkx7dq9AMCjj+ZhxYqZbNy4kGnTRpIyZQqHo3RetmxZmD1/HN9t/IZvN0TwUptm1+3zVMParP5uLmvWzWPBksnkfeShOz5uokSJGDn6YzZtXcLiZdPIniMbAGXLlWTZqpmsWTePZatmUrpM8Ts+ltft3vUd329ewsYNC/nu2/kAPPZoHlatnM33m5cwc8ZX+ln2iYqK4qnn2/Jy5x7XbTt85BjN273BU8+3pV7TNqz6dsMdH+/AoSM882IHqjV8gU7v9OHKlSsATJ45n3pN2lC/WVuatOnEL7/tu+NjiTP8NmHp8l4H1i5bR93Sz9CgQlN+2/N7nJ43YsYQsmbPcs1YqjQpad3pBZ6r3pLG1VrSutMLpEydEoCxX0ygbulna
FjxefIXeZSS5fWhfrWHH36A5s2foXTp2hQtWpVq1Spw33338MUX/Xj77b4UKVKFOXMW0rHjS06H6rjIyCjeeasPJYpUo3L5BrRo1ZgHH8x1zT779/1BzWqNKVW8Jv37fcbHn7wX59fPniMbcyLGXzf+XNOnOHPmHIXzV+SLz77i3V6dATh58jTPNnyJUsVr0valLnwx/KM7O0E/UalyA4oUrUKJx2sAMHToR3R7uw8FC1Vk1uwFdHqttcMRusP4qbO5L2eOG277csxEqlQozbTRn9G/Z1feG/BZnF931vzFfDby+p/jQV+MosnTdflmyihSpUzB9HkLAahRuSwzx33B9DGf8cKzDfjw0+H/7YRcLtraeLu51b9KWIwxpYwxrxljKidUQPEhRcrkFCqen5kT5gIQeSWS8+cuEH5PNj6fMJCJC0fx1azPyZnrnji93uNli7Nu5UbOnTnP+bPnWbdyIyXLFeevP/9m49rvY4/x84+7yRyWKcHOy4seeigXGzdu5c8//yIqKorVq9dTt25VcuW6lzVr1gOwbNlq6tat5nCkzjt69Dg/bNsBwIULF9m96xfCsma+Zp8N67dw9sw5ADZu3EpYtv/f3uDp2ixePo2Va+cwcHBvgoLi9s+7eo2KTJowA4DZsxZQpmwJAH78YQdHjhwD4Oef95A0SRISJUp0Zyfph3Lnvo/Vq9cBsHTpKurVq+5wRM47cuw4q77dQP1aVW643RjDxYuXADh/8RIZM6QHYqoy/YeM4OkWr1KvaRumzIqI0/GstazfvI3KZUsDUKd6RZat+g6AFMmTx+73519/YYz5z+flZjYe/+dWt/xEM8ZsuOr+i8AQICXQwxjTNYFj+8+y5cjK6ZNn6DW4G5MXj6bHgK4kTZaE7v3foG+3gTxT5QUG9hxCt76vx+n1MoVl4MihY7GPjx4+RqawDNfskzJVCp6oXJL1qzfF67l43fbtuylZsgjp0qUhadIkVK1ajvDwrPz88x5q1YrJe598sgbh4WEOR+ou2XNk47HHHmbzpm033adJ0wYsXbwKgAcevJ969WtQrVIjnihZm6ioKBo8XTtOxwrLmpmDB44AMb8wzp29QLr0aa/Zp3adqmzbtp3Lly//xzPyDxZLxPwJrPsughYtGgOwY8duateO+cVcv35NwsOzOhmiK/Qb/CWvvdwCY278K+blF55j3sLlVKj7HC+/3p23Osa002fMW0jKFMmZPPITJo8YzLQ5Czhw6Mhtj3fm7DlSpkhOSEgwAJkzZuDY8ZOx2ydOn0vVBs0Z8PlI3uygCphX3W7SbehV91sBlay1x40x/YF1QN8bPckY08q3P9lS3kf6ZJlvtFuCCQ4J5qFHH6DvWwP5ccsOuvTuQNs3WpGv8KN8NPz/S+j//LVYp1ENnm3ZAIAc94Yz5OsBXLl8hUP7D9PxhTdvf7zgYPoO7cmEEVM5uP9QwpyUR+3atZcBA4Yyd+54Ll26xLZt24mKiuKllzozYMC7dO36KvPnL+by5StOh+oayZMnY8z4IbzV9X3On79ww31KlS7Gc00bUK1yIwDKPFGCfPnzsnRlTKUkSdLEnPB9YI+d8Bn33JOdRIlCyRYexsq1cwD48osxTBg//bbxPPRQLnr06kz9us3j4/Q8rVy5Jzl06AgZM6bnm4iJ7Nq1l1YvdWLgwF689WZ75s3Tz/KKtetJlzYNeR/KzYbvf7jhPhFLVlCnekWef6Y+W3/6mTd7f8SscUP5dsP37P7ldxYtXwPAhYsX2ffHQVIkT0aLV2M+i8+eP8+VK5GxFZQ+3V8nY/p0t4zpmfq1eKZ+LeYvWs6XoyfywTtx+2PVS9zcyokvt0tYgowxaYmpxBhr7XEAa+1FY0zkzZ5krR0GDAPIl+Xxu/4uHj10jKOHj/Pjlpjy+uJ5y2nbpSXnz53n6YrPX7f/7EnzmT0pZgLdiBlD6N7+PQ798f9Z/bHDJyjyeIHYx5nDMrHx2y2xj7v3f4P9vx7g6+FTEuiMvG3MmMmMGTMZgJ49O3Pw4BF27/6FWrWaAJAr171Uq1beyRBdIyQkhDHjhzBtyhzmzVl0w30ezvsgg4d8QMP6LTh96gwQU2KfNGEmvd8dcN3+TZ9tC8RUbT4b2o/a1Z+7ZvvhQ0fJFp6FQ4eOEBwcTKrUKTh18jQAWbNmYezEz3n5pc78/tv+eDxTbzrk+2v/+PGTzJ69gCJF8jNo0JfUqBFTbcmd+16qVavgZIiO2/LDDlasWcfq7zby9+UrXLx4iTd6fki/Hl1i95kxdyFDB8b88Zj/kTxcvnyF02fPYS281bENJYsVuu51p4+Jmecya/5iDh45StsW//9zbK3l/IWLREZGERISzNHjJ8iUMf11r1Gt4hP07j8kvk/ZFdzcyokvt2typwY2A5uAdMaYMABjTArAtY3Ak8dPcfTgUe65P2bCV7HShdm+bScH9x+mUq1ysfs98HCum73ENb5dsY4SZYuSMnVKUqZOSYmyRfl2RUzPuu0brUiRMjkfvvNxvJ+Hv8jo++DInj0rdepUZfLk2bFjxhi6dn2F4cO/djJE1/jksw/YvesXPh/y1Q23ZwsPY+zXn9Gm1ev8svf32PFVK76jdp2qZMgQ85dmmrSpCc8et9bENxFLafTskwDUqVuV1StjfrZTpU7JpGnD6NWjP+vXfX8HZ+UfkiVLSooUyWPvV6xYhu3bd13zs/xm1/YMGz7OyTAd17FNc5bOGs+i6WP4qGdXihbKd02yAhCWJRPrN20F4Jff9/P335dJlyY1JYsVZPLM+VyJjPl7+Pf9B7j051+3PaYxhqIFH2PRitUAzI5YQvnSMXOx9v1xMHa/Vd9uIEd4tvg4TXHALSss1tqcN9kUDdSL92jiUd9ug+jzeQ9CQ0M5sO8Q3Tu8T6rUKejWtzMvdniekNAQFs5awu4de2/7WufOnGfYoK+YsGAkAF8O/IpzZ86TKSwjrTo+z6+7f2fS4phfMJNGTY+d7CsxJk4cSrp0ably5QodOnTn7NlztG3bnJdeagrA7NkLGDtW1aliJQrR6Nl6bP9pZ2zbpnfPAbFzIkaPmkiXru1Ily4NHw3sCUBkZCQVnniSXbv28kHvQUyfPZqgIMOVK5F06dSTA3/cvkU5fuxUhg7vz6atSzh9+gwtm3cE4MVWTbj3vnvo/EY7Or/RDoD6dZ7nxIlTCXH6rpc5c0amThkBQEhIMJMmzWLRohW0a9eCNq1jlqDPmvVNbDVRrjVk+FjyPvQA5UoXp3O7lvTo9wljp8zEYHiv22sYY6hfqyoHDx+jYfNXsNaSNk1qPunbPU6v37HNC3Tu0ZdPh40lzwP382TNmDlyE6bPZd3GLYSEhJAqZQo+eLtTQp6mYwKhJWQS+iupnWgJBZrdZw/efie5I0lDtDrmbrhw+U+nQ/B7Fw+ucjqEgBCa4b672oW4L0OBePtd++uJLa7soPjtdVhERETEf+jS/CIiIh5nbbTTISQ4JSwiIiIeF61VQiIiIiLOU4VFRETE4xJ6AY0bKGERERHxOLWERERERFxAFRYRERGPU0tI
REREXC8QrnSrlpCIiIi4niosIiIiHhcI39ashEVERMTjNIdFREREXE/LmkVERERcQBUWERERj1NLSERERFxPy5pFREREXEAVFhEREY9TS0hERERcT6uERERERFxAFRYRERGPU0tIREREXE+rhERERERcQBUWERERj9OXH4qIiIjrqSUkIiIi4gKqsIiIiHicVgmJiIiI6wXCHBa1hERERMT1VGERERHxOLWERERExPUCIWFRS0hERERcTxUWERERj/P/+gqYQCgj/VvGmFbW2mFOx+HP9B4nPL3Hd4fe54Sn91hALaGbaeV0AAFA73HC03t8d+h9Tnh6j0UJi4iIiLifEhYRERFxPSUsN6ZeacLTe5zw9B7fHXqfE57eY9GkWxEREXE/VVhERETE9ZSwiIiIiOspYbmKMWaUMeaYMeYnp2PxV8aY7MaY5caYHcaY7caY9k7H5G+MMUmMMRuMMdt873FPp2PyV8aYYGPMFmPMPKdj8VfGmN+NMT8aY7YaYzY5HY84R3NYrmKMKQNcAMZaax9xOh5/ZIwJA8Kstd8bY1ICm4G61todDofmN4wxBkhurb1gjAkF1gDtrbXrHA7N7xhjXgMKA6mstTWdjscfGWN+Bwpba084HYs4SxWWq1hrVwGnnI7Dn1lrD1trv/fdPw/8DGRzNir/YmNc8D0M9d30l0k8M8aEAzWAEU7HIhIIlLCIY4wxOYECwHqHQ/E7vlbFVuAYsNhaq/c4/n0MdAGiHY7D31lgkTFmszFGV7wNYEpYxBHGmBTAdKCDtfac0/H4G2ttlLU2PxAOFDXGqMUZj4wxNYFj1trNTscSAEpZawsC1YC2vta9BCAlLHLX+eZVTAe+ttbOcDoef2atPQMsB6o6HIq/KQnU9s2vmASUN8aMdzYk/2StPej7/2PATKCosxGJU5SwyF3lmxA6EvjZWjvQ6Xj8kTEmozEmje9+UqASsNPRoPyMtfZNa224tTYn0AhYZq19zuGw/I4xJrlvcj7GmORAZUCrOAOUEparGGMmAt8BDxpjDhhjWjgdkx8qCTQh5i/Srb5bdaeD8jNhwHJjzA/ARmLmsGjZrXhRZmCNMWYbsAGYb61d4HBM4hAtaxYRERHXU4VFREREXE8Ji4iIiLieEhYRERFxPSUsIiIi4npKWERERMT1lLCIiIiI6ylhEREREdf7P/sWI9kNE1vPAAAAAElFTkSuQmCC\n",
"text/plain": [
"<Figure size 720x504 with 2 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"import seaborn as sn\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"array = metrics.confusion_matrix(test_ratings, test_predictions)\n",
"df_cm = pd.DataFrame(array, index = [i for i in range(1, 6)],\n",
" columns = [i for i in range(1, 6)])\n",
"plt.figure(figsize = (10,7))\n",
"sn.heatmap(df_cm, annot=True)\n"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"def predict_rating(text):\n",
" test_reviews = pd.DataFrame([text], columns=['reviews'])\n",
"\n",
" test_reviews, _ = preprocess_data(tokenizer, test_reviews, maxlen=train_reviews.shape[1])\n",
"\n",
" test_predictions = model.predict(test_reviews)\n",
" test_ratings = np.argmax(np.array(test_predictions), axis=1) + 1\n",
" \n",
" import tabulate\n",
" \n",
" str = f\"\\nPredicted Rating: {test_ratings}\\n\\n\\n\"\n",
" str += tabulate.tabulate(test_predictions, headers=[\"Rating-1\", \"Rating-2\", \"Rating-3\", \"Rating-4\", \"Rating-5\"])\n",
" return str"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"import tkinter as tk\n",
" \n",
"# Top level window \n",
"frame = tk.Tk() \n",
"frame.title(\"Rate reviews\") \n",
"frame.geometry('600x600') \n",
"\n",
"# Function for getting Input from textbox and printing it at label widget \n",
"def printInput(): \n",
" inp = inputtxt.get(1.0, \"end-1c\") \n",
" output = predict_rating(inp)\n",
" lbl.config(text = output) \n",
"\n",
"# TextBox Creation \n",
"inputtxt = tk.Text(frame, \n",
" height = 10, \n",
" width = 40, \n",
" font=(\"Courier\", 18)) \n",
" \n",
"inputtxt.pack() \n",
" \n",
"# Button Creation \n",
"printButton = tk.Button(frame, \n",
" text = \"Print\", \n",
" command = printInput, \n",
" font=(\"monospace\", 14)) \n",
"printButton.pack() \n",
" \n",
"# Label Creation \n",
"lbl = tk.Label(frame, text = \"\") \n",
"lbl.pack() \n",
"frame.mainloop() "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
Instructions to run the code file for Assignment-2
1. Open the code.ipynb file.
2. Run the cells in the file one by one.
3. Run the "To access pre-trained word embeddings" cell, which mounts the embeddings from our Google Drive folder. To get access, please mail us your ID and we will share the folder, so that all embeddings (glove.6B.100d.txt, glove.6B.200d.txt, glove.6B.300d.txt, wiki-news-300d-1M.vec, and GoogleNews-vectors-negative300.bin) and datasets (train.csv, gold_test.csv) can be accessed.
4. Run the Header files section.
5. Run the Pre-processing function section.
6. Run the Function for Neural Network section.
7. Run the Required for word embedding section.
8. There are three embedding sections in the file: GloVe, Word2vec and fastText. All of them are under the Word Embeddings section.
9. Any one of the embedding sections can be run (a minimal sketch of how such an embedding matrix is built is given at the end of these instructions).
10. Run the Train Neural Network section to get the training accuracy.
11. Run the Test accuracy section to get the accuracy on the gold_test data.
12. In short: first run the Headers section, then the data pre-processing section, then initialise the Neural Network, then run the required embedding (GloVe, Word2vec or fastText) and finally train the Neural Network. The training accuracy will be displayed; after that, run the test section.
13. To run the imbalanced-data handling techniques (such as Undersampling Technique-1), first run the Functions section, then the Set Hyperparameters section (the hyperparameters can be changed there), and then any of the following sections to use the corresponding technique. By default the 300d GloVe embedding is used, i.e. embed == 1; to change this, go to Data Imbalanced Handling, open its Functions section and change the initialisation of embed. With embed == 2 the fastText embedding is used, and any other integer selects the pre-trained Word2vec embedding.
Steps 1, 2, 3, 4, 5 and 6 are necessary before executing step 12.
We have also attached a file containing the GUI (GUI.ipynb), from which the GUI can be run. code.ipynb contains the embeddings, whereas GUI.ipynb only provides the GUI: it takes text as input and returns the predicted rating with the probability of each rating.
In this notebook, the last two cells should be run to generate the GUI. There is one textbox where the user inserts text and then presses the "Print" button to get the predicted rating and the probabilities of each rating.
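A minimal sketch of how one of the embedding sections can build a Keras Embedding layer from a pre-trained GloVe file is given below. The file name glove.6B.100d.txt, the 100-dimension size and the variables tokenizer, maxlen and e are taken from the cells in code.ipynb; the exact code of the embedding sections may differ, so treat this as an illustration rather than the section's implementation.

    import numpy as np
    from tensorflow.keras.layers import Embedding
    from tensorflow.keras.initializers import Constant

    embedding_dim = 100                            # assumed: glove.6B.100d.txt
    vocab_size = len(tokenizer.word_index) + 1     # +1 for the padding index 0

    # read the GloVe text file: each line holds a word followed by its vector
    embeddings_index = {}
    with open("glove.6B.100d.txt", encoding="utf-8") as f:
        for line in f:
            values = line.split()
            embeddings_index[values[0]] = np.asarray(values[1:], dtype="float32")

    # build the weight matrix; words missing from GloVe keep a zero vector
    embedding_matrix = np.zeros((vocab_size, embedding_dim))
    for word, index in tokenizer.word_index.items():
        vector = embeddings_index.get(word)
        if vector is not None:
            embedding_matrix[index] = vector

    # frozen embedding layer that can be passed to NeuralNet as e
    e = Embedding(vocab_size, embedding_dim,
                  embeddings_initializer=Constant(embedding_matrix),
                  input_length=maxlen, trainable=False)

With such a layer, NeuralNet(train_reviews, train_ratings, e) can be built and trained exactly as in step 12.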
\ No newline at end of file
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "code.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "jN5i-YL1dNqz"
},
"source": [
"#### Install python packages"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "n-6Y9V3GOPQq",
"outputId": "94cc23d1-dfc9-4339-ccb5-a7efa6c19d72"
},
"source": [
"!pip install PyDrive\n",
"!pip install --upgrade keras"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Requirement already satisfied: PyDrive in /usr/local/lib/python3.7/dist-packages (1.3.1)\n",
"Requirement already satisfied: google-api-python-client>=1.2 in /usr/local/lib/python3.7/dist-packages (from PyDrive) (1.12.8)\n",
"Requirement already satisfied: oauth2client>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from PyDrive) (4.1.3)\n",
"Requirement already satisfied: PyYAML>=3.0 in /usr/local/lib/python3.7/dist-packages (from PyDrive) (3.13)\n",
"Requirement already satisfied: google-auth-httplib2>=0.0.3 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.2->PyDrive) (0.0.4)\n",
"Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.2->PyDrive) (3.0.1)\n",
"Requirement already satisfied: google-auth>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.2->PyDrive) (1.27.1)\n",
"Requirement already satisfied: httplib2<1dev,>=0.15.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.2->PyDrive) (0.17.4)\n",
"Requirement already satisfied: google-api-core<2dev,>=1.21.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.2->PyDrive) (1.26.1)\n",
"Requirement already satisfied: six<2dev,>=1.13.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.2->PyDrive) (1.15.0)\n",
"Requirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.7/dist-packages (from oauth2client>=4.0.0->PyDrive) (0.4.8)\n",
"Requirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from oauth2client>=4.0.0->PyDrive) (4.7.2)\n",
"Requirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.7/dist-packages (from oauth2client>=4.0.0->PyDrive) (0.2.8)\n",
"Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.2->PyDrive) (4.2.1)\n",
"Requirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.2->PyDrive) (54.0.0)\n",
"Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (2.23.0)\n",
"Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (1.53.0)\n",
"Requirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (3.12.4)\n",
"Requirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (2018.9)\n",
"Requirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (20.9)\n",
"Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (2.10)\n",
"Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (1.24.3)\n",
"Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (3.0.4)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (2020.12.5)\n",
"Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=14.3->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.2->PyDrive) (2.4.7)\n",
"Requirement already up-to-date: keras in /usr/local/lib/python3.7/dist-packages (2.4.3)\n",
"Requirement already satisfied, skipping upgrade: pyyaml in /usr/local/lib/python3.7/dist-packages (from keras) (3.13)\n",
"Requirement already satisfied, skipping upgrade: numpy>=1.9.1 in /usr/local/lib/python3.7/dist-packages (from keras) (1.19.5)\n",
"Requirement already satisfied, skipping upgrade: h5py in /usr/local/lib/python3.7/dist-packages (from keras) (2.10.0)\n",
"Requirement already satisfied, skipping upgrade: scipy>=0.14 in /usr/local/lib/python3.7/dist-packages (from keras) (1.4.1)\n",
"Requirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.7/dist-packages (from h5py->keras) (1.15.0)\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "nuyriDkqT_Sz"
},
"source": [
"#### To access pre-trained embeddings and dataset (train and test)"
]
},
{
"cell_type": "code",
"metadata": {
"id": "f068QkbOOpUC"
},
"source": [
"from pydrive.auth import GoogleAuth\n",
"from pydrive.drive import GoogleDrive\n",
"from google.colab import auth\n",
"from oauth2client.client import GoogleCredentials\n",
"\n",
"auth.authenticate_user()\n",
"gauth = GoogleAuth()\n",
"gauth.credentials = GoogleCredentials.get_application_default()\n",
"drive = GoogleDrive(gauth)\n",
"\n",
"downloaded = drive.CreateFile({'id':'1GH7dGh9ftQikz8KIDedxBooBzX_qw0zW'}) # replace the id with id of file you want to access\n",
"downloaded.GetContentFile('glove.6B.100d.txt') # replace the file name with your file\n",
"\n",
"download = drive.CreateFile({'id':'1gVExcJs31_mHWFGNvH3YpXBnS-A_XurA'}) # replace the id with id of file you want to access\n",
"download.GetContentFile('train.csv') # replace the file name with your file\n",
"\n",
"download = drive.CreateFile({'id':'13Nc1ZZaJD7_kBup22ScZYXmxAYNg1TOT'}) # replace the id with id of file you want to access\n",
"download.GetContentFile('test.csv') # replace the file name with your file\n",
"\n",
"download = drive.CreateFile({'id':'1xyw2FyO1RTOAK-h7V3PYF_Sxmn8ho5jh'}) # replace the id with id of file you want to access\n",
"download.GetContentFile('gold_test.csv') # replace the file name with your file\n",
"\n",
"download = drive.CreateFile({'id':'1daXpP2rI4YDUHwl4I-kDolS9JfoTY_r4'}) # replace the id with id of file you want to access\n",
"download.GetContentFile('glove.6B.200d.txt') # replace the file name with your file\n",
"\n",
"download = drive.CreateFile({'id':'15IB-nj1e-E6B9PRpJslkB1RMBEtiCA6l'}) # replace the id with id of file you want to access\n",
"download.GetContentFile('glove.6B.300d.txt') # replace the file name with your file\n",
"\n",
"download = drive.CreateFile({'id':'1rH_EvmtKprvULcKj5ARNI3r3p6K9EGwO'}) # replace the id with id of file you want to access\n",
"download.GetContentFile('wiki-news-300d-1M.vec') # replace the file name with your file\n",
"\n",
"download = drive.CreateFile({'id':'1mPXuUDiPAPid2nGjnhUQIzn8Qz-nHp6V'}) # replace the id with id of file you want to access\n",
"download.GetContentFile('GoogleNews-vectors-negative300.bin') # replace the file name with your file"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "fEC0XB9cUQfA"
},
"source": [
"#### Header files"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "r5NmV1ciOT0h",
"outputId": "dea55388-b535-4937-b479-417352850317"
},
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import string\n",
"import nltk\n",
"from nltk.corpus import stopwords\n",
"import tensorflow as tf\n",
"from tensorflow.keras.preprocessing.text import Tokenizer\n",
"from tensorflow.keras.preprocessing.text import one_hot\n",
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
"from tensorflow.keras import Sequential\n",
"from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n",
"from sklearn import datasets, model_selection, metrics\n",
"from keras.layers.embeddings import Embedding\n",
"from keras.initializers import Constant\n",
"from nltk.tokenize import word_tokenize\n",
"from sklearn.model_selection import train_test_split \n",
"nltk.download('stopwords')\n",
"stopword = stopwords.words('english') \n",
"nltk.download('punkt')"
],
"execution_count": 6,
"outputs": [
{
"output_type": "stream",
"text": [
"[nltk_data] Downloading package stopwords to /root/nltk_data...\n",
"[nltk_data] Unzipping corpora/stopwords.zip.\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Unzipping tokenizers/punkt.zip.\n"
],
"name": "stdout"
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"True"
]
},
"metadata": {
"tags": []
},
"execution_count": 6
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "1vS9WexsUVPZ"
},
"source": [
"#### Preprocessing function"
]
},
{
"cell_type": "code",
"metadata": {
"id": "wsu00ZJwPCb5"
},
"source": [
"def encode_data(tokenizer, text, tokens, preprocessing_training_data = False):\n",
" # This function will be used to encode the reviews using a dictionary (created using corpus vocabulary) \n",
"\n",
" # Example of encoding :\"The food was fabulous but pricey\" has a vocabulary of 4 words, each one has to be mapped to an integer like: \n",
" # {'The':1,'food':2,'was':3 'fabulous':4 'but':5 'pricey':6} this vocabulary has to be created for the entire corpus and then be used to \n",
" # encode the words into integers \n",
"\n",
" # return encoded examples\n",
" if preprocessing_training_data:\n",
" tokenizer = Tokenizer(oov_token = '<oov>')\n",
" tokenizer.fit_on_texts(tokens)\n",
"\n",
" sequences = tokenizer.texts_to_sequences(text)\n",
" return sequences, tokenizer\n",
"\n",
"def convert_to_lower(text):\n",
" # return the reviews after convering then to lowercase\n",
" lower_text = text.lower()\n",
" return lower_text\n",
"\n",
"def perform_tokenization(text):\n",
" # return the reviews after performing tokenization\n",
" token=nltk.word_tokenize(text)\n",
" return token\n",
"\n",
"def remove_stopwords(text):\n",
"\t# return the reviews after removing the stopwords\n",
" stopword = [] # not any stopword\n",
" removing_stopwords=[word for word in text if word not in stopword]\n",
" return removing_stopwords\n",
"\t#print(removing_stopwords)\n",
"\n",
"def remove_punctuation(text):\n",
" # return the reviews after removing punctuations\n",
" removing_punctuation = [word for word in text if word.isalpha()]\n",
" return removing_punctuation\n",
"\n",
"def perform_padding(data, maxlen):\n",
" # return the reviews after padding the reviews to maximum length\n",
"\tpadded_data = pad_sequences(data, maxlen=maxlen, padding='post')\n",
"\treturn padded_data\n",
"\n",
"def preprocess_data(tokenizer, data, preprocessing_training_data=False, maxlen=None):\n",
" # make all the following function calls on your data\n",
" # EXAMPLE:->\n",
" '''\n",
" review = data[\"reviews\"]\n",
" review = convert_to_lower(review)\n",
" review = remove_punctuation(review)\n",
" review = remove_stopwords(review)\n",
" review = perform_tokenization(review)\n",
" review = encode_data(review)\n",
" review = perform_padding(review)\n",
" '''\n",
" # return processed data\n",
"\n",
" reviews = data[\"reviews\"]\n",
" list_of_reviews = list(reviews)\n",
" string_of_reviews = ' '.join(str(e) for e in list_of_reviews)\n",
"\n",
" lower_text = convert_to_lower(string_of_reviews)\n",
" # print(lower_text, end='\\n\\n\\n\\n')\n",
" tokens = perform_tokenization(lower_text)\n",
" #print(tokens, end='\\n\\n\\n\\n')\n",
" tokens = remove_stopwords(tokens)\n",
"# print(tokens, end='\\n\\n\\n\\n')\n",
" tokens = remove_punctuation(tokens)\n",
"# print(tokens, end='\\n\\n\\n\\n')\n",
" encoded_data, tokenizer = encode_data(tokenizer, reviews, tokens, preprocessing_training_data)\n",
"# print(encoded_data, end='\\n\\n\\n\\n')\n",
" reviews = perform_padding(encoded_data, maxlen)\n",
"# print(review, end='\\n\\n\\n\\n')\n",
"\n",
" # TODO: Use word embedding for better results \n",
" return pd.DataFrame(reviews), tokenizer"
],
"execution_count": 2,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "NFdxrcfdUldJ"
},
"source": [
"#### Function for Neural Network"
]
},
{
"cell_type": "code",
"metadata": {
"id": "yTu0_OclPIol"
},
"source": [
"def softmax_activation(x):\n",
" # write your own implementation from scratch and return softmax values(using predefined softmax is prohibited)\n",
" Exponent_calculation=tf.exp(x-tf.reduce_max(x,axis=-1,keepdims=True))\n",
" Normalization=tf.reduce_sum(Exponent_calculation,axis=-1,keepdims=True)\n",
" return Exponent_calculation/Normalization\n",
"\n",
"\n",
"class NeuralNet:\n",
"\n",
" def __init__(self, reviews, ratings,e):\n",
" self.reviews = reviews\n",
" self.ratings = ratings\n",
" self.e = e\n",
" def build_nn(self):\n",
" #add the input and output layer here; you can use either tensorflow or pytorch\n",
" self.model = Sequential()\n",
" print(\"Reviews Shape: \", self.reviews.shape)\n",
" print(\"Ratings Shape: \", self.ratings.shape)\n",
" self.model.add(e)\n",
" self.model.add(Flatten())\n",
" #self.model.add(Dense(5 , activation=\"relu\" ))\n",
" #self.model.add(Dense(32, activation='sigmoid'))\n",
" self.model.add(Dense(5))\n",
" self.model.add(Activation(softmax_activation, name='Softmax'))\n",
"\n",
" self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
" \n",
" def train_nn(self,batch_size,epochs, ):\n",
" # write the training loop here; you can use either tensorflow or pytorch\n",
" # print validation accuracy\n",
" self.model.fit(self.reviews, self.ratings, epochs=epochs, batch_size=batch_size, verbose=1)\n",
"\n",
" def predict(self, reviews):\n",
" # return a list containing all the ratings predicted by the trained model\n",
" predicted = self.model.predict(reviews)\n",
" return predicted"
],
"execution_count": 3,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "IZctTDz-gqok"
},
"source": [
"#### Required for word embedding"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 229
},
"id": "iISfyYVQgktl",
"outputId": "721296e8-1dfd-45d6-9d85-cc41b9b0133b"
},
"source": [
"# reading dataset\n",
"train_data = pd.read_csv(\"train.csv\", index_col=0)\n",
"test_data = pd.read_csv(\"test.csv\", index_col=0)\n",
"\n",
"\n",
"# separate reviews, ratings\n",
"train_reviews = train_data.iloc[:, :-1]\n",
"train_ratings = train_data.iloc[:, -1]\n",
"train_ratings = pd.get_dummies(train_ratings)\n",
" \n",
"tokenizer = Tokenizer(oov_token = '<oov>')\n",
"\n",
"# pre-process data\n",
"train_reviews, tokenizer = preprocess_data(tokenizer, train_data, preprocessing_training_data=True)\n",
"test_reviews, tokenizer = preprocess_data(tokenizer, test_data, maxlen=train_reviews.shape[1])\n",
"#print(len(train_reviews))\n",
"#print(len(test_reviews))\n",
"maxlen=train_reviews.shape[1]\n",
"vocab_size = len(tokenizer.word_index) + 1\n",
"print(vocab_size)"
],
"execution_count": 4,
"outputs": [
{
"output_type": "error",
"ename": "NameError",
"evalue": "ignored",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-4-53c4bc8326ea>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# reading dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mtrain_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"train.csv\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex_col\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0mtest_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"test.csv\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex_col\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'pd' is not defined"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "MaNBH_r4UcRE"
},
"source": [
"### Data Imbalanced Handling\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cXdVILD7b9gr"
},
"source": [
"#### Functions"
]
},
{
"cell_type": "code",
"metadata": {
"id": "IgQH8sYKPPzr",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 229
},
"outputId": "0cf1242b-78e5-4d4c-a380-1c3624cb45a9"
},
"source": [
"class NeuralNet:\n",
"\n",
" def __init__(self, reviews, ratings,e):\n",
" self.reviews = reviews\n",
" self.ratings = ratings\n",
" self.e = e\n",
" def build_nn(self):\n",
" #add the input and output layer here; you can use either tensorflow or pytorch\n",
" self.model = Sequential()\n",
" print(\"Reviews Shape: \", self.reviews.shape)\n",
" print(\"Ratings Shape: \", self.ratings.shape)\n",
" print(\"Columns: \", self.ratings.columns)\n",
" self.model.add(e)\n",
" self.model.add(Flatten())\n",
" # self.model.add(Dense(30, activation='sigmoid', kernel_regularizer=tf.keras.regularizers.l2(l=0.1)))\n",
" self.model.add(Dense(5))\n",
" # self.model.add(Dense(5 , input_shape=(self.reviews.shape[1], )))\n",
" #self.model.add(Dense(5 , activation=\"relu\"))\n",
" self.model.add(Activation(softmax_activation, name='Softmax'))\n",
" # TODO: Add our softmax function \n",
"\n",
" self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
" \n",
" def train_nn(self,batch_size,epochs, cv_reviews, cv_ratings):\t\n",
"\t # write the training loop here; you can use either tensorflow or pytorch\t\n",
"\t # print validation accuracy\t\n",
"\t self.model.fit(self.reviews, self.ratings, epochs=epochs, batch_size=batch_size, verbose=1, validation_data=(cv_reviews, cv_ratings))\t\n",
"\n",
" def predict(self, reviews):\n",
" # return a list containing all the ratings predicted by the trained model\n",
" predicted = self.model.predict(reviews)\n",
" return predicted\n",
"\n",
"\n",
"# get e first\n",
"def get_e(tokenizer, maxlen, embed=1):\n",
" if embed==1: #glove\n",
" # load the whole embedding into memory\n",
" embeddings_index = dict()\n",
" f = open('glove.6B.300d.txt')\n",
" for line in f:\n",
" values = line.split()\n",
" word = values[0]\n",
" coefs = np.asarray(values[1:], dtype='float32')\n",
" embeddings_index[word] = coefs\n",
" f.close()\n",
" print('Loaded %s word vectors.' % len(embeddings_index))\n",
"\n",
" # create a weight matrix for words in training docs\n",
" embedding_matrix = np.zeros((vocab_size, 300))\n",
" for word, i in tokenizer.word_index.items():\n",
" embedding_vector = embeddings_index.get(word)\n",
" if embedding_vector is not None:\n",
" embedding_matrix[i] = embedding_vector\n",
" print(train_reviews.shape)\n",
" print(embedding_matrix.shape)\n",
" e = Embedding(vocab_size, 300, embeddings_initializer=Constant(embedding_matrix), input_length=maxlen, trainable=False)\n",
" # TODO: Cross-validate\n",
" print(train_reviews.shape)\n",
" elif embed==2: # fasttext\n",
" # load the whole embedding into memory\n",
" embeddings_index = dict()\n",
" f = open('wiki-news-300d-1M.vec')\n",
" for line in f:\n",
" values = line.split()\n",
" word = values[0]\n",
" coefs = np.asarray(values[1:], dtype='float32')\n",
" embeddings_index[word] = coefs\n",
" f.close()\n",
" print('Loaded %s word vectors.' % len(embeddings_index))\n",
" #print(values)\n",
" # create a weight matrix for words in training docs\n",
" embedding_matrix = np.zeros((vocab_size, 300))\n",
" for word, i in tokenizer.word_index.items():\n",
" embedding_vector = embeddings_index.get(word)\n",
" if embedding_vector is not None:\n",
" embedding_matrix[i] = embedding_vector\n",
" print(train_reviews.shape)\n",
" print(embedding_matrix.shape)\n",
" e = Embedding(vocab_size, 300, embeddings_initializer=Constant(embedding_matrix), input_length=maxlen, trainable=True)\n",
" # TODO: Cross-validate\n",
" print(train_reviews.shape)\n",
" else: # word2vec\n",
" from gensim.models import KeyedVectors\n",
"\n",
" # load the whole embedding into memory\n",
" embeddings_index = dict()\n",
"\n",
"\n",
" #f = open('GoogleNews-vectors-negative300.bin')\n",
" model_w2v = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)\n",
" print(type(model_w2v))\n",
"\n",
" # prepare embedding matrix\n",
" embedding_matrix = np.zeros((vocab_size, 300))\n",
" for word, i in tokenizer.word_index.items():\n",
" if word in model_w2v.vocab:\n",
" embedding_vector = model_w2v[word]\n",
" embedding_vector = np.array(embedding_vector)\n",
" if embedding_vector is not None:\n",
" embedding_matrix[i] = embedding_vector\n",
"\n",
" e = Embedding(vocab_size, 300, embeddings_initializer=Constant(embedding_matrix), input_length=maxlen, trainable=False)\n",
" \n",
" return e\n",
"\n",
"# reading dataset\n",
"train_data = pd.read_csv(\"train.csv\", index_col=0)#.iloc[:1000,:]\n",
"test_data = pd.read_csv(\"test.csv\", index_col=0)#.iloc[:1000,:]\n",
"gold_test_data = pd.read_csv(\"gold_test.csv\", index_col=0)\n",
"\n",
"def prepare_data(is_preprocessed, train_data, test_data, gold_test_data=None, cv=0.2):\n",
" '''\n",
" Returns:\n",
" processed_train_data : [preprocessed-reviews ratings]\n",
" train_ratings : ratings (one hot encoded)\n",
" \n",
" processed_cv_data : [preprocessed-reviews ratings]\n",
" cv_ratings : ratings (one hot encoded)\n",
" \n",
" processed_test_data : [preprocessed-reviews ratings] ::- ratings is optinal (it will be present if gold_test_data is given)\n",
" test_ratings : ratings (one hot encoded) (optional)\n",
"\n",
" vocab_size\n",
" '''\n",
" # initialize tokenizer\n",
" tokenizer = Tokenizer(oov_token = '<oov>')\n",
" \n",
" if not is_preprocessed:\n",
" # pre-process data\n",
" train_reviews, tokenizer = preprocess_data(tokenizer, train_data, preprocessing_training_data=True)\n",
" test_reviews, tokenizer = preprocess_data(tokenizer, test_data, maxlen=train_reviews.shape[1])\n",
" else:\n",
" train_reviews = pd.drop(train_data, columns=['ratings'])\n",
" test_reviews = test_data\n",
" \n",
" \n",
" # print(f\"train_reviews {train_reviews.shape}\")\n",
" # print(f\"test_reviews {test_reviews.shape}\")\n",
" \n",
" # print(pd.concat([train_reviews, train_data['ratings']], axis=1).shape)\n",
"\n",
" # split dataset into train & cross-validation set\n",
" train_reviews, cv_reviews, train_ratings, cv_ratings = train_test_split(train_reviews, train_data['ratings'], test_size=cv, random_state=0, stratify=train_data['ratings']) \n",
" \n",
" # print(f\"train_reviews {train_reviews.shape}\")\n",
" # print(f\"cv_reviews {cv_reviews.shape}\")\n",
" # print(f\"train_ratings {train_ratings.shape}\")\n",
" # print(f\"cv_ratings {cv_ratings.shape}\")\n",
" \n",
" processed_train_data = pd.concat([train_reviews, train_ratings], axis=1)\n",
" processed_cv_data = pd.concat([cv_reviews, cv_ratings], axis=1)\n",
" \n",
" # print(f\"processed_train_data {processed_train_data.shape}\")\n",
" # print(f\"processed_cv_data {processed_cv_data.shape}\")\n",
" \n",
" # One hot encode train ratings\n",
" train_ratings = pd.get_dummies(train_ratings)\n",
"\n",
" # One hot encode CV ratings\n",
" cv_ratings = pd.get_dummies(cv_ratings)\n",
"\n",
" # print(f\"OHE train_ratings {train_ratings.shape}\")\n",
" # print(f\"OHE cv_ratings {cv_ratings.shape}\")\n",
" \n",
" # One hot encode gold test ratings\n",
" if gold_test_data is not None:\n",
" gold_test_ratings = gold_test_data['ratings']\n",
" processed_gold_test_data = pd.concat([test_reviews, gold_test_ratings], axis=1)\n",
" \n",
" # One hot encode gold test ratings\n",
" gold_test_ratings = pd.get_dummies(gold_test_ratings)\n",
"\n",
" maxlen=train_reviews.shape[1] \n",
" e = get_e(tokenizer, maxlen)\n",
" return processed_train_data, train_ratings, processed_cv_data, cv_ratings, processed_gold_test_data, gold_test_ratings, e\n",
"\n",
" return processed_train_data, train_ratings, processed_cv_data, cv_ratings, test_reviews, None, None\n",
"\n",
"def train_model_and_check_test_accuracy(e, batch_size, epochs, train_reviews, train_ratings, cv_reviews, cv_ratings, test_reviews, test_ratings):\n",
" # Build model\n",
" train_ratings = pd.get_dummies(train_ratings)\n",
" cv_ratings = pd.get_dummies(cv_ratings)\n",
" test_ratings = pd.get_dummies(test_ratings)\n",
" \n",
" model = NeuralNet(train_reviews, train_ratings, e)\n",
" model.build_nn()\n",
" model.train_nn(batch_size, epochs, cv_reviews, cv_ratings)\n",
"\n",
" # predict\n",
" test_predictions = model.predict(test_reviews)\n",
" train_predictions = model.predict(train_reviews)\n",
"\n",
" # get ratings from probabilities\n",
" train_predictions = np.argmax(train_predictions, axis=1) + 1\n",
" # test_predictions = np.argmax(test_predictions, axis=1)\n",
" train_ratings = np.argmax(np.array(train_ratings), axis=1) + 1\n",
"\n",
" # report generation on training data\n",
" print(f\"Classification report:\\n{metrics.classification_report(train_ratings, train_predictions)}\\n\")\n",
" metrics.confusion_matrix(train_ratings, train_predictions)\n",
"\n",
" # predict\n",
" test_predictions = model.predict(test_reviews)\n",
"\n",
" # get ratings from probabilities\n",
" test_predictions = np.argmax(test_predictions, axis=1) + 1\n",
" test_ratings = np.argmax(np.array(test_ratings), axis=1) + 1\n",
"\n",
" # report generation on training data\n",
" print(f\"Classification report:\\n{metrics.classification_report(test_ratings, test_predictions)}\\n\")\n",
" metrics.confusion_matrix(test_ratings, test_predictions)\n",
"\n",
"def class_counts(ratings): \n",
" # find # of elements of each class\n",
" cnts = ratings.value_counts()\n",
" class1_count = cnts[1]\n",
" class2_count = cnts[2]\n",
" class3_count = cnts[3]\n",
" class4_count = cnts[4]\n",
" class5_count = cnts[5]\n",
" \n",
" return class1_count, class2_count, class3_count, class4_count, class5_count\n",
"\n",
"def minority_class_count(ratings):\n",
" # find # no of elements in the minority class\n",
" return min(class_counts(ratings))\n",
"\n",
"def majority_class_count(ratings):\n",
" # find # no of elements in the minority class\n",
" return max(class_counts(ratings))\n",
"\n",
"def average_class_count(ratings):\n",
" return int(sum(class_counts(ratings))/5) \n"
],
"execution_count": 5,
"outputs": [
{
"output_type": "error",
"ename": "NameError",
"evalue": "ignored",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-a4ea6d914189>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[0;31m# reading dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 108\u001b[0;31m \u001b[0mtrain_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"train.csv\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex_col\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;31m#.iloc[:1000,:]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 109\u001b[0m \u001b[0mtest_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"test.csv\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex_col\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;31m#.iloc[:1000,:]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 110\u001b[0m \u001b[0mgold_test_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"gold_test.csv\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex_col\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'pd' is not defined"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "K_tcvDwJfcvU"
},
"source": [
"#### Set Hyperparameter"
]
},
{
"cell_type": "code",
"metadata": {
"id": "wPhgCP_ofgLC"
},
"source": [
"batch_size, epoch = 32, 1"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "kNFlkgEPcE7z"
},
"source": [
"#### Undersampling Technique-1"
]
},
{
"cell_type": "code",
"metadata": {
"id": "F6brLxAQbwwh"
},
"source": [
"# 1. reduce the number of ratings of each class to the 'minority_class_count' \n",
"preprocessed_train_data, train_ratings, processed_cv_data, cv_ratings, processed_gold_test_data, gold_test_ratings, e = prepare_data(False, train_data, test_data, gold_test_data)\n",
"class1_data = preprocessed_train_data[preprocessed_train_data['ratings']==1]\n",
"class2_data = preprocessed_train_data[preprocessed_train_data['ratings']==2]\n",
"class3_data = preprocessed_train_data[preprocessed_train_data['ratings']==3]\n",
"class4_data = preprocessed_train_data[preprocessed_train_data['ratings']==4]\n",
"class5_data = preprocessed_train_data[preprocessed_train_data['ratings']==5]\n",
"\n",
"minority_class_cnt = minority_class_count(preprocessed_train_data['ratings'])\n",
"class1_data = class1_data.sample(minority_class_cnt, random_state=1)\n",
"class2_data = class2_data.sample(minority_class_cnt, random_state=1)\n",
"class3_data = class3_data.sample(minority_class_cnt, random_state=1)\n",
"class4_data = class4_data.sample(minority_class_cnt, random_state=1)\n",
"class5_data = class5_data.sample(minority_class_cnt, random_state=1)\n",
"\n",
"train_data_undersample = pd.concat([class1_data, class2_data, class3_data, class4_data, class5_data], axis=0)\n",
"\n",
"train_model_and_check_test_accuracy(e, batch_size, epoch, train_data_undersample.drop(['ratings'], axis=1), train_data_undersample['ratings'], processed_cv_data.drop(['ratings'], axis=1), processed_cv_data['ratings'], processed_gold_test_data.drop(['ratings'], axis=1), processed_gold_test_data['ratings'])\n"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "jmCPQLwncOqL"
},
"source": [
"#### Undersampling technique-2"
]
},
{
"cell_type": "code",
"metadata": {
"id": "TMon7DHQbwnN"
},
"source": [
"# 2. reduce the number of ratings of each class to the 'avg_class_count' \n",
"preprocessed_train_data, train_ratings, processed_cv_data, cv_ratings, processed_gold_test_data, gold_test_ratings, e = prepare_data(False, train_data, test_data, gold_test_data)\n",
"class1_data = preprocessed_train_data[preprocessed_train_data['ratings']==1]\n",
"class2_data = preprocessed_train_data[preprocessed_train_data['ratings']==2]\n",
"class3_data = preprocessed_train_data[preprocessed_train_data['ratings']==3]\n",
"class4_data = preprocessed_train_data[preprocessed_train_data['ratings']==4]\n",
"class5_data = preprocessed_train_data[preprocessed_train_data['ratings']==5]\n",
"\n",
"avg_class_count = average_class_count(preprocessed_train_data['ratings'])\n",
"class1_data = class1_data.sample(min(class1_data.shape[0], avg_class_count), random_state=1)\n",
"class2_data = class2_data.sample(min(class2_data.shape[0], avg_class_count), random_state=1)\n",
"class3_data = class3_data.sample(min(class3_data.shape[0], avg_class_count), random_state=1)\n",
"class4_data = class4_data.sample(min(class4_data.shape[0], avg_class_count), random_state=1)\n",
"class5_data = class5_data.sample(min(class5_data.shape[0], avg_class_count), random_state=1)\n",
"\n",
"train_data_undersample = pd.concat([class1_data, class2_data, class3_data, class4_data, class5_data], axis=0)\n",
"print(train_data_undersample['ratings'].value_counts())\n",
"print(processed_cv_data['ratings'].value_counts())\n",
"\n",
"train_model_and_check_test_accuracy(e, batch_size, epoch, train_data_undersample.drop(['ratings'], axis=1), train_data_undersample['ratings'], processed_cv_data.drop(['ratings'], axis=1), processed_cv_data['ratings'], processed_gold_test_data.drop(['ratings'], axis=1), processed_gold_test_data['ratings'])\n"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "R3l5LVS8cY1Q"
},
"source": [
"#### Oversampling Technique-1\n"
]
},
{
"cell_type": "code",
"metadata": {
"id": "pjv5TTLqccHH"
},
"source": [
"# 1. increase the number of ratings of each class to the 'majority_class_count' \n",
"preprocessed_train_data, train_ratings, processed_cv_data, cv_ratings, processed_gold_test_data, gold_test_ratings, e = prepare_data(False, train_data, test_data, gold_test_data)\n",
"class1_data = preprocessed_train_data[preprocessed_train_data['ratings']==1]\n",
"class2_data = preprocessed_train_data[preprocessed_train_data['ratings']==2]\n",
"class3_data = preprocessed_train_data[preprocessed_train_data['ratings']==3]\n",
"class4_data = preprocessed_train_data[preprocessed_train_data['ratings']==4]\n",
"class5_data = preprocessed_train_data[preprocessed_train_data['ratings']==5]\n",
"\n",
"majority_class_cnt = majority_class_count(preprocessed_train_data['ratings'])\n",
"class1_data = class1_data.sample(majority_class_cnt, random_state=1, replace=True)\n",
"class2_data = class2_data.sample(majority_class_cnt, random_state=1, replace=True)\n",
"class3_data = class3_data.sample(majority_class_cnt, random_state=1, replace=True)\n",
"class4_data = class4_data.sample(majority_class_cnt, random_state=1, replace=True)\n",
"class5_data = class5_data.sample(majority_class_cnt, random_state=1, replace=True)\n",
"\n",
"train_data_undersample = pd.concat([class1_data, class2_data, class3_data, class4_data, class5_data], axis=0)\n",
"\n",
"print(train_data_undersample['ratings'].value_counts())\n",
"print(processed_cv_data['ratings'].value_counts())\n",
"\n",
"train_model_and_check_test_accuracy(e, batch_size, epoch, train_data_undersample.drop(['ratings'], axis=1), train_data_undersample['ratings'], processed_cv_data.drop(['ratings'], axis=1), processed_cv_data['ratings'], processed_gold_test_data.drop(['ratings'], axis=1), processed_gold_test_data['ratings'])\n"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "nSr7yoTscium"
},
"source": [
"#### Oversampling Technique-2"
]
},
{
"cell_type": "code",
"metadata": {
"id": "uVuXnTI3cllj"
},
"source": [
"# 2. reduce the number of ratings of each class to the 'avg_class_count' \n",
"preprocessed_train_data, train_ratings, processed_cv_data, cv_ratings, processed_gold_test_data, gold_test_ratings, e = prepare_data(False, train_data, test_data, gold_test_data)\n",
"class1_data = preprocessed_train_data[preprocessed_train_data['ratings']==1]\n",
"class2_data = preprocessed_train_data[preprocessed_train_data['ratings']==2]\n",
"class3_data = preprocessed_train_data[preprocessed_train_data['ratings']==3]\n",
"class4_data = preprocessed_train_data[preprocessed_train_data['ratings']==4]\n",
"class5_data = preprocessed_train_data[preprocessed_train_data['ratings']==5]\n",
"\n",
"avg_class_count = average_class_count(preprocessed_train_data['ratings'])\n",
"class1_data = class1_data.sample(max(class1_data.shape[0], avg_class_count), random_state=1, replace=True)\n",
"class2_data = class2_data.sample(max(class2_data.shape[0], avg_class_count), random_state=1, replace=True)\n",
"class3_data = class3_data.sample(max(class3_data.shape[0], avg_class_count), random_state=1, replace=True)\n",
"class4_data = class4_data.sample(max(class4_data.shape[0], avg_class_count), random_state=1, replace=True)\n",
"class5_data = class5_data.sample(max(class5_data.shape[0], avg_class_count), random_state=1, replace=True)\n",
"\n",
"train_data_undersample = pd.concat([class1_data, class2_data, class3_data, class4_data, class5_data], axis=0)\n",
"print(train_data_undersample['ratings'].value_counts())\n",
"print(processed_cv_data['ratings'].value_counts())\n",
"\n",
"train_model_and_check_test_accuracy(e, batch_size, epoch, train_data_undersample.drop(['ratings'], axis=1), train_data_undersample['ratings'], processed_cv_data.drop(['ratings'], axis=1), processed_cv_data['ratings'], processed_gold_test_data.drop(['ratings'], axis=1), processed_gold_test_data['ratings'])\n"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "BXAOF039clD8"
},
"source": [
"#### Oversampling & Undersampling Technique-3\n"
]
},
{
"cell_type": "code",
"metadata": {
"id": "mMDNn5Sjcx3K"
},
"source": [
"# 3. change the number of ratings of each class to the 'avg_class_count' \n",
"preprocessed_train_data, train_ratings, processed_cv_data, cv_ratings, processed_gold_test_data, gold_test_ratings, e = prepare_data(False, train_data, test_data, gold_test_data)\n",
"class1_data = preprocessed_train_data[preprocessed_train_data['ratings']==1]\n",
"class2_data = preprocessed_train_data[preprocessed_train_data['ratings']==2]\n",
"class3_data = preprocessed_train_data[preprocessed_train_data['ratings']==3]\n",
"class4_data = preprocessed_train_data[preprocessed_train_data['ratings']==4]\n",
"class5_data = preprocessed_train_data[preprocessed_train_data['ratings']==5]\n",
"\n",
"avg_class_count = average_class_count(preprocessed_train_data['ratings'])\n",
"class1_data = class1_data.sample(avg_class_count, random_state=1, replace=True)\n",
"class2_data = class2_data.sample(avg_class_count, random_state=1, replace=True)\n",
"class3_data = class3_data.sample(avg_class_count, random_state=1, replace=True)\n",
"class4_data = class4_data.sample(avg_class_count, random_state=1, replace=True)\n",
"class5_data = class5_data.sample(avg_class_count, random_state=1, replace=True)\n",
"\n",
"train_data_undersample = pd.concat([class1_data, class2_data, class3_data, class4_data, class5_data], axis=0)\n",
"print(train_data_undersample['ratings'].value_counts())\n",
"print(processed_cv_data['ratings'].value_counts())\n",
"\n",
"train_model_and_check_test_accuracy(e, batch_size, epoch, train_data_undersample.drop(['ratings'], axis=1), train_data_undersample['ratings'], processed_cv_data.drop(['ratings'], axis=1), processed_cv_data['ratings'], processed_gold_test_data.drop(['ratings'], axis=1), processed_gold_test_data['ratings'])\n",
"\n"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "20O2-bhxemrB"
},
"source": [
"### Word Embeddings"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Kr21JYWpPlBr"
},
"source": [
"#### Glove"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "iKLm2r3TPjYy",
"outputId": "86d795d2-31d5-4cd5-c564-2fa166e9cb5e"
},
"source": [
"# load the whole embedding into memory\n",
"embeddings_index = dict()\n",
"f = open('glove.6B.300d.txt')\n",
"for line in f:\n",
" values = line.split()\n",
" word = values[0]\n",
" coefs = np.asarray(values[1:], dtype='float32')\n",
" embeddings_index[word] = coefs\n",
"f.close()\n",
"print('Loaded %s word vectors.' % len(embeddings_index))\n",
"\n",
"# create a weight matrix for words in training docs\n",
"embedding_matrix = np.zeros((vocab_size, 300))\n",
"for word, i in tokenizer.word_index.items():\n",
" embedding_vector = embeddings_index.get(word)\n",
" if embedding_vector is not None:\n",
" embedding_matrix[i] = embedding_vector\n",
"print(train_reviews.shape)\n",
"print(embedding_matrix.shape)\n",
"e = Embedding(vocab_size, 300, embeddings_initializer=Constant(embedding_matrix), input_length=maxlen, trainable=False)\n",
"# TODO: Cross-validate\n",
"print(train_reviews.shape)"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Loaded 400000 word vectors.\n",
"(50000, 31)\n",
"(15956, 300)\n",
"(50000, 31)\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8ETH36deQRfc"
},
"source": [
"#### Fasttext\n"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "34JQ-3RZQDcl",
"outputId": "c53ce3a6-0787-432d-8808-d25f946e99ea"
},
"source": [
"# load the whole embedding into memory\n",
"embeddings_index = dict()\n",
"f = open('wiki-news-300d-1M.vec')\n",
"for line in f:\n",
" values = line.split()\n",
" word = values[0]\n",
" coefs = np.asarray(values[1:], dtype='float32')\n",
" embeddings_index[word] = coefs\n",
"f.close()\n",
"print('Loaded %s word vectors.' % len(embeddings_index))\n",
"#print(values)\n",
"# create a weight matrix for words in training docs\n",
"embedding_matrix = np.zeros((vocab_size, 300))\n",
"for word, i in tokenizer.word_index.items():\n",
" embedding_vector = embeddings_index.get(word)\n",
" if embedding_vector is not None:\n",
" embedding_matrix[i] = embedding_vector\n",
"print(train_reviews.shape)\n",
"print(embedding_matrix.shape)\n",
"e = Embedding(vocab_size, 300, embeddings_initializer=Constant(embedding_matrix), input_length=maxlen, trainable=True)\n",
"# TODO: Cross-validate\n",
"print(train_reviews.shape)"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Loaded 999995 word vectors.\n",
"(50000, 31)\n",
"(15816, 300)\n",
"(50000, 31)\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "WXjRde9FQVQF"
},
"source": [
"####Word2vec"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "WKm4zt-bQY2D",
"outputId": "0427b5ce-8f8c-44c8-eeb3-6700dab476cb"
},
"source": [
"from gensim.models import KeyedVectors\n",
"\n",
"# load the whole embedding into memory\n",
"embeddings_index = dict()\n",
"\n",
"\n",
"#f = open('GoogleNews-vectors-negative300.bin')\n",
"model_w2v = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)\n",
"print(type(model_w2v))\n",
"\n",
"# prepare embedding matrix\n",
"embedding_matrix = np.zeros((vocab_size, 300))\n",
"for word, i in tokenizer.word_index.items():\n",
" if word in model_w2v.vocab:\n",
" embedding_vector = model_w2v[word]\n",
" embedding_vector = np.array(embedding_vector)\n",
" if embedding_vector is not None:\n",
" embedding_matrix[i] = embedding_vector\n",
"\n",
"e = Embedding(vocab_size, 300, embeddings_initializer=Constant(embedding_matrix), input_length=maxlen, trainable=False)\n"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"<class 'gensim.models.keyedvectors.Word2VecKeyedVectors'>\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "1rx4hyhmCpZ9"
},
"source": [
""
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "Cj7uViOAP8nZ"
},
"source": [
"### Train Neural Network\n"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "u8cPuyIaPcsI",
"outputId": "8432c74d-a9c8-47eb-8d81-a0149f9f9c7c"
},
"source": [
"batch_size, epochs = 32, 5\n",
"\n",
"train_ratings = pd.get_dummies(train_ratings) \n",
"\n",
"model = NeuralNet(train_reviews, train_ratings,e)\n",
"model.build_nn()\n",
"model.train_nn(batch_size, epochs)\n",
"\n",
"# predict\n",
"test_predictions = model.predict(test_reviews)\n",
"train_predictions = model.predict(train_reviews)\n",
"\n",
"# get ratings from probabilities\n",
"train_predictions = np.argmax(train_predictions, axis=1)\n",
"test_predictions = np.argmax(test_predictions, axis=1)\n",
"train_ratings = np.argmax(np.array(train_ratings), axis=1)\n",
"\n",
"# report generation on training data\n",
"print(f\"Classification report:\\n{metrics.classification_report(train_ratings, train_predictions)}\\n\")\n",
"metrics.confusion_matrix(train_ratings, train_predictions)"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Reviews Shape: (50000, 31)\n",
"Ratings Shape: (50000, 5)\n",
"Epoch 1/5\n",
"1563/1563 [==============================] - 5s 3ms/step - loss: 0.9450 - accuracy: 0.6715\n",
"Epoch 2/5\n",
"1563/1563 [==============================] - 4s 3ms/step - loss: 0.7339 - accuracy: 0.7343\n",
"Epoch 3/5\n",
"1563/1563 [==============================] - 4s 3ms/step - loss: 0.6787 - accuracy: 0.7541\n",
"Epoch 4/5\n",
"1563/1563 [==============================] - 4s 2ms/step - loss: 0.6527 - accuracy: 0.7598\n",
"Epoch 5/5\n",
"1563/1563 [==============================] - 4s 3ms/step - loss: 0.6331 - accuracy: 0.7668\n",
"Classification report:\n",
" precision recall f1-score support\n",
"\n",
" 0 0.81 0.67 0.73 4059\n",
" 1 0.68 0.52 0.59 2265\n",
" 2 0.72 0.44 0.55 3612\n",
" 3 0.62 0.32 0.42 6871\n",
" 4 0.82 0.96 0.88 33193\n",
"\n",
" accuracy 0.79 50000\n",
" macro avg 0.73 0.58 0.63 50000\n",
"weighted avg 0.77 0.79 0.77 50000\n",
"\n",
"\n"
],
"name": "stdout"
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"array([[ 2703, 161, 103, 123, 969],\n",
" [ 197, 1173, 120, 114, 661],\n",
" [ 172, 164, 1600, 356, 1320],\n",
" [ 107, 107, 236, 2172, 4249],\n",
" [ 174, 111, 169, 755, 31984]])"
]
},
"metadata": {
"tags": []
},
"execution_count": 14
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "x2iF2egfUspg"
},
"source": [
"#### Test accuracy model"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "foQ-CrQnQHyL",
"outputId": "fbaee72a-374c-4893-c0b4-c915ffb690c3"
},
"source": [
"test_data = pd.read_csv(\"gold_test.csv\", index_col=0)\n",
"\n",
"# separate reviews, ratings\n",
"test_reviews = test_data.iloc[:, :-1]\n",
"test_ratings = test_data.iloc[:, -1]\n",
"test_ratings = pd.get_dummies(test_ratings)\n",
"\n",
"\n",
"# pre-process data\n",
"test_reviews, tokenizer = preprocess_data(tokenizer, test_reviews, maxlen=train_reviews.shape[1])\n",
"\n",
"# predict\n",
"test_predictions = model.predict(test_reviews)\n",
"\n",
"# get ratings from probabilities\n",
"test_predictions = np.argmax(test_predictions, axis=1)\n",
"test_ratings = np.argmax(np.array(test_ratings), axis=1)\n",
"\n",
"# report generation on training data\n",
"print(f\"Classification report:\\n{metrics.classification_report(test_ratings, test_predictions)}\\n\")\n",
"metrics.confusion_matrix(test_ratings, test_predictions)"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Classification report:\n",
" precision recall f1-score support\n",
"\n",
" 0 0.59 0.42 0.49 1271\n",
" 1 0.21 0.15 0.17 630\n",
" 2 0.32 0.17 0.22 911\n",
" 3 0.29 0.15 0.20 1404\n",
" 4 0.72 0.92 0.81 5784\n",
"\n",
" accuracy 0.63 10000\n",
" macro avg 0.42 0.36 0.38 10000\n",
"weighted avg 0.57 0.63 0.59 10000\n",
"\n",
"\n"
],
"name": "stdout"
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"array([[ 531, 147, 85, 68, 440],\n",
" [ 146, 93, 67, 73, 251],\n",
" [ 95, 103, 159, 127, 427],\n",
" [ 56, 46, 121, 209, 972],\n",
" [ 79, 63, 71, 232, 5339]])"
]
},
"metadata": {
"tags": []
},
"execution_count": 15
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "uiB2cRSVQlZg"
},
"source": [
"#### Test input"
]
},
{
"cell_type": "code",
"metadata": {
"id": "d1gB_zAqQkrf",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "4de0015a-6423-4177-c2f8-a7b766e66a4b"
},
"source": [
"# Test input\n",
"Test = ['this is bad', 'wow nice!', 'this is a great product']\n",
"test_reviews = pd.DataFrame(Test, columns=['reviews'])\n",
"\n",
"# pre-process data\n",
"test_reviews, tokenizer = preprocess_data(tokenizer, test_reviews, maxlen=train_reviews.shape[1])\n",
"\n",
"# predict\n",
"test_predictions = model.predict(test_reviews)\n",
"\n",
"# show probabilities\n",
"print(test_predictions)"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"[[0.489183 0.03963117 0.05302086 0.06513943 0.35302556]\n",
" [0.00511781 0.01673113 0.02535018 0.1042458 0.84855515]\n",
" [0.01163882 0.00901665 0.00830102 0.03947002 0.9315735 ]]\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "DyY0ZJ_U9XV5"
},
"source": [
"#### Testing on custom input GUI"
]
},
{
"cell_type": "code",
"metadata": {
"id": "zoBnu4pB9eF4"
},
"source": [
"import tabulate\n",
"def predict_rating(text):\n",
" test_reviews = pd.DataFrame([text], columns=['reviews'])\n",
"\n",
" test_reviews, _ = preprocess_data(tokenizer, test_reviews, maxlen=maxlen)\n",
"\n",
" test_predictions = model.predict(test_reviews)\n",
" test_ratings = np.argmax(np.array(test_predictions), axis=1) + 1\n",
" print(tabulate.tabulate(test_predictions, headers=['Rating-1', 'Rating-2', 'Rating-3', 'Rating-4', 'Rating-5']))\n",
" str = f\"\\nPredicted Rating: {test_ratings}\"\n",
" return str"
],
"execution_count": 1,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"cellView": "form",
"id": "7Eg9rqGQ9ckJ",
"outputId": "e7fd0687-3c57-4f57-895a-a7d0e4ce9a12"
},
"source": [
"#@title Predict\n",
"InputText = 'this is good' #@param {type: 'string'}\n",
"output = predict_rating(InputText)\n",
"\n",
"print(output)\n"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
" Rating-1 Rating-2 Rating-3 Rating-4 Rating-5\n",
"---------- ---------- ---------- ---------- ----------\n",
" 0.0630881 0.0287269 0.0568154 0.129721 0.721648\n",
"\n",
"Predicted Rating: [5]\n"
],
"name": "stdout"
}
]
}
]
}
\ No newline at end of file
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package stopwords to /home/rohit/nltk_data...\n",
"[nltk_data] Package stopwords is already up-to-date!\n",
"[nltk_data] Downloading package punkt to /home/rohit/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import tensorflow as tf\n",
"import pickle \n",
"import pandas as pd\n",
"import numpy as np\n",
"import string\n",
"import nltk\n",
"from nltk.corpus import stopwords\n",
"import tensorflow as tf\n",
"from tensorflow.keras.preprocessing.text import Tokenizer\n",
"from tensorflow.keras.preprocessing.text import one_hot\n",
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
"from tensorflow.keras import Sequential\n",
"from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, SimpleRNN, LSTM, Bidirectional, GRU\n",
"from sklearn import datasets, model_selection, metrics\n",
"from keras.layers.embeddings import Embedding\n",
"from keras.initializers import Constant\n",
"from nltk.tokenize import word_tokenize\n",
"from sklearn.model_selection import train_test_split \n",
"nltk.download('stopwords')\n",
"stopword = stopwords.words('english') \n",
"nltk.download('punkt')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"def encode_data(tokenizer, text, tokens, preprocessing_training_data = False):\n",
" # This function will be used to encode the reviews using a dictionary (created using corpus vocabulary) \n",
"\n",
" # Example of encoding :\"The food was fabulous but pricey\" has a vocabulary of 4 words, each one has to be mapped to an integer like: \n",
" # {'The':1,'food':2,'was':3 'fabulous':4 'but':5 'pricey':6} this vocabulary has to be created for the entire corpus and then be used to \n",
" # encode the words into integers \n",
"\n",
" # return encoded examples\n",
" if preprocessing_training_data:\n",
" tokenizer = Tokenizer(oov_token = '<oov>')\n",
" tokenizer.fit_on_texts(tokens)\n",
"\n",
" sequences = tokenizer.texts_to_sequences(text)\n",
" return sequences, tokenizer\n",
"\n",
"def convert_to_lower(text):\n",
" # return the reviews after convering then to lowercase\n",
" lower_text = text.lower()\n",
" return lower_text\n",
"\n",
"def perform_tokenization(text):\n",
" # return the reviews after performing tokenization\n",
" token=nltk.word_tokenize(text)\n",
" return token\n",
"\n",
"def remove_stopwords(text):\n",
" # return the reviews after removing the stopwords\n",
" stopword = [] # not any stopword\n",
" removing_stopwords=[word for word in text if word not in stopword]\n",
" return removing_stopwords\n",
"\n",
"def remove_punctuation(text):\n",
" # return the reviews after removing punctuations\n",
" removing_punctuation = [word for word in text if word.isalpha()]\n",
" return removing_punctuation\n",
"\n",
"def perform_padding(data, maxlen):\n",
" # return the reviews after padding the reviews to maximum length\n",
" padded_data = pad_sequences(data, maxlen=maxlen, padding='post')\n",
" return padded_data\n",
"\n",
"def preprocess_data(tokenizer, data, preprocessing_training_data=False, maxlen=None):\n",
" # make all the following function calls on your data\n",
" # EXAMPLE:->\n",
" '''\n",
" review = data[\"reviews\"]\n",
" review = convert_to_lower(review)\n",
" review = remove_punctuation(review)\n",
" review = remove_stopwords(review)\n",
" review = perform_tokenization(review)\n",
" review = encode_data(review)\n",
" review = perform_padding(review)\n",
" '''\n",
" # return processed data\n",
"\n",
" reviews = data[\"reviews\"]\n",
" list_of_reviews = list(reviews)\n",
" string_of_reviews = ' '.join(str(e) for e in list_of_reviews)\n",
"\n",
" lower_text = convert_to_lower(string_of_reviews)\n",
" tokens = perform_tokenization(lower_text)\n",
" tokens = remove_stopwords(tokens)\n",
" tokens = remove_punctuation(tokens)\n",
" encoded_data, tokenizer = encode_data(tokenizer, reviews, tokens, preprocessing_training_data)\n",
" reviews = perform_padding(encoded_data, maxlen)\n",
"\n",
" return pd.DataFrame(reviews), tokenizer"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"maxlen = 31 # verified "
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"! tar -xzf model.tar.gz\n",
"model = tf.keras.models.load_model(\"model\", compile = False)\n",
"\n",
"with open(r\"tokenizer.pkl\", \"rb\") as input_file:\n",
" tokenizer = pickle.load(input_file)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[7.46410131e-01 1.02791116e-01 1.10829026e-01 2.15716772e-02\n",
" 1.83980539e-02]\n",
" [9.36661454e-05 1.18578457e-04 2.48400168e-03 7.10299909e-02\n",
" 9.26273704e-01]\n",
" [1.44877762e-03 4.65300196e-04 2.53088167e-03 4.58934791e-02\n",
" 9.49661493e-01]]\n"
]
}
],
"source": [
"# Test input\n",
"Test = ['this is bad', 'wow nice!', 'this is a great product']\n",
"test_reviews = pd.DataFrame(Test, columns=['reviews'])\n",
"\n",
"# pre-process data\n",
"test_reviews, tokenizer = preprocess_data(tokenizer, test_reviews, maxlen=maxlen)\n",
"\n",
"# predict\n",
"test_predictions = model.predict(test_reviews)\n",
"\n",
"# show probabilities\n",
"print(test_predictions)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def predict_rating(text):\n",
" test_reviews = pd.DataFrame([text], columns=['reviews'])\n",
"\n",
"# test_reviews, _ = preprocess_data(tokenizer, test_reviews, maxlen=train_reviews.shape[1])\n",
" test_reviews, _ = preprocess_data(tokenizer, test_reviews, maxlen=31)\n",
"\n",
" test_predictions = model.predict(test_reviews)\n",
" test_ratings = np.argmax(np.array(test_predictions), axis=1) + 1\n",
" \n",
" import tabulate\n",
" \n",
" str = f\"\\nPredicted Rating: {test_ratings}\\n\\n\\n\"\n",
" str += tabulate.tabulate(test_predictions, headers=[\"Rating-1\", \"Rating-2\", \"Rating-3\", \"Rating-4\", \"Rating-5\"])\n",
" return str"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"import tkinter as tk\n",
" \n",
"# Top level window \n",
"frame = tk.Tk() \n",
"frame.title(\"Rate reviews\") \n",
"frame.geometry('600x600') \n",
"\n",
"# Function for getting Input from textbox and printing it at label widget \n",
"def printInput(): \n",
" inp = inputtxt.get(1.0, \"end-1c\") \n",
" output = predict_rating(inp)\n",
" lbl.config(text = output) \n",
"\n",
"# TextBox Creation \n",
"inputtxt = tk.Text(frame, \n",
" height = 10, \n",
" width = 40, \n",
" font=(\"Courier\", 18)) \n",
" \n",
"inputtxt.pack() \n",
" \n",
"# Button Creation \n",
"printButton = tk.Button(frame, \n",
" text = \"Print\", \n",
" command = printInput, \n",
" font=(\"monospace\", 14)) \n",
"printButton.pack() \n",
" \n",
"# Label Creation \n",
"lbl = tk.Label(frame, text = \"\") \n",
"lbl.pack() \n",
"frame.mainloop() "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
Instructions to run the code file for Assignment-3
WITHOUT PRE-TRAINED EMBEDDING:
1. Open code.ipynb file.
2. Run the following sections in the file one by one.
* Install python packages
* To access pre-trained embeddings and dataset (train and test)
* Header files
* Preprocessing function
* Define Models (without pre-trained embedding layer)
* Import Datasets
* Without pre-trained word embedding
3. If you want to run the data-imbalance handling techniques, go to the “Data Imbalanced Handling” section and run the first cell, “Utility Functions”. Then you can run any of the techniques (Undersampling Technique-1, Undersampling Technique-2, and so on).
4. Run any of the models (FFNN, LSTM, RNN, Bi-LSTM, GRU, Bi-GRU) by running the section “Train X Model”, replacing X with the model you need (for example, “Train LSTM Model”).
---------------------------------------------------------------------------------------------------------------------------
WITH PRE-TRAINED EMBEDDING:
1. Open code.ipynb file.
2. Run the cells in the file one by one.
3. Run “Install python packages” section.
4. Then run the cell “To access pre-trained embeddings and dataset (train and test)”, which accesses the pre-trained word embeddings and datasets mounted from our Google Drive folder. To access it, please mail your id to us and we will share the folder, so that all embeddings (glove.6B.200d.txt, glove.6B.300d.txt, wiki-news-300d-1M.vec, and GoogleNews-vectors-negative300.bin) and datasets (train.csv, gold_test.csv) can be accessed by you. We are using word2vec for this assignment as it gives the best results.
5. Run the “Header files” section.
6. Run the “Preprocessing function” section to convert our train dataset into embeddings.
7. We will run the “Define Models (with pretrained embedding layer)” section which contains models for RNN, LSTM, Bi-LSTM, GRU and Bi-GRU. Run one at a time.
8. Run the “Import Datasets” section to import train and test dataset.
9. Then run the “Word Embeddings” section; it contains three embedding sub-sections: Glove, Word2vec and fasttext. We run only the Word2vec sub-section, as that is the one used in our assignment.
10. Move to the “Data Imbalanced Handling” section and use one of the sampling techniques mentioned there (for example, Undersampling Technique-1). First run the “Utility Functions” cell, then run the section for the technique you want to apply.
11. There are separate sections for our models (RNN, LSTM, Bi-LSTM, GRU and Bi-GRU). Run the corresponding section to train each model and check its test accuracy.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
GUI:
We have also attached a file containing the GUI (GUI.ipynb). This file first loads the stored model and tokenizer and then uses them throughout the code. (code.ipynb contains a section named “Save Model”; run it to store the model and tokenizer. It generates two files, model.tar.gz and tokenizer.pkl, both of which must be present in the same directory as GUI.ipynb.)
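For reference, a minimal sketch of what the “Save Model” section is expected to do is shown below. This is an illustrative outline, assuming the trained NeuralNet instance is named model and the fitted Keras tokenizer is named tokenizer; the exact cell in code.ipynb may differ:

    import pickle
    import tarfile

    # save the trained Keras model in SavedModel format under ./model
    model.model.save("model")

    # pack the SavedModel directory into model.tar.gz (GUI.ipynb extracts it with: tar -xzf model.tar.gz)
    with tarfile.open("model.tar.gz", "w:gz") as tar:
        tar.add("model")

    # pickle the fitted tokenizer so GUI.ipynb can reproduce the same integer encoding
    with open("tokenizer.pkl", "wb") as f:
        pickle.dump(tokenizer, f)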
Libraries to be installed in your local PC:
Keras==2.4.3
tensorflow==2.2.0
numpy==1.17.4
nltk==3.4.5
tabulate==0.8.9
pandas==1.0.5
scikit_learn==0.24.1
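For convenience, these can be installed in one go with: pip install Keras==2.4.3 tensorflow==2.2.0 numpy==1.17.4 nltk==3.4.5 tabulate==0.8.9 pandas==1.0.5 scikit_learn==0.24.1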
All the cells should be run sequentially. A window will then appear with a textbox; insert the review text there and press the “Print” button to get the predicted rating along with the probability of each rating.
Note: This GUI does not work in Colab (tkinter needs a local display) but works perfectly on a local machine. If you want to test the GUI, one approach is to run code.ipynb on Google Colab, download the model.tar.gz and tokenizer.pkl files, put them in the same directory as GUI.ipynb, and run all the cells of GUI.ipynb sequentially.
\ No newline at end of file
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package stopwords to /home/rohit/nltk_data...\n",
"[nltk_data] Package stopwords is already up-to-date!\n",
"[nltk_data] Downloading package punkt to /home/rohit/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import tensorflow as tf\n",
"import pickle \n",
"import pandas as pd\n",
"import numpy as np\n",
"import string\n",
"import nltk\n",
"from nltk.corpus import stopwords\n",
"import tensorflow as tf\n",
"from tensorflow.keras.preprocessing.text import Tokenizer\n",
"from tensorflow.keras.preprocessing.text import one_hot\n",
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
"from tensorflow.keras import Sequential\n",
"from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, SimpleRNN, LSTM, Bidirectional, GRU\n",
"from sklearn import datasets, model_selection, metrics\n",
"from keras.layers.embeddings import Embedding\n",
"from keras.initializers import Constant\n",
"from nltk.tokenize import word_tokenize\n",
"from sklearn.model_selection import train_test_split \n",
"nltk.download('stopwords')\n",
"stopword = stopwords.words('english') \n",
"nltk.download('punkt')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"def encode_data(tokenizer, text, tokens, preprocessing_training_data = False):\n",
" # This function will be used to encode the reviews using a dictionary (created using corpus vocabulary) \n",
"\n",
" # Example of encoding :\"The food was fabulous but pricey\" has a vocabulary of 4 words, each one has to be mapped to an integer like: \n",
" # {'The':1,'food':2,'was':3 'fabulous':4 'but':5 'pricey':6} this vocabulary has to be created for the entire corpus and then be used to \n",
" # encode the words into integers \n",
"\n",
" # return encoded examples\n",
" if preprocessing_training_data:\n",
" tokenizer = Tokenizer(oov_token = '<oov>')\n",
" tokenizer.fit_on_texts(tokens)\n",
"\n",
" sequences = tokenizer.texts_to_sequences(text)\n",
" return sequences, tokenizer\n",
"\n",
"def convert_to_lower(text):\n",
" # return the reviews after convering then to lowercase\n",
" lower_text = text.lower()\n",
" return lower_text\n",
"\n",
"def perform_tokenization(text):\n",
" # return the reviews after performing tokenization\n",
" token=nltk.word_tokenize(text)\n",
" return token\n",
"\n",
"def remove_stopwords(text):\n",
" # return the reviews after removing the stopwords\n",
" stopword = [] # not any stopword\n",
" removing_stopwords=[word for word in text if word not in stopword]\n",
" return removing_stopwords\n",
"\n",
"def remove_punctuation(text):\n",
" # return the reviews after removing punctuations\n",
" removing_punctuation = [word for word in text if word.isalpha()]\n",
" return removing_punctuation\n",
"\n",
"def perform_padding(data, maxlen):\n",
" # return the reviews after padding the reviews to maximum length\n",
" padded_data = pad_sequences(data, maxlen=maxlen, padding='post')\n",
" return padded_data\n",
"\n",
"def preprocess_data(tokenizer, data, preprocessing_training_data=False, maxlen=None):\n",
" # make all the following function calls on your data\n",
" # EXAMPLE:->\n",
" '''\n",
" review = data[\"reviews\"]\n",
" review = convert_to_lower(review)\n",
" review = remove_punctuation(review)\n",
" review = remove_stopwords(review)\n",
" review = perform_tokenization(review)\n",
" review = encode_data(review)\n",
" review = perform_padding(review)\n",
" '''\n",
" # return processed data\n",
"\n",
" reviews = data[\"reviews\"]\n",
" list_of_reviews = list(reviews)\n",
" string_of_reviews = ' '.join(str(e) for e in list_of_reviews)\n",
"\n",
" lower_text = convert_to_lower(string_of_reviews)\n",
" tokens = perform_tokenization(lower_text)\n",
" tokens = remove_stopwords(tokens)\n",
" tokens = remove_punctuation(tokens)\n",
" encoded_data, tokenizer = encode_data(tokenizer, reviews, tokens, preprocessing_training_data)\n",
" reviews = perform_padding(encoded_data, maxlen)\n",
"\n",
" return pd.DataFrame(reviews), tokenizer"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"maxlen = 31 # verified "
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"! tar -xzf model.tar.gz\n",
"model = tf.keras.models.load_model(\"model\", compile = False)\n",
"\n",
"with open(r\"tokenizer.pkl\", \"rb\") as input_file:\n",
" tokenizer = pickle.load(input_file)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[7.46410131e-01 1.02791116e-01 1.10829026e-01 2.15716772e-02\n",
" 1.83980539e-02]\n",
" [9.36661454e-05 1.18578457e-04 2.48400168e-03 7.10299909e-02\n",
" 9.26273704e-01]\n",
" [1.44877762e-03 4.65300196e-04 2.53088167e-03 4.58934791e-02\n",
" 9.49661493e-01]]\n"
]
}
],
"source": [
"# Test input\n",
"Test = ['this is bad', 'wow nice!', 'this is a great product']\n",
"test_reviews = pd.DataFrame(Test, columns=['reviews'])\n",
"\n",
"# pre-process data\n",
"test_reviews, tokenizer = preprocess_data(tokenizer, test_reviews, maxlen=maxlen)\n",
"\n",
"# predict\n",
"test_predictions = model.predict(test_reviews)\n",
"\n",
"# show probabilities\n",
"print(test_predictions)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def predict_rating(text):\n",
" test_reviews = pd.DataFrame([text], columns=['reviews'])\n",
"\n",
"# test_reviews, _ = preprocess_data(tokenizer, test_reviews, maxlen=train_reviews.shape[1])\n",
" test_reviews, _ = preprocess_data(tokenizer, test_reviews, maxlen=31)\n",
"\n",
" test_predictions = model.predict(test_reviews)\n",
" test_ratings = np.argmax(np.array(test_predictions), axis=1) + 1\n",
" \n",
" import tabulate\n",
" \n",
" str = f\"\\nPredicted Rating: {test_ratings}\\n\\n\\n\"\n",
" str += tabulate.tabulate(test_predictions, headers=[\"Rating-1\", \"Rating-2\", \"Rating-3\", \"Rating-4\", \"Rating-5\"])\n",
" return str"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"import tkinter as tk\n",
" \n",
"# Top level window \n",
"frame = tk.Tk() \n",
"frame.title(\"Rate reviews\") \n",
"frame.geometry('600x600') \n",
"\n",
"# Function for getting Input from textbox and printing it at label widget \n",
"def printInput(): \n",
" inp = inputtxt.get(1.0, \"end-1c\") \n",
" output = predict_rating(inp)\n",
" lbl.config(text = output) \n",
"\n",
"# TextBox Creation \n",
"inputtxt = tk.Text(frame, \n",
" height = 10, \n",
" width = 40, \n",
" font=(\"Courier\", 18)) \n",
" \n",
"inputtxt.pack() \n",
" \n",
"# Button Creation \n",
"printButton = tk.Button(frame, \n",
" text = \"Print\", \n",
" command = printInput, \n",
" font=(\"monospace\", 14)) \n",
"printButton.pack() \n",
" \n",
"# Label Creation \n",
"lbl = tk.Label(frame, text = \"\") \n",
"lbl.pack() \n",
"frame.mainloop() "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
Instructions to run the code file for Assignment-4
1. Open code.ipynb file.
2. Run the cells in the file one by one.
3. Run “Install python packages” section.
4. Then run the cell “To download dataset (train and test)”, which downloads the datasets used in the program.
5. Then run the 'To Install the ktrain package' section to install the ktrain package.
6. Then run the 'Import the packages' section.
7. The 'Download the pre-trained DistilBERT-base-uncased Model' section downloads the pretrained model.
8. In the next cell, data preprocessing is performed.
9. The model is trained in the "Training the Model" cell; a rough sketch of this flow is given after this list.
10. Then the predictions are made in the next cell.
11. The "Explainability of the model" section is used for analysis of the model.
12. Run the section "To resolve confusion between adjacent classes" to examine the overlap of words between adjacent classes.
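For orientation, here is a minimal sketch of the ktrain/DistilBERT flow described in steps 5-10, assuming the standard ktrain Transformer API; the variable names (x_train, y_train, x_val, y_val) and the hyperparameters (maxlen, batch size, learning rate, number of epochs) are illustrative and not the exact contents of code.ipynb:

    import ktrain
    from ktrain import text

    # wrap the pre-trained DistilBERT model; the five class names are the ratings 1-5
    t = text.Transformer('distilbert-base-uncased', maxlen=31,
                         class_names=['1', '2', '3', '4', '5'])

    # preprocess raw review strings and their (string) rating labels
    trn = t.preprocess_train(x_train, y_train)
    val = t.preprocess_test(x_val, y_val)

    # build the classifier and fine-tune it
    model = t.get_classifier()
    learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)
    learner.fit_onecycle(5e-5, 1)

    # predict the rating of a new review
    predictor = ktrain.get_predictor(learner.model, preproc=t)
    print(predictor.predict('this is a great product'))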
----------------------------------------------------------------------------------------------------------------------------
GUI:
We have also attached a file containing the GUI (GUI.ipynb). This file first loads the stored model and tokenizer and then uses them throughout the code. (code.ipynb contains a section named “Save Model”; run it to store the model and tokenizer. It generates two files, model.tar.gz and tokenizer.pkl, both of which must be present in the same directory as GUI.ipynb.)
All the cells should be run sequentially. A window will then appear with a textbox; insert the review text there and press the “Print” button to get the predicted rating along with the probability of each rating.
Note: This GUI does not work in Colab (tkinter needs a local display) but works perfectly on a local machine. If you want to test the GUI, one approach is to run code.ipynb on Google Colab, download the model.tar.gz and tokenizer.pkl files, put them in the same directory as GUI.ipynb, and run all the cells of GUI.ipynb sequentially.